| prompt (string, lengths 15-655k) | completion (string, lengths 3-32.4k) | api (string, lengths 8-52) |
---|---|---|
# Copyright 2016 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import random
import numpy as np
import six
import time
from multiprocessing import Process, Queue, Pipe
import copy
from pprint import pprint
import deepmind_lab
"""
Dependencies used for this:
pip uninstall numpy
pip install --no-cache-dir numpy==1.15.4
pip install --upgrade tensorflow
pip install --upgrade tensorflow-probability
pip install wrapt
"""
import tensorflow as tf
import trfl
# ______PARAMETERS______
# Network parameters
state_size = (80,80,3)
action_size = 6
kernel_size_1 = [8,8,3]
output_filters_conv1 = 32
output_filters_conv2 = 64
output_filters_conv3 = 64
hidden_size = 512 # number of units in each Q-network hidden layer
lstm_size = 256
# Training parameters
train_episodes = 5000 # max number of episodes to learn from
num_envs = 4 # experience mini-batch size
learning_rate = 0.001 # learning rate
n = 20 # n in n-step updating
max_steps = 40 # max steps before resetting the agent
gamma = 0.8 # future reward discount
entropy_reg_term = 0.05 # regularization term for entropy
normalise_entropy = False # when true normalizes entropy to be in [-1, 0] to be more invariant to different size action spaces
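# Added illustrative sketch (not part of the original script): the parameters `n` and
# `gamma` above drive n-step updating. Rewards are accumulated backwards over an
# n-step rollout and bootstrapped with the value estimate of the final state, which
# is what trfl.sequence_advantage_actor_critic_loss does internally, with
# `initial_Rs` supplying the bootstrap values. A minimal numpy version of that
# return computation (shapes assumed to be [n, num_envs]) might look like this:
def _n_step_returns_sketch(rewards, discounts, bootstrap_values):
    """Compute bootstrapped n-step returns; illustrative only."""
    returns = np.zeros_like(rewards)
    running = bootstrap_values  # shape [num_envs]
    for t in reversed(range(rewards.shape[0])):
        running = rewards[t] + discounts[t] * running
        returns[t] = running
    return returns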
class ActorCriticNetwork:
def __init__(self, name):
with tf.variable_scope(name):
self.name = name
# Input images
self.inputs_ = tf.placeholder(tf.float32, [None, state_size[0], state_size[1], state_size[2]], name='inputs')
# One hot encode the actions:
# [look_left, look_right, strafe_left, strafe_right, forward, backward]
self.actions = tf.placeholder(tf.int32, [None, num_envs], name='actions')
self.rewards = tf.placeholder(tf.float32, [None, num_envs], name='rewards')
# Conv layers
self.conv1 = tf.contrib.layers.conv2d(self.inputs_, output_filters_conv1, kernel_size=8, stride=2)
self.conv2 = tf.contrib.layers.conv2d(self.conv1, output_filters_conv2, kernel_size=4, stride=2)
self.conv3 = tf.contrib.layers.conv2d(self.conv2, output_filters_conv3, kernel_size=4, stride=1)
# Constructing input to AC network
self.actions_input = tf.reshape(tf.one_hot(self.actions, action_size), [-1, action_size])
self.rewards_input = tf.reshape(self.rewards, [-1, 1])
self.vision_input = tf.reshape(self.conv3, [-1, self.conv3.shape[1]*self.conv3.shape[2]*self.conv3.shape[3]])
self.ac_input = tf.concat([self.actions_input, self.rewards_input, self.vision_input], axis=1)
# FC Layer
self.fc1 = tf.contrib.layers.fully_connected(self.ac_input, hidden_size)
# LSTM Layer
self.lstm_cell = tf.nn.rnn_cell.LSTMCell(lstm_size, state_is_tuple=False)
self.lstm_hidden_state_input = tf.placeholder_with_default(
self.lstm_cell.zero_state(batch_size=num_envs, dtype=tf.float32),
[num_envs, hidden_size])
# Should be lstm_size not hidden_size?
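# Added note: with state_is_tuple=False, LSTMCell's state is the concatenated
# [c, h] vector of size 2 * lstm_size = 512, which happens to equal hidden_size
# here, so the placeholder shape above works; [num_envs, 2 * lstm_size] would
# state the intent more directly.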
self.lstm_input = tf.reshape(self.fc1, [-1, num_envs, hidden_size])
# Dynamic RNN code - might not need to be dynamic
self.lstm_output, self.lstm_hidden_state_output = tf.nn.dynamic_rnn(
self.lstm_cell,
self.lstm_input,
initial_state=self.lstm_hidden_state_input,
dtype=tf.float32,
time_major=True,
# parallel_iterations=num_envs, # Note: not sure what these do
# swap_memory=True, # Note: not sure what these do
)
self.lstm_output_flat = tf.reshape(self.lstm_output, [-1, lstm_size])
# TODO: rethink layer shapes?
# Value function - Linear output layer
self.value_output = tf.contrib.layers.fully_connected(self.lstm_output_flat, 1,
activation_fn=None)
# Policy - softmax output layer
self.policy_logits = tf.contrib.layers.fully_connected(self.lstm_output_flat, action_size, activation_fn=None)
self.policy_output = tf.contrib.layers.softmax(self.policy_logits)
# Action sampling op
self.action_output = tf.squeeze(tf.multinomial(logits=self.policy_logits,num_samples=1), axis=1)
# Used for TRFL stuff
self.value_output_unflat = tf.reshape(self.value_output, [n, num_envs])
self.policy_logits_unflat = tf.reshape(self.policy_logits, [n, num_envs, -1])
self.discounts = tf.placeholder(tf.float32,[n, num_envs],name="discounts")
self.initial_Rs = tf.placeholder(tf.float32, [num_envs], name="initial_Rs")
#TRFL loss
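# Added note (hedged, based on how the tensors are reshaped above and my reading
# of the TRFL API): the loss expects policy_logits of shape [T, B, num_actions],
# baseline_values [T, B], actions/rewards/pcontinues [T, B] and bootstrap_value
# [B], with T = n and B = num_envs here, which is why the outputs are reshaped
# to [n, num_envs, ...] before being passed in.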
a2c_loss, extra = trfl.sequence_advantage_actor_critic_loss(
policy_logits = self.policy_logits_unflat,
baseline_values = self.value_output_unflat,
actions = self.actions,
rewards = self.rewards,
pcontinues = self.discounts,
bootstrap_value = self.initial_Rs,
entropy_cost = entropy_reg_term,
normalise_entropy = normalise_entropy)
self.loss = tf.reduce_mean(a2c_loss)
self.extra = extra
self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
print("Network shapes:")
print("inputs_: ", self.inputs_.shape)
print("actions: ", self.actions.shape)
print("conv1: ", self.conv1.shape)
print("conv2: ", self.conv2.shape)
print("conv3: ", self.conv3.shape)
print("ac_input: ", self.ac_input.shape)
print("fc1: ", self.fc1.shape)
print("lstm_hidden_state_input: ", self.lstm_hidden_state_input.shape)
print("lstm_input: ", self.lstm_input.shape)
print("lstm_hidden_state_output: ", self.lstm_hidden_state_output.shape)
print("lstm_output: ", self.lstm_output.shape)
print("lstm_output_flat: ", self.lstm_output_flat.shape)
print("value_output: ", self.value_output.shape)
print("policy_logits: ", self.policy_logits.shape)
print("policy_output: ", self.policy_output.shape)
print("value_output_unflat: ", self.value_output_unflat.shape)
print("policy_logits_unflat: ", self.policy_logits_unflat.shape)
# Tensorboard
self.average_reward_metric = tf.placeholder(tf.float32, name="average_reward")
# self.average_length_of_episode = tf.placeholder(tf.float32, name="average_length_of_episode")
policy_summary = tf.summary.tensor_summary('policy', self.policy_output)
reward_summary = tf.summary.scalar('average_reward_metric', self.average_reward_metric)
loss_summary = tf.summary.scalar('loss', self.loss)
entropy_summary = tf.summary.scalar('policy_entropy', tf.math.reduce_mean(self.extra.entropy))
baseline_loss_summary = tf.summary.scalar('baseline_loss', tf.math.reduce_mean(self.extra.baseline_loss))
entropy_loss_summary = tf.summary.scalar('entropy_loss', tf.math.reduce_mean(self.extra.entropy_loss))
policy_gradient_loss = tf.summary.scalar('policy_gradient_loss', tf.math.reduce_mean(self.extra.policy_gradient_loss))
self.train_step_summary = tf.summary.merge([
reward_summary,
loss_summary,
entropy_summary,
baseline_loss_summary,
entropy_loss_summary,
policy_gradient_loss
])
self.action_step_summary = tf.summary.merge([policy_summary])
# tf.summary.scalar('average_length_of_episode', self.average_length_of_episode)
def get_action(self, sess, state, t_list, hidden_state_input, action_list, reward_list):
"""
Feed forward to get action.
"""
# Add the extra dimension for feed forward
if np.array(action_list).ndim == 1:
action_list = np.expand_dims(action_list, axis=0)
if np.array(reward_list).ndim == 1:
reward_list = np.expand_dims(reward_list, axis=0)
print("GET ACTION")
print("action_list: ", action_list)
print("reward_list: ", reward_list)
feed = {self.inputs_: np.reshape(state, [-1, state_size[0], state_size[1], state_size[2]]),
self.actions: action_list,
self.rewards: reward_list}
# Can also do placeholder with default
if hidden_state_input is not None:
feed[self.lstm_hidden_state_input] = hidden_state_input
# policies, hidden_state_output = sess.run([mainA2C.policy_output, mainA2C.lstm_hidden_state_output], feed_dict=feed)
actions, logits, policy, hidden_state_output, action_step_summary = \
sess.run([self.action_output,
self.policy_logits,
self.policy_output,
self.lstm_hidden_state_output,
self.action_step_summary], feed_dict=feed)
print("logits: \n", logits)
print("policy: \n", policy)
return actions, hidden_state_output, action_step_summary
def get_value(self, sess, state, hidden_state_input, action, reward):
"""
Feed forward to get the value.
"""
print("GET_VALUE")
print("state.shape: ", state.shape)
feed = {self.inputs_: np.reshape(state, [-1, state_size[0], state_size[1], state_size[2]]),
self.lstm_hidden_state_input: hidden_state_input,
self.actions:
|
np.expand_dims(action, axis=0)
|
numpy.expand_dims
|
import numpy as np
import pandas as pd
import os
from ffequity.utils.dataframefile import DataFrameFile
from datetime import datetime
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import mpld3
from mpld3 import plugins
from mpld3.utils import get_id
import collections
import matplotlib
from ffequity.utils.dataframefile import DataFrameFile
class BenchmarkException(Exception):
pass
class Benchmark:
'''
Benchmark hosts the back-end code for data visualization in the
front-end IPython notebook to make the user experience cleaner
'''
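# Added usage sketch (illustrative only; the years and directory layout are
# assumptions based on the methods below):
#   bm = Benchmark(years=["2016", "2017"])
#   bm.get_tables()                 # reads ./data/benchmarks/*.csv
#   table = bm.aggregate_table()    # equity and reserves aggregated per year
#   bm.plot_fossil_fuel_equity()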
def __init__(self, years, data={}, aggregateTable=None):
self.years = years # user will pass in years
self.data = data
self.aggregateTable = aggregateTable
def show_sample_tables(self, switch=None):
if switch == "Carbon":
columns = ["Company(Company)", "Coal(GtCO2)", "Oil(GtCO2)", "Gas(GtCO2)"]
data = {
"Company(Company)" : ["Best Coal", "Some Gas", "More Oil", "Better Coal", "Decent Coal"],
"Coal(GtCO2)" : [10.0,0.0,0.0,20.0,5.0],
"Oil(GtCO2)" : [0, 5.0, 10.0, 0, 0],
"Gas(GtCO2)" : [0, 2.5, 2.5, 0, 0],
}
df = pd.DataFrame(data, columns=columns)
elif switch == "Equity":
columns2 = ["Stocks", "EndingMarketValue"] #any other columns you include will be preserved but won't be operated on
data2 = {
"Stocks" : ["SM GAS CLASS A", "MORE OIL", "DCT COAL OPTIONS", "BST COAL", "CLOTHES R US"],
"EndingMarketValue" : [54987651.0, 13654977.0, 546879852.0, 1124568.0, 1549865.0]
}
df = pd.DataFrame(data2, columns=columns2)
elif switch == "Financial":
columns3 = ["Company", "MarketCap(B)"] # make sure you convert whatever the listed market caps are to B
data3 = {
"Company(Company)" : ["Best Coal", "Some Gas", "More Oil", "Better Coal", "Decent Coal"],
"MarketCap(B)" : [25.8, 50.2, 44.3, 10.0, 0.5]
}
df = pd.DataFrame(data3, columns=columns3)
else:
print("Please select a sample table to view.")
return None
return display(df)
# pull the data frames that were written out by Analyst
# modify get_tables to be get_equity_tables from /assessment
def get_equity_tables(self):
dataframefile = DataFrameFile()
data = {}
with os.scandir(path="./data/assessment") as it:
currentFiles = [x.name for x in it] # store name attributes of all files in a folder
# commenting out recent date because I removed that function
# recentDate = max([datetime.strptime(fileName[:8], "%Y%m%d") for fileName in currentFiles if fileName != ".gitignore"])
# recentDate = datetime.strftime(recentDate, "%Y%m%d")
for fileName in currentFiles:
if fileName != ".gitignore":
df = dataframefile.read(os.path.join('./data/assessment/', fileName))
data[fileName[:4]] = df
assert len(data.keys()) == len(self.years)
self.data = data
return self.data
def get_tables(self):
dataframefile = DataFrameFile()
data = {}
with os.scandir(path="./data/benchmarks") as it:
currentFiles = [x.name for x in it] # store name attributes of all files in a folder
# recentDate = max([datetime.strptime(fileName[:8], "%Y%m%d") for fileName in currentFiles if fileName != ".gitignore"])
# recentDate = datetime.strftime(recentDate, "%Y%m%d")
for fileName in currentFiles:
if fileName != ".gitignore":
df = dataframefile.read(os.path.join('./data/benchmarks/', fileName))
data[fileName[:4]] = df
assert len(data.keys()) == len(self.years)
self.data = data
return self.data
def company_names(self):
for year in self.years:
matchedCompanies = self.data[year].loc[:, "Company(Company)"].notnull()
companyNames = set(self.data[year].loc[matchedCompanies, "Company(Company)"].values)
numberOfCompanies = len(companyNames)
print(f"{year}")
print(f"You owned investments in {numberOfCompanies} fossil fuel companies:")
companySentence = ", ".join(companyNames)
print(f"{companySentence}\n")
def get_total_equity(self):
# read in the equity dataframes
dataframefile = DataFrameFile()
data = {}
with os.scandir(path="./data/equity_data") as it:
currentFiles = [x.name for x in it if x.name != ".gitignore"] # store name attributes of all files in a folder
for fileName in currentFiles:
df = dataframefile.read(os.path.join("./data/equity_data/", fileName))
data[fileName[0:4]] = df
assert len(data.keys()) == len(self.years)
totalEquity = {}
for year in data:
totalEquity[year] = float(data[year]["EndingMarketValue"].sum())
return totalEquity
def aggregate_equity_table(self):
# make Total Individual Equity column
totalEquity = self.get_total_equity()
totalEquity = pd.DataFrame.from_dict(data=totalEquity, orient="index")
totalEquity.columns = ["Total Individual Equity"]
# make the aggregate columns with Year
aggregateColumns = ["Year"]
aggregateTable = pd.DataFrame(columns=aggregateColumns)
aggregateTable.loc[:, aggregateColumns] = 0
aggregateTable.loc[:, "Year"] = self.data.keys()
aggregateTable.set_index("Year", inplace=True)
# initiate table to aggregate fossil fuel equity by fuel type
fossilFuelEquity = pd.DataFrame()
row = 0
for year in self.data:
fossilFuelEquity.loc[row, "Year"] = year
fossilFuelRows = self.data[year].loc[:, "Company(Company)"].notnull()
fossilFuelEquity.loc[row, "Fossil Fuel Equity"] = self.data[year].loc[fossilFuelRows, "EndingMarketValue"].sum()
row += 1
fossilFuelEquity.set_index("Year", inplace=True)
# dollars by fuel type
coalEmv = {}
oilEmv = {}
gasEmv = {}
for year in self.data:
coalRows = self.data[year].loc[:, "Coal(GtCO2)"] > 0
oilRows = self.data[year].loc[:, "Oil(GtCO2)"] > 0
gasRows = self.data[year].loc[:, "Gas(GtCO2)"] > 0
# to allocate dollars by fuel type, divide fuel type reserve by sum of total reserves and multiply by EMV
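# Added worked example of the allocation above: a company with Coal 10, Oil 5
# and Gas 5 GtCO2 and an EndingMarketValue of $100 has total reserves of 20
# GtCO2, so $50 is attributed to coal (10/20 * 100), $25 to oil and $25 to gas.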
totalReservesCoal = self.data[year].loc[coalRows, ["Coal(GtCO2)", "Oil(GtCO2)", "Gas(GtCO2)"]].sum(axis=1)
coalEmv[year] = ((self.data[year].loc[coalRows, "Coal(GtCO2)"] / totalReservesCoal)
* self.data[year].loc[coalRows, "EndingMarketValue"])
totalReservesOil = self.data[year].loc[oilRows, ["Coal(GtCO2)", "Oil(GtCO2)", "Gas(GtCO2)"]].sum(axis=1)
oilEmv[year] = ((self.data[year].loc[oilRows, "Oil(GtCO2)"] / totalReservesOil)
* self.data[year].loc[oilRows, "EndingMarketValue"])
totalReservesGas = self.data[year].loc[gasRows, ["Coal(GtCO2)", "Oil(GtCO2)", "Gas(GtCO2)"]].sum(axis=1)
gasEmv[year] = ((self.data[year].loc[gasRows, "Gas(GtCO2)"] / totalReservesGas)
* self.data[year].loc[gasRows, "EndingMarketValue"])
coalEquity = pd.DataFrame()
oilEquity = pd.DataFrame()
gasEquity = pd.DataFrame()
row = 0
for year in self.data:
coalEquity.loc[row, "Year"] = year
coalEquity.loc[row, "Coal Equity"] = coalEmv[year].sum()
oilEquity.loc[row, "Year"] = year
oilEquity.loc[row, "Oil Equity"] = oilEmv[year].sum()
gasEquity.loc[row, "Year"] = year
gasEquity.loc[row, "Gas Equity"] = gasEmv[year].sum()
row += 1
coalEquity.set_index("Year", inplace=True)
oilEquity.set_index("Year", inplace=True)
gasEquity.set_index("Year", inplace=True)
aggregateTable = pd.concat([aggregateTable, fossilFuelEquity, totalEquity, coalEquity, oilEquity, gasEquity]
, axis=1)
self.aggregateTable = aggregateTable
return aggregateTable
def aggregate_table(self):
# make Total Individual Equity column
totalEquity = self.get_total_equity()
totalEquity = pd.DataFrame.from_dict(data=totalEquity, orient="index")
totalEquity.columns = ["Total Individual Equity"]
# make the aggregate columns with Year
aggregateColumns = ["Year"]
aggregateTable = pd.DataFrame(columns=aggregateColumns)
aggregateTable.loc[:, aggregateColumns] = 0
aggregateTable.loc[:, "Year"] = self.data.keys()
aggregateTable.set_index("Year", inplace=True)
# initiate table to aggregate fossil fuel equity by fuel type
fossilFuelEquity = pd.DataFrame()
row = 0
for year in self.data:
fossilFuelEquity.loc[row, "Year"] = year
fossilFuelEquity.loc[row, "Fossil Fuel Equity"] = self.data[year].loc[:, "EndingMarketValue"].sum()
row += 1
fossilFuelEquity.set_index("Year", inplace=True)
# dollars by fuel type
coalEmv = {}
oilEmv = {}
gasEmv = {}
for year in self.data:
coalRows = self.data[year].loc[:, "Coal(tCO2)"] > 0
oilRows = self.data[year].loc[:, "Oil(tCO2)"] > 0
gasRows = self.data[year].loc[:, "Gas(tCO2)"] > 0
# to allocate dollars by fuel type, divide fuel type reserve by sum of total reserves and multiply by EMV
totalReservesCoal = self.data[year].loc[coalRows, ["Coal(tCO2)", "Oil(tCO2)", "Gas(tCO2)"]].sum(axis=1)
coalEmv[year] = ((self.data[year].loc[coalRows, "Coal(tCO2)"] / totalReservesCoal)
* self.data[year].loc[coalRows, "EndingMarketValue"])
totalReservesOil = self.data[year].loc[oilRows, ["Coal(tCO2)", "Oil(tCO2)", "Gas(tCO2)"]].sum(axis=1)
oilEmv[year] = ((self.data[year].loc[oilRows, "Oil(tCO2)"] / totalReservesOil)
* self.data[year].loc[oilRows, "EndingMarketValue"])
totalReservesGas = self.data[year].loc[gasRows, ["Coal(tCO2)", "Oil(tCO2)", "Gas(tCO2)"]].sum(axis=1)
gasEmv[year] = ((self.data[year].loc[gasRows, "Gas(tCO2)"] / totalReservesGas)
* self.data[year].loc[gasRows, "EndingMarketValue"])
coalEquity = pd.DataFrame()
oilEquity = pd.DataFrame()
gasEquity = pd.DataFrame()
row = 0
for year in self.data:
coalEquity.loc[row, "Year"] = year
coalEquity.loc[row, "Coal Equity"] = coalEmv[year].sum()
oilEquity.loc[row, "Year"] = year
oilEquity.loc[row, "Oil Equity"] = oilEmv[year].sum()
gasEquity.loc[row, "Year"] = year
gasEquity.loc[row, "Gas Equity"] = gasEmv[year].sum()
row += 1
coalEquity.set_index("Year", inplace=True)
oilEquity.set_index("Year", inplace=True)
gasEquity.set_index("Year", inplace=True)
# total carbon reserves
coalReserves = pd.DataFrame()
oilReserves = pd.DataFrame()
gasReserves = pd.DataFrame()
totalReserves = pd.DataFrame()
row = 0
for year in self.data:
coalReserves.loc[row, "Year"] = year
coalReserves.loc[row, "Coal Reserves (tCO2)"] = self.data[year].loc[:, "Coal(tCO2)"].sum()
oilReserves.loc[row, "Year"] = year
oilReserves.loc[row, "Oil Reserves (tCO2)"] = self.data[year].loc[:, "Oil(tCO2)"].sum()
gasReserves.loc[row, "Year"] = year
gasReserves.loc[row, "Gas Reserves (tCO2)"] = self.data[year].loc[:, "Gas(tCO2)"].sum()
totalReserves.loc[row, "Year"] = year
totalReserves.loc[row, "Total Reserves (tCO2)"] = self.data[year].loc[:, ["Coal(tCO2)", "Oil(tCO2)", "Gas(tCO2)"]].sum().sum()
row += 1
coalReserves.set_index("Year", inplace=True)
oilReserves.set_index("Year", inplace=True)
gasReserves.set_index("Year", inplace=True)
totalReserves.set_index("Year", inplace=True)
# carbon reserves by fuel type
aggregateTable = pd.concat([aggregateTable, fossilFuelEquity, totalEquity, coalEquity, oilEquity, gasEquity,
coalReserves, oilReserves, gasReserves, totalReserves]
, axis=1)
self.aggregateTable = aggregateTable
return aggregateTable
def show_top(self, rows=5, sort="EMV"):
sortMap = {
"EMV" : "EndingMarketValue",
"COAL" : "Coal(tCO2)",
"OIL" : "Oil(tCO2)",
"GAS" : "Gas(tCO2)"
}
columns = ["Company(Company)", "Coal(tCO2)", "Oil(tCO2)", "Gas(tCO2)", "EndingMarketValue"]
for year in self.years:
print(f"Top {rows} sorted by {sortMap[sort]} for {year}")
top5 = self.data[year].loc[:, columns].sort_values(by=sortMap[sort], ascending=False).iloc[0:rows, :]
display(top5)
def plot_fossil_fuel_equity(self):
N = len(self.aggregateTable.index)
x = self.aggregateTable.loc[:, "Fossil Fuel Equity"]
fig, ax = plt.subplots()
index =
|
np.arange(N)
|
numpy.arange
|
# Imports: standard library
import os
import re
import logging
from abc import ABC
from typing import Any, Set, Dict, List, Tuple, Optional
from datetime import datetime
# Imports: third party
import h5py
import numpy as np
import pandas as pd
import unidecode
# Imports: first party
from ml4c3.utils import get_unix_timestamps
from definitions.edw import EDW_FILES, MED_ACTIONS
from definitions.icu import ALARMS_FILES, ICU_SCALE_UNITS
from definitions.globals import TIMEZONE
from ingest.icu.data_objects import (
Event,
Procedure,
Medication,
StaticData,
Measurement,
BedmasterAlarm,
BedmasterSignal,
)
from tensorize.bedmaster.bedmaster_stats import BedmasterStats
from tensorize.bedmaster.match_patient_bedmaster import PatientBedmasterMatcher
# pylint: disable=too-many-branches, dangerous-default-value
class Reader(ABC):
"""
Parent class for our Reader classes.
As an abstract class, it can't be instantiated directly. Its children
should be used instead.
"""
@staticmethod
def _ensure_contiguous(data: np.ndarray) -> np.ndarray:
if len(data) > 0:
dtype = Any
try:
data = data.astype(float)
if all(x.is_integer() for x in data):
dtype = int
else:
dtype = float
except ValueError:
dtype = "S"
try:
data = np.ascontiguousarray(data, dtype=dtype)
except (UnicodeEncodeError, SystemError):
logging.info("Unknown character. Not ensuring contiguous array.")
new_data = []
for element in data:
new_data.append(unidecode.unidecode(str(element)))
data = np.ascontiguousarray(new_data, dtype="S")
except ValueError:
logging.exception(
f"Unknown method to convert np.ndarray of "
f"{dtype} objects to numpy contiguous type.",
)
raise
return data
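# Added illustrative sketch (not in the original module): the dtype handling in
# Reader._ensure_contiguous above, shown with plain numpy. Numeric strings that
# are all integers end up as a contiguous int array, other numerics as float,
# and non-numeric strings as bytes ("S").
def _ensure_contiguous_demo():
    whole = np.array(["1", "2", "3"]).astype(float)        # all integer-valued
    print(np.ascontiguousarray(whole, dtype=int))          # [1 2 3]
    fractional = np.array(["1.5", "2.0"]).astype(float)    # not all integers
    print(np.ascontiguousarray(fractional, dtype=float))   # [1.5 2. ]
    words = np.array(["alpha", "beta"])                     # non-numeric strings
    print(np.ascontiguousarray(words, dtype="S"))           # [b'alpha' b'beta']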
class EDWReader(Reader):
"""
Implementation of the Reader for EDW data.
Usage:
>>> reader = EDWReader('MRN')
>>> hr = reader.get_measurement('HR')
"""
def __init__(
self,
path: str,
mrn: str,
csn: str,
med_file: str = EDW_FILES["med_file"]["name"],
move_file: str = EDW_FILES["move_file"]["name"],
adm_file: str = EDW_FILES["adm_file"]["name"],
demo_file: str = EDW_FILES["demo_file"]["name"],
vitals_file: str = EDW_FILES["vitals_file"]["name"],
lab_file: str = EDW_FILES["lab_file"]["name"],
surgery_file: str = EDW_FILES["surgery_file"]["name"],
other_procedures_file: str = EDW_FILES["other_procedures_file"]["name"],
transfusions_file: str = EDW_FILES["transfusions_file"]["name"],
events_file: str = EDW_FILES["events_file"]["name"],
medhist_file: str = EDW_FILES["medhist_file"]["name"],
surghist_file: str = EDW_FILES["surghist_file"]["name"],
socialhist_file: str = EDW_FILES["socialhist_file"]["name"],
):
"""
Init EDW Reader.
:param path: absolute path of files.
:param mrn: MRN of the patient.
:param csn: CSN of the patient visit.
:param med_file: file containing the medicines data from the patient.
Can be inferred if None.
:param move_file: file containing the movements of the patient
(admission, transfer and discharge).
Can be inferred if None.
:param demo_file: file containing the demographic data from
the patient. Can be inferred if None.
:param vitals_file: file containing the vital signals from
the patient. Can be inferred if None.
:param lab_file: file containing the laboratory signals from
the patient. Can be inferred if None.
:param adm_file: file containing the admission data from
the patient. Can be inferred if None.
:param surgery_file: file containing the surgeries performed on
the patient. Can be inferred if None.
:param other_procedures_file: file containing procedures performed on
the patient. Can be inferred if None.
:param transfusions_file: file containing the transfusions performed on
the patient. Can be inferred if None.
:param events_file: file containing the events during
the patient stay. Can be inferred if None.
:param medhist_file: file containing the medical history information of the
patient. Can be inferred if None.
:param surghist_file: file containing the surgical history information of the
patient. Can be inferred if None.
:param socialhist_file: file containing the social history information of the
patient. Can be inferred if None.
"""
self.path = path
self.mrn = mrn
self.csn = csn
self.move_file = self.infer_full_path(move_file)
self.demo_file = self.infer_full_path(demo_file)
self.vitals_file = self.infer_full_path(vitals_file)
self.lab_file = self.infer_full_path(lab_file)
self.med_file = self.infer_full_path(med_file)
self.adm_file = self.infer_full_path(adm_file)
self.surgery_file = self.infer_full_path(surgery_file)
self.other_procedures_file = self.infer_full_path(other_procedures_file)
self.transfusions_file = self.infer_full_path(transfusions_file)
self.events_file = self.infer_full_path(events_file)
self.medhist_file = self.infer_full_path(medhist_file)
self.surghist_file = self.infer_full_path(surghist_file)
self.socialhist_file = self.infer_full_path(socialhist_file)
self.timezone = TIMEZONE
def infer_full_path(self, file_name: str) -> str:
"""
Infer a file name from MRN and type of data.
Used if a file is not specified on the input.
:param file_name: <str> 8 possible options:
'medications.csv', 'demographics.csv', 'labs.csv',
'flowsheet.csv', 'admission-vitals.csv',
'surgery.csv', 'procedures.csv', 'transfusions.csv'
:return: <str> the inferred path
"""
if not file_name.endswith(".csv"):
file_name = f"{file_name}.csv"
full_path = os.path.join(self.path, self.mrn, self.csn, file_name)
return full_path
def list_vitals(self) -> List[str]:
"""
List all the vital signs taken from the patient.
:return: <List[str]> List with all the available vital signals
from the patient
"""
signal_column = EDW_FILES["vitals_file"]["columns"][0]
vitals_df = pd.read_csv(self.vitals_file)
# Remove measurements out of dates
time_column = EDW_FILES["vitals_file"]["columns"][3]
admit_column = EDW_FILES["adm_file"]["columns"][3]
discharge_column = EDW_FILES["adm_file"]["columns"][4]
admission_df = pd.read_csv(self.adm_file)
init_date = admission_df[admit_column].values[0]
end_date = admission_df[discharge_column].values[0]
vitals_df = vitals_df[vitals_df[time_column] >= init_date]
if str(end_date) != "nan":
vitals_df = vitals_df[vitals_df[time_column] <= end_date]
return list(vitals_df[signal_column].astype("str").str.upper().unique())
def list_labs(self) -> List[str]:
"""
List all the lab measurements taken from the patient.
:return: <List[str]> List with all the available lab measurements
from the patient.
"""
signal_column = EDW_FILES["lab_file"]["columns"][0]
labs_df = pd.read_csv(self.lab_file)
return list(labs_df[signal_column].astype("str").str.upper().unique())
def list_medications(self) -> List[str]:
"""
List all the medications given to the patient.
:return: <List[str]> List with all the medications on
the patient's record
"""
signal_column = EDW_FILES["med_file"]["columns"][0]
status_column = EDW_FILES["med_file"]["columns"][1]
med_df = pd.read_csv(self.med_file)
med_df = med_df[med_df[status_column].isin(MED_ACTIONS)]
return list(med_df[signal_column].astype("str").str.upper().unique())
def list_surgery(self) -> List[str]:
"""
List all the types of surgery performed on the patient.
:return: <List[str]> List with all the surgery types associated
with the patient
"""
return self._list_procedures(self.surgery_file, "surgery_file")
def list_other_procedures(self) -> List[str]:
"""
List all the types of procedures performed on the patient.
:return: <List[str]> List with all the procedure types associated
with the patient
"""
return self._list_procedures(
self.other_procedures_file,
"other_procedures_file",
)
def list_transfusions(self) -> List[str]:
"""
List all the transfusion types that have been performed on the patient.
:return: <List[str]> List with all the transfusion types of
the patient
"""
return self._list_procedures(self.transfusions_file, "transfusions_file")
@staticmethod
def _list_procedures(file_name, file_key) -> List[str]:
"""
Filter and list all the procedures in the given file.
"""
signal_column, status_column, start_column, end_column = EDW_FILES[file_key][
"columns"
]
data = pd.read_csv(file_name)
data = data[data[status_column].isin(["Complete", "Completed"])]
data = data.dropna(subset=[start_column, end_column])
return list(data[signal_column].astype("str").str.upper().unique())
def list_events(self) -> List[str]:
"""
List all the event types during the patient stay.
:return: <List[str]> List with all the event types.
"""
signal_column, _ = EDW_FILES["events_file"]["columns"]
data = pd.read_csv(self.events_file)
return list(data[signal_column].astype("str").str.upper().unique())
def get_static_data(self) -> StaticData:
"""
Get the static data from the EDW csv file (admission + demographics).
:return: <StaticData> wrapped information
"""
movement_df = pd.read_csv(self.move_file)
admission_df = pd.read_csv(self.adm_file)
demographics_df = pd.read_csv(self.demo_file)
# Obtain patient's movement (location and when they move)
department_id = np.array(movement_df["DepartmentID"], dtype=int)
department_nm = np.array(movement_df["DepartmentDSC"], dtype="S")
room_bed = np.array(movement_df["BedLabelNM"], dtype="S")
move_time = np.array(movement_df["TransferInDTS"], dtype="S")
# Convert weight from ounces to pounds
weight = float(admission_df["WeightPoundNBR"].values[0]) / 16
# Convert height from feet & inches to meters
height = self._convert_height(admission_df["HeightTXT"].values[0])
admin_type = admission_df["HospitalAdmitTypeDSC"].values[0]
# Find possible diagnosis at admission
diag_info = admission_df["AdmitDiagnosisTXT"].dropna().drop_duplicates()
if list(diag_info):
diag_info = diag_info.astype("str")
admin_diag = diag_info.str.cat(sep="; ")
else:
admin_diag = "UNKNOWN"
admin_date = admission_df["HospitalAdmitDTS"].values[0]
birth_date = demographics_df["BirthDTS"].values[0]
race = demographics_df["PatientRaceDSC"].values[0]
sex = demographics_df["SexDSC"].values[0]
end_date = admission_df["HospitalDischargeDTS"].values[0]
# Check whether a deceased date exists or not
end_stay_type = (
"Alive"
if str(demographics_df["DeathDTS"].values[0]) == "nan"
else "Deceased"
)
# Find local time, if patient is still in hospital, take today's date
if str(end_date) != "nan":
offsets = self._get_local_time(admin_date[:-1], end_date[:-1])
else:
today_date = datetime.today().strftime("%Y-%m-%d %H:%M:%S.%f")
offsets = self._get_local_time(admin_date[:-1], today_date)
offsets = list(set(offsets)) # Take unique local times
local_time = np.empty(0)
for offset in offsets:
local_time = np.append(local_time, f"UTC{int(offset/3600)}:00")
local_time = local_time.astype("S")
# Find medical, surgical and social history of patient
medical_hist = self._get_med_surg_hist("medhist_file")
surgical_hist = self._get_med_surg_hist("surghist_file")
tobacco_hist, alcohol_hist = self._get_social_hist()
return StaticData(
department_id,
department_nm,
room_bed,
move_time,
weight,
height,
admin_type,
admin_diag,
admin_date,
birth_date,
race,
sex,
end_date,
end_stay_type,
local_time,
medical_hist,
surgical_hist,
tobacco_hist,
alcohol_hist,
)
def get_med_doses(self, med_name: str) -> Medication:
"""
Get all the doses of the input medication given to the patient.
:param med_name: <string> name of the medicine
:return: <Medication> wrapped list of medications doses
"""
(
signal_column,
status_column,
time_column,
route_column,
weight_column,
dose_column,
dose_unit_column,
infusion_column,
infusion_unit_column,
duration_column,
duration_unit_column,
) = EDW_FILES["med_file"]["columns"]
source = EDW_FILES["med_file"]["source"]
med_df = pd.read_csv(self.med_file)
med_df = med_df[med_df[status_column].isin(MED_ACTIONS)]
med_df = med_df.sort_values(time_column)
if med_name not in med_df[signal_column].astype("str").str.upper().unique():
raise ValueError(f"{med_name} was not found in {self.med_file}.")
idx = np.where(med_df[signal_column].astype("str").str.upper() == med_name)[0]
route = np.array(med_df[route_column])[idx[0]]
wt_base_dose = (
bool(1) if np.array(med_df[weight_column])[idx[0]] == "Y" else bool(0)
)
if med_df[duration_column].isnull().values[idx[0]]:
start_date = self._get_unix_timestamps(np.array(med_df[time_column])[idx])
action = np.array(med_df[status_column], dtype="S")[idx]
if (
np.array(med_df[status_column])[idx[0]] in [MED_ACTIONS[0]]
or med_df[infusion_column].isnull().values[idx[0]]
):
dose = np.array(med_df[dose_column], dtype="S")[idx]
units = np.array(med_df[dose_unit_column])[idx[0]]
else:
dose = np.array(med_df[infusion_column])[idx]
units = np.array(med_df[infusion_unit_column])[idx[0]]
else:
dose = np.array([])
units = np.array(med_df[infusion_unit_column])[idx[0]]
start_date = np.array([])
action = np.array([])
for _, row in med_df.iloc[idx, :].iterrows():
dose = np.append(dose, [row[infusion_column], 0])
time = self._get_unix_timestamps(np.array([row[time_column]]))[0]
conversion = 1
if row[duration_unit_column] == "Seconds":
conversion = 1
elif row[duration_unit_column] == "Minutes":
conversion = 60
elif row[duration_unit_column] == "Hours":
conversion = 3600
start_date = np.append(
start_date,
[time, time + float(row[duration_column]) * conversion],
)
action = np.append(action, [row[status_column], "Stopped"])
dose = self._ensure_contiguous(dose)
start_date = self._ensure_contiguous(start_date)
action = self._ensure_contiguous(action)
return Medication(
med_name,
dose,
units,
start_date,
action,
route,
wt_base_dose,
source,
)
def get_vitals(self, vital_name: str) -> Measurement:
"""
Get the vital signals from the EDW csv file 'flowsheet'.
:param vital_name: <string> name of the signal
:return: <Measurement> wrapped measurement signal
"""
vitals_df = pd.read_csv(self.vitals_file)
# Remove measurements out of dates
time_column = EDW_FILES["vitals_file"]["columns"][3]
admit_column = EDW_FILES["adm_file"]["columns"][3]
discharge_column = EDW_FILES["adm_file"]["columns"][4]
admission_df = pd.read_csv(self.adm_file)
init_date = admission_df[admit_column].values[0]
end_date = admission_df[discharge_column].values[0]
vitals_df = vitals_df[vitals_df[time_column] >= init_date]
if str(end_date) != "nan":
vitals_df = vitals_df[vitals_df[time_column] <= end_date]
return self._get_measurements(
"vitals_file",
vitals_df,
vital_name,
self.vitals_file,
)
def get_labs(self, lab_name: str) -> Measurement:
"""
Get the lab measurement from the EDW csv file 'labs'.
:param lab_name: <string> name of the signal
:return: <Measurement> wrapped measurement signal
"""
labs_df = pd.read_csv(self.lab_file)
return self._get_measurements("lab_file", labs_df, lab_name, self.lab_file)
def get_surgery(self, surgery_type: str) -> Procedure:
"""
Get all the surgery information of the input type performed on the
patient.
:param surgery_type: <string> type of surgery
:return: <Procedure> wrapped list surgeries of the input type
"""
return self._get_procedures("surgery_file", self.surgery_file, surgery_type)
def get_other_procedures(self, procedure_type: str) -> Procedure:
"""
Get all the procedures of the input type performed on the patient.
:param procedure_type: <string> type of procedure
:return: <Procedure> wrapped list procedures of the input type
"""
return self._get_procedures(
"other_procedures_file",
self.other_procedures_file,
procedure_type,
)
def get_transfusions(self, transfusion_type: str) -> Procedure:
"""
Get all the transfusions of the input type performed on the patient.
:param transfusion_type: <string> Type of transfusion.
:return: <Procedure> Wrapped list of transfusions of the input type.
"""
return self._get_procedures(
"transfusions_file",
self.transfusions_file,
transfusion_type,
)
def get_events(self, event_type: str) -> Event:
"""
Get all the events of the input type during the patient stay.
:param event_type: <string> Type of event.
:return: <Event> Wrapped list of events of the input type.
"""
signal_column, time_column = EDW_FILES["events_file"]["columns"]
data = pd.read_csv(self.events_file)
data = data.dropna(subset=[time_column])
data = data.sort_values([time_column])
if event_type not in data[signal_column].astype("str").str.upper().unique():
raise ValueError(f"{event_type} was not found in {self.events_file}.")
idx = np.where(data[signal_column].astype("str").str.upper() == event_type)[0]
time = self._get_unix_timestamps(np.array(data[time_column])[idx])
time = self._ensure_contiguous(time)
return Event(event_type, time)
def _get_local_time(self, init_date: str, end_date: str) -> np.ndarray:
"""
Obtain local time from init and end dates.
:param init_date: <str> String with initial date.
:param end_date: <str> String with end date.
:return: <np.ndarray> List of offsets from UTC (it may be two in
case the time shift between summer/winter occurs while the
patient is in the hospital).
"""
init_dt = datetime.strptime(init_date, "%Y-%m-%d %H:%M:%S.%f")
end_dt = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S.%f")
offset_init = self.timezone.utcoffset( # type: ignore
init_dt,
is_dst=True,
).total_seconds()
offset_end = self.timezone.utcoffset( # type: ignore
end_dt,
is_dst=True,
).total_seconds()
return np.array([offset_init, offset_end], dtype=float)
def _get_unix_timestamps(self, time_stamps: np.ndarray) -> np.ndarray:
"""
Convert readable time stamps to unix time stamps.
:param time_stamps: <np.ndarray> Array with all readable time stamps.
:return: <np.ndarray> Array with Unix time stamps.
"""
try:
arr_timestamps = pd.to_datetime(time_stamps)
except pd.errors.ParserError as error:
raise ValueError("Array contains non datetime values.") from error
# Convert readable local timestamps in local seconds timestamps
local_timestamps = (
np.array(arr_timestamps, dtype=np.datetime64)
- np.datetime64("1970-01-01T00:00:00")
) / np.timedelta64(1, "s")
# Find local time shift to UTC
if not (pd.isnull(local_timestamps[0]) or pd.isnull(local_timestamps[-1])):
offsets = self._get_local_time(time_stamps[0][:-1], time_stamps[-1][:-1])
else:
offsets = np.random.random(2) # pylint: disable=no-member
# Compute unix timestamp (2 different methods: 1st ~5000 times faster)
if offsets[0] == offsets[1]:
unix_timestamps = local_timestamps - offsets[0]
else:
unix_timestamps = np.empty(np.size(local_timestamps))
for idx, val in enumerate(local_timestamps):
if not pd.isnull(val):
ntarray = datetime.utcfromtimestamp(val)
offset = self.timezone.utcoffset( # type: ignore
ntarray,
is_dst=True,
)
unix_timestamps[idx] = val - offset.total_seconds() # type: ignore
else:
unix_timestamps[idx] = val
return unix_timestamps
def _get_med_surg_hist(self, file_key: str) -> np.ndarray:
"""
Read the medical or surgical history table and return its information as an array.
:param file_key: <str> Key name indicating the desired file name.
:return: <np.ndarray> Array with the patient's medical or surgical history.
"""
if file_key == "medhist_file":
hist_df = pd.read_csv(self.medhist_file)
else:
hist_df = pd.read_csv(self.surghist_file)
hist_df = (
hist_df[EDW_FILES[file_key]["columns"]].fillna("UNKNOWN").drop_duplicates()
)
info_hist = []
for _, row in hist_df.iterrows():
id_num, name, comment, date = row
info_hist.append(
f"ID: {id_num}; DESCRIPTION: {name}; "
f"COMMENTS: {comment}; DATE: {date}",
)
return self._ensure_contiguous(np.array(info_hist))
def _get_social_hist(self) -> Tuple:
"""
Read social history table and return tobacco and alcohol patient
status.
:return: <Tuple> Tuple with tobacco and alcohol information.
"""
hist_df = pd.read_csv(self.socialhist_file)
hist_df = hist_df[EDW_FILES["socialhist_file"]["columns"]].drop_duplicates()
concat = []
for col in hist_df:
information = hist_df[col].drop_duplicates().dropna()
if list(information):
information = information.astype(str)
concat.append(information.str.cat(sep=" - "))
else:
concat.append("NONE")
tobacco_hist = f"STATUS: {concat[0]}; COMMENTS: {concat[1]}"
alcohol_hist = f"STATUS: {concat[2]}; COMMENTS: {concat[3]}"
return tobacco_hist, alcohol_hist
def _get_measurements(self, file_key: str, data, measure_name: str, file_name: str):
(
signal_column,
result_column,
units_column,
time_column,
additional_columns,
) = EDW_FILES[file_key]["columns"]
source = EDW_FILES[file_key]["source"]
# Drop repeated values and sort
data = data[
[signal_column, result_column, units_column, time_column]
+ additional_columns
].drop_duplicates()
data = data.sort_values(time_column)
if measure_name not in data[signal_column].astype("str").str.upper().unique():
raise ValueError(f"{measure_name} was not found in {file_name}.")
idx = np.where(data[signal_column].astype("str").str.upper() == measure_name)[0]
value = np.array(data[result_column])[idx]
time = self._get_unix_timestamps(np.array(data[time_column])[idx])
units = np.array(data[units_column])[idx[0]]
value = self._ensure_contiguous(value)
time = self._ensure_contiguous(time)
data_type = "Numerical"
additional_data = {}
for col in additional_columns:
col_data = np.array(data[col])[idx]
if "DTS" in col:
col_data = self._get_unix_timestamps(col_data)
col_data = self._ensure_contiguous(col_data)
additional_data[col] = col_data
return Measurement(
measure_name,
source,
value,
time,
units,
data_type,
additional_data,
)
def _get_procedures(
self,
file_key: str,
file_name: str,
procedure_type: str,
) -> Procedure:
signal_column, status_column, start_column, end_column = EDW_FILES[file_key][
"columns"
]
source = EDW_FILES[file_key]["source"]
data = pd.read_csv(file_name)
data = data[data[status_column].isin(["Complete", "Completed"])]
data = data.dropna(subset=[start_column, end_column])
data = data.sort_values([start_column, end_column])
if procedure_type not in data[signal_column].astype("str").str.upper().unique():
raise ValueError(f"{procedure_type} was not found in {file_name}.")
idx = np.where(data[signal_column].astype("str").str.upper() == procedure_type)[
0
]
start_date = self._get_unix_timestamps(np.array(data[start_column])[idx])
end_date = self._get_unix_timestamps(np.array(data[end_column])[idx])
start_date = self._ensure_contiguous(start_date)
end_date = self._ensure_contiguous(end_date)
return Procedure(procedure_type, source, start_date, end_date)
@staticmethod
def _convert_height(height_i):
if str(height_i) != "nan":
height_i = height_i[:-1].split("' ")
height_f = float(height_i[0]) * 0.3048 + float(height_i[1]) * 0.0254
else:
height_f = np.nan
return height_f
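# Added worked example for _convert_height above: an EDW value like 5' 10"
# is split into feet and inches, so the result is 5 * 0.3048 + 10 * 0.0254,
# roughly 1.78 m; missing values come back as np.nan.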
class BedmasterReader(h5py.File, Reader):
"""
Implementation of the Reader for Bedmaster data.
Usage:
>>> reader = BedmasterReader('file.mat')
>>> hr = reader.get_vs('HR')
"""
def __init__(
self,
file: str,
scaling_and_units: Dict[str, Dict[str, Any]] = ICU_SCALE_UNITS,
summary_stats: BedmasterStats = None,
):
super().__init__(file, "r")
self.max_segment = {
"vs": {"segmentNo": 0, "maxTime": -1, "signalName": ""},
"wv": {"segmentNo": 0, "maxTime": -1, "signalName": ""},
}
self.interbundle_corr: Dict[str, Optional[Dict]] = {
"vs": None,
"wv": None,
}
self.scaling_and_units: Dict[str, Dict[str, Any]] = scaling_and_units
self.summary_stats = summary_stats
if self.summary_stats:
self.summary_stats.add_file_stats("total_files")
def _update_max_segment(self, sig_name, sig_type, max_time):
"""
Update the signal that holds the segment with the last timespan.
Needed for inter-bundle correction.
:param sig_name: <str> name of the new candidate signal
:param sig_type: <str> wv or vs
:param max_time: <int> latest timespan for that signal
"""
packet = self["vs_packet"] if sig_type == "vs" else self["wv_time_original"]
max_seg = self.max_segment[sig_type]
max_seg["maxTime"] = max_time
max_seg["segmentNo"] = packet[sig_name]["SegmentNo"][-1][0]
max_seg["signalName"] = sig_name
def get_interbundle_correction(self, previous_max):
"""
Calculate inter-bundle correction parameters from the previous bundle's maxima.
Based on the signal with the maximum time from the previous bundle,
it calculates 'maxTime': the last timespan that is overlapped
with the previous bundle, and 'timeCorr': the time shift to be
applied to this bundle.
Parameters are stored on the 'interbundle_corr' attribute.
:param previous_max: <Dict> dict with the max timespan info from
the previous bundle. Same format as the 'max_segment' attribute.
"""
def _ib_corr(previous_max, segments, time):
ib_corr = None
overlap_idx = np.where(segments[()] == previous_max["segmentNo"])[0]
if overlap_idx.size > 0: # Bundles overlap
last_overlap_idx = overlap_idx[-1]
if last_overlap_idx >= len(time):
last_overlap_idx = len(time) - 1
last_overlap_time = time[last_overlap_idx][0]
time_corr = previous_max["maxTime"] - last_overlap_time
ib_corr = {"maxTime": last_overlap_time, "timeCorr": time_corr}
return ib_corr
vs_corr = None
last_max_vs = previous_max["vs"]["signalName"]
if self.contains_group("vs"):
if last_max_vs in self["vs"].keys():
vs_corr = _ib_corr(
previous_max=previous_max["vs"],
segments=self["vs_packet"][last_max_vs]["SegmentNo"],
time=self["vs_time_corrected"][last_max_vs]["res_vs"],
)
wv_corr = None
last_max_wv = previous_max["wv"]["signalName"]
if self.contains_group("wv"):
if last_max_wv in self["wv"].keys():
wv_corr = _ib_corr(
previous_max=previous_max["wv"],
segments=self["wv_time_original"][last_max_wv]["SegmentNo"],
time=self["wv_time_corrected"][last_max_wv]["res_wv"],
)
self.max_segment = previous_max
self.interbundle_corr["vs"] = vs_corr
self.interbundle_corr["wv"] = wv_corr
def apply_ibcorr(self, signal: BedmasterSignal):
"""
Apply inter-bundle correction on a given signal.
The correction will be applied based on the 'interbundle_corr'
attribute, which is updated using the method
'get_interbundle_correction'.
The correction will cut the overlapping values between this bundle
and the previous one. In addition, it will shift the timespans so that
the first timespan on this bundle is the continuation of the last
timespan of the previous bundle.
Note that this shifting will occur until a data event 1 or 5 is found.
:param signal: <BedmasterSignal> a Bedmaster signal.
"""
source = "vs" if signal._source_type == "vitals" else "wv"
if not self.interbundle_corr[source]:
return
overlap_idx = np.where(
signal.time <= self.interbundle_corr[source]["maxTime"], # type: ignore
)[0]
if overlap_idx.size > 0:
first_non_ol_idx = overlap_idx[-1] + 1
signal.time = signal.time[first_non_ol_idx:]
signal.time_corr_arr = signal.time_corr_arr[first_non_ol_idx:]
value_cut_idx = (
first_non_ol_idx
if source == "vs"
else np.sum(signal.samples_per_ts[:first_non_ol_idx])
)
signal.value = signal.value[value_cut_idx:]
signal.samples_per_ts = signal.samples_per_ts[first_non_ol_idx:]
if signal.source == "waveform":
signal.sample_freq = self.get_sample_freq_from_channel(
channel=signal.channel,
first_idx=first_non_ol_idx,
)
corr_to_apply = self.interbundle_corr[source]["timeCorr"] # type: ignore
if corr_to_apply:
de_idx = np.where(signal.time_corr_arr == 1)[0]
if de_idx.size > 0: # Contains data events
first_event = de_idx[0]
signal.time[:first_event] = signal.time[:first_event] + corr_to_apply
else:
signal.time = signal.time + corr_to_apply
if self.summary_stats and overlap_idx.size > 0:
if signal.value.size > 0:
self.summary_stats.add_signal_stats(
signal.name,
"overlapped_points",
first_non_ol_idx,
source=signal.source,
)
def contains_group(self, group_name: str) -> bool:
"""
Check if the .mat file contains the given group.
"""
has_group = False
if group_name in self.keys():
if isinstance(self[group_name], h5py.Group):
has_group = True
return has_group
def list_vs(self) -> List[str]:
"""
Get just the names of the vital signals contained in the .mat file.
It doesn't return the values of the vital signs.
:return: <list[str]> A list with the vital signals' names contained
on the .mat file
"""
if not self.contains_group("vs"):
logging.warning(f"No BM vitalsign found on file {self.filename}.")
if self.summary_stats:
self.summary_stats.add_file_stats("missing_vs")
return []
return list(self["vs"].keys())
def list_wv(self) -> Dict[str, str]:
"""
Get the names of the waveform signals contained in the .mat file.
The format is: {wv_name: channel}, where `channel` is the input
channel where the signal enters. If a channel contains
no waveform or contains multiple waveforms, it will be ignored.
:return: <Dict[str:str]> A dict with the wave form signals
contained on the .mat file, along with their input channel.
"""
wv_signals: Dict[str, str] = {}
if not self.contains_group("wv"):
logging.warning(f"No BM waveform found on file {self.filename}.")
if self.summary_stats:
self.summary_stats.add_file_stats("missing_wv")
return wv_signals
for ch_name in self["wv"].keys():
signal_name = self.get_wv_from_channel(ch_name)
if signal_name:
wv_signals[signal_name] = ch_name
return wv_signals
def format_data(self, data) -> np.ndarray:
"""
Format multidimensional data into 1D arrays.
:param data: <np.array> Data to be formatted
:return: <np.array> formatted data
"""
# Pseudo 1D data to 1D data
if data.shape[1] == 1: # Case [[0],[1]]
data = np.transpose(data)
if data.shape[0] == 1: # Case [[0, 1]]
data = data[0]
# 2D data unicode encoded to 1D decoded
if data.ndim == 2:
if data.shape[0] < data.shape[1]:
data = np.transpose(data)
data = self.decode_data(data)
return data
@staticmethod
def decode_data(data: np.ndarray) -> np.ndarray:
"""
Decodes data stored as unicode identifiers and returns a 1D array.
Example:
>>> data # 3D array with unicode codes for '0','.','2'
array([[48, 46, 50],
[48, 46, 50],
[48, 46, 50],
[48, 46, 50]])
>>> BedmasterReader.decode_data(data)
array([0.2, 0.2, 0.2, 0.2])
:param data: <np.ndarray> Data to decode
:return: <np.ndarray> decoded data
"""
def _decode(row):
row = "".join([chr(code) for code in row]).strip()
if row in ("X", "None"):
return np.nan
return row
data = np.apply_along_axis(_decode, 1, data)
try:
data = data.astype(float)
if all(x.is_integer() for x in data):
dtype = int # type: ignore
else:
dtype = float # type: ignore
except ValueError:
dtype = "S" # type: ignore
data = data.astype(dtype)
return data
def get_vs(self, signal_name: str) -> Optional[BedmasterSignal]:
"""
Get the corrected vs signal from the .mat file.
1. Reads the signal values and times from the file
2. Applies corrections on the signal
3. Wraps the corrected signal and its metadata in a BedmasterSignal
:param signal_name: <string> name of the signal
:return: <BedmasterSignal> wrapped corrected signal
"""
if signal_name not in self["vs"].keys():
raise ValueError(
f"In bedmaster_file {self.filename}, the signal {signal_name} "
"was not found.",
)
# Get values and time
values = self["vs"][signal_name][()]
if values.ndim == 2:
values = self.format_data(values)
if values.dtype.char == "S":
logging.warning(
f"{signal_name} on .mat file {self.filename}, has unexpected "
"string values.",
)
return None
if values.ndim >= 2:
raise ValueError(
f"Signal {signal_name} on file: {self.filename}. The values"
f"of the signal have higher dimension than expected (>1) after"
f"being formatted. The signal is probably in a bad format so it "
f"won't be written.",
)
time = np.transpose(self["vs_time_corrected"][signal_name]["res_vs"][:])[0]
# Get the occurrence of event 1 and 5
de_1 = self["vs_time_corrected"][signal_name]["data_event_1"]
de_5 = self["vs_time_corrected"][signal_name]["data_event_5"]
events = (de_1[:] | de_5[:]).astype(np.bool)
# Get scaling factor and units
if signal_name in self.scaling_and_units:
scaling_factor = self.scaling_and_units[signal_name]["scaling_factor"]
units = self.scaling_and_units[signal_name]["units"]
else:
scaling_factor = 1
units = "UNKNOWN"
# Samples per timespan
samples_per_ts = np.array([1] * len(time))
signal = BedmasterSignal(
name=signal_name,
source="vitals",
channel=signal_name,
value=self._ensure_contiguous(values),
time=self._ensure_contiguous(time),
units=units,
sample_freq=np.array([(0.5, 0)], dtype="float,int"),
scale_factor=scaling_factor,
time_corr_arr=events,
samples_per_ts=self._ensure_contiguous(samples_per_ts),
)
# Apply inter-bundle correction
if self.interbundle_corr["vs"]:
self.apply_ibcorr(signal)
if signal.time.size == 0:
logging.info(
f"Signal {signal} on .mat file {self.filename} doesn't contain new "
f"information (only contains overlapped values from previous bundles). "
f"It won't be written.",
)
if self.summary_stats:
self.summary_stats.add_signal_stats(
signal.name,
"total_overlap_bundles",
source=signal.source,
)
return None
# Compress time_corr_arr
signal.time_corr_arr = np.packbits(
|
np.transpose(signal.time_corr_arr)
|
numpy.transpose
|
"""
factor analysis model in 'A Unifying Review of Linear Gaussian Models'.
"""
__docformat__ = 'restructuredtext'
from numpy import diag, identity
from numpy.linalg import qr, slogdet, solve
import numpy
symmetric_solve = solve
LOG2PI = numpy.log(2 * numpy.pi)
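# Added note: log_likelihood_day below evaluates the log density of a zero-mean
# multivariate Gaussian at the residual r,
#   log p(r) = -0.5 * (d * log(2*pi) + log|sigma| + r^T sigma^{-1} r),
# with d = len(sigma); numpy.linalg.slogdet supplies (sign, log|sigma|) and
# solve supplies sigma^{-1} r.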
def log_likelihood_day(sigma, r):
from numpy.linalg import slogdet
tr_SiS = numpy.dot(r.T, symmetric_solve(sigma, r))
return -0.5 * (len(sigma) * LOG2PI +
|
slogdet(sigma)
|
numpy.linalg.slogdet
|
"""
"""
import glob
import os
from collections import defaultdict, Counter
from copy import deepcopy
from itertools import combinations
from shutil import copyfile
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # needed by plot_3dkeypoints below
from scipy.stats import skew, kurtosis
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sliced import SlicedInverseRegression
from ar.features.feature import _get_fft
from ar.utils.utils import make_confusion_matrix, load, dump, check_path, timer
keypoints = [
"nose",
"left_eye",
"right_eye",
"left_ear",
"right_ear",
"left_shoulder",
"right_shoulder",
"left_elbow",
"right_elbow",
"left_wrist",
"right_wrist",
"left_hip",
"right_hip",
"left_knee",
"right_knee",
"left_ankle",
"Right_ankle"]
def get_missclassified(cm, test_y, pred_y, test_raw_files, model_name, idx2label):
res = defaultdict(list)
# n, m = cm.shape
for i, (y_, y_2) in enumerate(zip(test_y, pred_y)):
if y_ != y_2:
# print(y_, y_2, test_raw_files[i])
in_file = os.path.relpath(test_raw_files[i], 'examples/classical_ml/out/keypoints3d-20210907/keypoints3d/')
# tmp_file = os.path.relpath(in_file, f'data/data-clean/refrigerator/{idx2label[y_]}')
out_file = f'examples/classical_ml/out/misclassified_users/{model_name}/{idx2label[y_]}->/{idx2label[y_2]}/{in_file}'[
:-4]
check_path(out_file)
try:
# copyfile(os.path.join('examples', in_file)[:-4], out_file)
copyfile(os.path.join('ar/features/videopose3d/out3d_pred', in_file)[:-4], out_file)
except Exception as e:
print(f'Error: {e}')
res[y_].append(test_raw_files[i])
res = sorted(res.items(), key=lambda x: x[0], reverse=False)
for vs in res:
label, lst = vs
print(f'label: {label}, misclassified_num: {len(lst)}, {lst}')
return
def get_fft_features(raw_data='', m=84, keypoint=7):
"""
Parameters
----------
raw_data : np.ndarray
keypoint sequence, shape (n_frames, 51) or (n_frames, 17, 3)
m : int
without trimming: m = 84
with trimming: m = 51
keypoint : int
currently unused in this implementation
Returns
-------
np.ndarray
1-D flattened feature vector
"""
n = raw_data.shape[0]
raw_data = raw_data.reshape((n, 51))
res = []
for i in range(51):
data = raw_data[:, i]
# data = data.reshape((-1,))
flg = 'std1'
if flg == 'fft':
fft_features = _get_fft(data, fft_bin=m)
fft_features = fft_features[0:1 + int(np.ceil((m - 1) / 2))]
elif flg == 'std':
# fft_features = [np.mean(data), np.std(data)]
fft_features = [np.min(data), np.mean(data), np.std(data), np.max(data)]
# fft_features = list(np.quantile(data, q=[0, 0.25, 0.5, 0.75, 1]))
# fft_features = list(np.quantile(data, q = [0, 0.25, 0.5, 0.75, 1])) + [np.mean(data), np.std(data)]
elif flg == 'skew':
# fft_features = [np.min(data), np.max(data)]
# fft_features = [np.mean(data), np.std(data)]
# fft_features = [skew(data), kurtosis(data)]
# fft_features = [np.min(data), np.mean(data), np.std(data), np.max(data)]
fft_features = [np.mean(data), np.std(data), skew(data), kurtosis(data), np.min(data), np.max(data)]
else:
n = len(data)
step = int(np.ceil(n / m))
fft_features = []
for i in range(0, len(data), step):
vs = data[i:i + step]
flg2 = 'stats'
if flg2 == 'stats':
# tmp = list(np.quantile(vs, q = [0, 0.5, 1] )) # [0, 0.25, 0.5, 0.75, 1]+ [np.mean(vs), np.std(vs)]
# tmp = list(np.quantile(vs, q=[0, 0.5, 1]))
# tmp = [np.mean(vs), np.std(vs)]
tmp = [np.mean(vs), np.std(vs), skew(vs), kurtosis(vs), np.min(vs), np.max(vs)]
# tmp = [np.mean(vs)]
n_feat = len(tmp)
elif flg2:
tmp = _get_fft(vs)
tmp = sorted(tmp[0:1 + int(np.ceil((m - 1) / 2))], reverse=True)
n_feat = 2
fft_features.extend(tmp[:n_feat])
fft_features = fft_features + [0] * (n_feat * m - len(fft_features))
res.append(fft_features)
return np.asarray(res).reshape(-1, )
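# Added usage sketch (not in the original file; the synthetic array is purely
# illustrative): get_fft_features flattens a (n_frames, 17, 3) keypoint sequence
# into a single fixed-length 1-D statistics vector, i.e. one row of the
# classical-ML dataset built by _get_data below.
def _get_fft_features_demo():
    rng = np.random.default_rng(0)
    demo_keypoints = rng.normal(size=(100, 17, 3))  # 100 frames, 17 joints, xyz
    features = get_fft_features(demo_keypoints, m=84)
    print(features.shape)  # a single 1-D feature vector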
def _get_data(root_dir='examples/out/keypoints3d-20210907/keypoints3d/data', camera_type='_1.mp4', classes=[]):
dataset = []
users = []
activities = []
raw_files = []
for act in classes:
files = glob.glob(f'{root_dir}/data-clean/refrigerator/' + act + f'/*/*{camera_type}.npy')
# files = [f'{root_dir}/data-clean/refrigerator/take_out_item/4/take_out_item_2_1616179391_1.mp4.npy',
# f'{root_dir}/data-clean/refrigerator/take_out_item/4/take_out_item_2_1616179391_1.mp4.npy']
for file in files:
# print('Processing file', file)
user = int(file.split('/')[-2])
data = []
raw_data = np.load(file)
# for keypoint_idx, _ in enumerate(keypoints):
# tmp = get_fft_features(raw_data, keypoint=keypoint_idx)
# # data = special_keypoints(np.load(file))
# data.extend(list(tmp))
data = get_fft_features(raw_data)
data = np.asarray(data)
dataset.append(data)
users.append(user)
activities.append(classes.index(act))
raw_files.append(file)
print(act, len(files))
return np.array(dataset), np.array(users), np.array(activities), raw_files
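# Rough usage sketch (the paths and class list below are hypothetical):
# >>> classes = ['take_out_item', ...]   # activity folder names under data-clean/refrigerator
# >>> feats, users, acts, files = _get_data(root_dir='...', camera_type='_1.mp4', classes=classes)
# feats is (n_videos, n_features); users/acts are parallel label arrays, and
# files keeps the source .npy path of each row for later error analysis.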
def pca_plot(dataset, activities, classes, title):
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X = pca.fit_transform(dataset)
for i in range(5):
plt.scatter(X[activities == i, 0], X[activities == i, 1])
plt.title(title)
plt.legend(classes)
plt.show()
def pca_plot1(dataset, activities, classes, users, title):
nrows = 3
ncols = 3
fig, axes = plt.subplots(nrows, ncols, figsize=(20, 15)) # w, h
for i in sorted(np.unique(users)):
q, r = np.divmod(i - 1, ncols)
g = axes[q, r]
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X_new = pca.fit_transform(dataset)
X_new = X_new[users == i]
act_new = activities[users == i]
for act_idx in range(5):
g.scatter(X_new[act_new == act_idx, 0], X_new[act_new == act_idx, 1])
# g.legend(loc='upper left', bbox_to_anchor=(1.05, 1.0), ncol=2, borderaxespad=0,
# fancybox=False, shadow=False, fontsize=8, title='classes')
handles, labels = g.get_legend_handles_labels()
# fig.legend(handles, labels, loc='upper center')
g.legend(handles, labels, loc='center left', bbox_to_anchor=(1.25, 1), ncol=1, borderaxespad=0.)
g.set_title(f'user_{i}')
g.set_ylim([0, 40])
fig.suptitle(f'each user\'s data: {title}')
plt.tight_layout() # takes too much time
# out_file = f'{coordinate}-{color}-{title}.png'
# plt.savefig(out_file, bbox_inches='tight', dpi=300)
# print(out_file)
plt.show() # takes too much time
def plot_3dkeypoints(X, y, random_state=42, title=''):
N, D = X.shape
y_label = [idx2label[v] for v in y]
df = pd.DataFrame(np.concatenate([X, np.reshape(y, (-1, 1)), np.reshape(y_label, (-1, 1))], axis=1),
columns=[f'x{i + 1}' for i in range(X.shape[1])] + ['y', 'y_label'])
df = df.astype({"x1": float, "x2": float, 'y': int, 'y_label': str})
for i_coord, (coordinate, color) in enumerate(zip(['x', 'y', 'z'], ['r', 'g', 'b'])):
# only plot the red channel values for each point(R, G, B)
nrows = 4
# using the variable axs for multiple Axes
ncols = 5 # D // (17 * 3)
print(ncols)
fig, axes = plt.subplots(nrows, ncols, figsize=(20, 15)) # w, h
for i in range(0, 17): # 17 3d_keypoints
# the ith keypoint
# df_point = df[:, [(j, j+1, j+2) for j in range(0, 255, 17*3)]] # (j, j+1, j+2): (R, G, B)
df_point = df[df.columns[[j + (i * 3) + i_coord for j in range(0, D, 17 * 3)]]] # R
# df_point.shape (1000, 5) # 1000 is the number of videos, 5 is the number of timestamps
q, r = divmod(i, ncols)
if q >= nrows: break
g = axes[q, r]
for i_video, lab_ in enumerate(y_label):
# plt.plot(x, y)
g.plot(range(0, D // (17 * 3)), [float(v) for v in df_point.iloc[i_video].values],
linestyle='', marker='*', color=label2color[lab_])
# break
if r == 0:
g.set_ylabel(f'{coordinate} coordinate')
if q == 4 - 1:
g.set_xlabel('feature (aggregated on frames)')
g.set_title(f'{i + 1}th keypoint')
print(f'{i}, axes[{q}, {r}]: {g}')
fig.suptitle(f'{title}: {coordinate} coordinate.')
plt.tight_layout() # takes too much time
out_file = f'{coordinate}-{color}-{title}.png'
plt.savefig(out_file, bbox_inches='tight', dpi=300)
print(out_file)
plt.show() # takes too much time
# ### FacetGrid
# grid = sns.FacetGrid(df, col="y_label", hue="y_label", hue_order=list(sorted(set(y_label))), col_wrap=3)
# grid.map(sns.scatterplot, "x1", "x2", s=100, alpha=0.3)
# grid.add_legend()
# plt.show()
def pca_plot2(X, y, classes, title):
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X = pca.fit_transform(X)
for i in range(len(classes)):
plt.scatter(X[y == i, 0], X[y == i, 1])
plt.title(title)
plt.legend(classes)
plt.show()
def split_train_test_users(history, data_type='', test_users_list=[]):
# test_users_list = [1, 3]
if data_type in ['_1.mp4', '_2.mkv', '_3.mp4']:
dataset, users, activities, raw_files = history[data_type]
# # train_x, test_x, train_y, test_y = train_test_split(dataset, users, test_size=0.3, random_state=42)
# train_x, test_x, train_y, test_y, train_raw_files, test_raw_files = train_test_split(dataset, activities,
# raw_files, test_size=0.3,
# random_state=42)
train_x = []
train_y = []
train_users = []
train_raw_files = []
test_x = []
test_y = []
test_users = []
test_raw_files = []
cameras = [[dataset, users, activities, raw_files]]
for camera in cameras:
for (d, u, a, r) in zip(*camera):
if u in test_users_list:
test_x.append(d)
test_y.append(a)
test_users.append(u)
test_raw_files.append(r)
else:
train_x.append(d)
train_y.append(a)
train_users.append(u)
train_raw_files.append(r)
train_x = np.asarray(train_x)
        train_y = np.asarray(train_y)
#!/usr/bin/env python
import builtins
import operator
import warnings
from .duckprint import (duck_str, duck_repr, duck_array2string, typelessdata,
default_duckprint_options, default_duckprint_formatters, FormatDispatcher)
from .common import (is_ndducktype, is_ndscalar, is_ndarr, is_ndtype,
new_ducktype_implementation, ducktype_link, get_duck_cls, as_duck_cls)
from .ndarray_api_mixin import NDArrayAPIMixin
import numpy as np
from numpy import newaxis
import numpy.core.umath as umath
from numpy.lib.mixins import NDArrayOperatorsMixin
from numpy.lib.function_base import _quantile_is_valid
import numpy.core.numerictypes as ntypes
from numpy.core.multiarray import (normalize_axis_index,
interp as compiled_interp, interp_complex as compiled_interp_complex)
from numpy.lib.stride_tricks import _broadcast_shape
from numpy.core.numeric import normalize_axis_tuple
class MaskedOperatorMixin(NDArrayOperatorsMixin):
# shared implementations for MaskedArray, MaskedScalar
# override the NDArrayOperatorsMixin implementations for cmp ops, as
# currently those don't work for flexible types.
def _cmp_op(self, other, op):
if other is X:
db, mb = self._data.dtype.type(0), np.bool_(True)
else:
db, mb = getdata(other), getmask(other)
cls = get_duck_cls(self, other)
data = op(self._data, db)
mask = self._mask | mb
return maskedarray_or_scalar(data, mask, cls=cls)
def __lt__(self, other):
return self._cmp_op(other, operator.lt)
def __le__(self, other):
return self._cmp_op(other, operator.le)
def __eq__(self, other):
return self._cmp_op(other, operator.eq)
def __ne__(self, other):
return self._cmp_op(other, operator.ne)
def __gt__(self, other):
return self._cmp_op(other, operator.gt)
def __ge__(self, other):
return self._cmp_op(other, operator.ge)
def __complex__(self):
raise TypeError("Use .filled() before converting to non-masked scalar")
def __int__(self):
raise TypeError("Use .filled() before converting to non-masked scalar")
def __float__(self):
raise TypeError("Use .filled() before converting to non-masked scalar")
def __index__(self):
raise TypeError("Use .filled() before converting to non-masked scalar")
def __array_function__(self, func, types, arg, kwarg):
impl, check_args = implements.handled_functions.get(func, (None, None))
if impl is None or not check_args(arg, kwarg, types, self.known_types):
return NotImplemented
return impl(*arg, **kwarg)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if ufunc not in _masked_ufuncs:
return NotImplemented
return getattr(_masked_ufuncs[ufunc], method)(*inputs, **kwargs)
def _get_fill_value(self, fill_value, minmax):
if minmax is not None:
if fill_value != np._NoValue:
raise Exception("Do not give fill_value if providing minmax")
if minmax == 'max':
fill_value = _maxvals[self.dtype]
elif minmax == 'maxnan':
if issubclass(self.dtype.type, np.inexact):
# some functions, eg np.sort, treat nan as largest
fill_value = np.nan
else:
fill_value = _maxvals[self.dtype]
elif minmax == 'min':
fill_value = _minvals[self.dtype]
else:
raise ValueError("minmax should be 'min' or 'max'")
if fill_value is None:
raise ValueError("minmax not supported for dtype {}".format(
self.dtype))
elif fill_value is np._NoValue:
# default is 0 for all types (*not* np.nan for inexact)
fill_value = 0
return fill_value
@property
def flat(self):
return MaskedIterator(self)
def duck_require(data, dtype=None, ndmin=0, copy=True, order='K'):
"""
Return an ndarray-like that satisfies requirements.
Returns a view if possible.
Parameters
----------
data : array-like
Must be an ndarray or ndarray ducktype.
dtype : numpy datatype
Datatype to convert to
ndmin : integer
Same as 'ndmin' argument of np.array
copy : bool
Whether to guarantee a copy is made
order : one of 'K', 'F', 'C', 'A'
Same as 'order' argument of np.array
"""
# we must use only properties that work for ndarray ducktypes.
# This rules out using np.require
newdtype = dtype if dtype is not None else data.dtype
if copy or (newdtype != data.dtype):
data = data.astype(newdtype, order=order)
if order != 'K' and order is not None:
warnings.warn('order parameter of MaskedArray is ignored')
if ndmin != 0 and data.ndim < ndmin:
nd = ndmin - data.ndim
data = data[(None,)*nd + (Ellipsis,)]
return data
def asarr(v, **kwarg):
if is_ndarr(v):
return duck_require(v, **kwarg)
else: # must be ndscalar
if is_ndducktype(v):
# convert to duck-array class using our ducktype conventions
return get_duck_cls(v)(v, **kwarg)
else: # usually, np.generic type
return np.array(v, **kwarg)
class MaskedArray(MaskedOperatorMixin, NDArrayAPIMixin):
"An ndarray ducktype allowing array elements to be masked"
def __init__(self, data, mask=None, dtype=None, copy=False,
order=None, subok=False, ndmin=0):
"""
Constructs a MaskedArray given data and optional mask.
Parameters
----------
data : array-like
Any object following the numpy ducktype api or convertible to an
ndarray, but also allowing the masked signifier `X` to mark masked
elements. See Notes below.
mask : array-like
Any object convertible to a boolean `ndarray` of the same
shape as data, where true elements are masked. If omitted, defaults
to all `False`. See Notes below.
dtype : data-type, optional
The desired data-type for the array. See `np.array` argument.
copy : bool, optional
If false (default), the MaskedArray will view the data and mask
if they are ndarrays with the right properties. Otherwise
            they will be copied.
order : {'K', 'A', 'C', 'F'}, optional
Memory layout of the array. See `np.array` argument. This affects
both the data and mask.
ndmin : int, optional
Specifies the minimum number of dimensions the resulting array
should have. See `np.array` argument.
Returns
-------
out : MaskedArray
The resulting MaskedArray.
Notes
-----
This MaskedArray constructor supports a few different ways to mark
masked elements, which are sometimes exclusive.
First, `data` may be a MaskedArray, in which case `mask` should not
be supplied.
If `mask` is not supplied, then masked elements may be marked in the
`data` using the masked input element `X`. That is, `data` can be a
list-of-lists containing numerical scalars and `ndarray`s,
similar to that accepted by `np.array`, but additionally allowing
some elements to be replaced with `X`. The dtype will be inferred
based on the converted dtype of the non-masked elements. If all
elements are `X`, the `dtype` argument of `MaskedArray` must be
supplied:
>>> a = MaskedArray([[1, X, 3], np.arange(3)])
>>> b = MaskedArray([X, X, X], dtype='f8')
        If `mask` is supplied, `X` should not be used in the `data`. `mask`
should be any object convertible to bool datatype and broadcastable
to the shape of the data. If `mask` is already a bool ndarray
of the same shape as `data`, it will be viewed, otherwise it will
be copied.
"""
if isinstance(data, MaskedScalar):
self.__init__(data._data, data._mask, dtype=data.dtype,
order=order, ndmin=ndmin)
return
elif isinstance(data, MaskedArray):
self._mask = duck_require(data._mask, copy=copy, order=order,
ndmin=ndmin)
if mask is not None:
self._data = duck_require(data._data, copy=True, order=order,
ndmin=ndmin)
mask = np.array(mask, dtype=bool, copy=False)
self._mask |= np.broadcast_to(mask, self._data.shape)
else:
self._data = duck_require(data._data, copy=copy, order=order,
ndmin=ndmin)
return
elif data is X and mask is None:
# 0d masked array
if dtype is None:
raise ValueError("must supply dtype if all elements are X")
self._data = np.array(dtype.type(0))
self._mask = np.array(True)
return
# Otherwise got non-masked type, we convert data/mask to MaskedArray:
if mask is None:
# if mask is None, user can put X in the data.
# Otherwise, X will cause some kind of error in np.array below
data, mask, _ = replace_X(data, dtype=dtype)
# replace_X sometimes uses broadcast_to, which returns a
# readonly array with funny strides. Make writeable if so,
# since we will end up in the is_ndducktype code-path below.
if (isinstance(mask, np.ndarray) and
mask.flags['WRITEABLE'] == False):
mask = mask.copy()
self._data = asarr(data, dtype=dtype, copy=copy,order=order,ndmin=ndmin)
if mask is None:
self._mask = np.zeros(self._data.shape, dtype='bool', order=order)
elif is_ndtype(mask):
self._mask = asarr(mask, dtype=np.bool_, copy=copy, order=order)
if self._mask.shape != self._data.shape:
self._mask = np.broadcast_to(self._mask,self._data.shape).copy()
else:
self._mask = np.empty(self._data.shape, dtype='bool')
self._mask[...] = np.broadcast_to(mask, self._data.shape)
@classmethod
def __nd_duckprint_dispatcher__(cls):
return masked_dispatcher
def __str__(self):
return duck_str(self)
def __repr__(self):
return duck_repr(self, showdtype=self._mask.all())
def __getitem__(self, ind):
if is_string_or_list_of_strings(ind):
# for viewing fields of structured arrays, return readonly view.
# (see .real/.imag discussion in user guide)
ret = self._data[ind]
ret.flags['WRITEABLE'] = False
return type(self)(ret, self._mask)
if not isinstance(ind, tuple):
ind = (ind,)
# If a boolean MaskedArray is provided as an ind, treat masked vals as
# False. Allows code like "a[a>0]", which is then the same as
# "a[np.nonzero(a>0)]"
ind = tuple(i.filled(False, view=1) if
(isinstance(i, MaskedArray) and i.dtype.type is np.bool_)
else i for i in ind)
# TODO: Possible future improvement would be to support masked
# integer arrays as indices. Then marr[boolmask] should behave
# the same as marr[where(boolmask)], i.e. masked indices are
# ignored.
data = self._data[ind]
mask = self._mask[ind]
if is_ndscalar(mask): # test mask not data, to account for obj arrays
return type(self)._scalartype(data, mask, dtype=self.dtype)
return type(self)(data, mask, dtype=self.dtype)
def __setitem__(self, ind, val):
if not self.flags.writeable:
raise ValueError("assignment destination is read-only")
if self.dtype.names and is_string_or_list_of_strings(ind):
raise ValueError("Cannot assign to fields of a Masked structured "
"array")
if not isinstance(ind, tuple):
ind = (ind,)
# If a boolean MaskedArray is provided as an ind, treat masked vals as
# False. Allows code like "a[a>0] = X"
ind = tuple(i.filled(False, view=1) if
(isinstance(i, MaskedArray) and i.dtype.type is np.bool_)
else i for i in ind)
if val is X:
self._mask[ind] = True
elif isinstance(val, (MaskedArray, MaskedScalar)):
self._data[ind] = val._data
self._mask[ind] = val._mask
else:
self._data[ind] = val
self._mask[ind] = False
def __len__(self):
return len(self._data)
@property
def shape(self):
return self._data.shape
@shape.setter
def shape(self, shp):
self._data.shape = shp
self._mask.shape = shp
@property
def dtype(self):
return self._data.dtype
@dtype.setter
def dtype(self, dt):
dt = np.dtype(dt)
if self._data.dtype.itemsize != dt.itemsize:
raise ValueError("views of MaskedArrays cannot change the "
"datatype's itemsize")
self._data.dtype = dt
@property
def flags(self):
return self._data.flags
@property
def strides(self):
return self._data.strides
@property
def mask(self):
# return a readonly view of mask
m = self._mask.view()
m.flags['WRITEABLE'] = False
return m
def view(self, dtype=None, type=None):
if type is not None:
raise ValueError("subclasses not yet supported")
if dtype is None:
dtype = self.dtype
else:
try:
dtype = np.dtype(dtype)
except ValueError:
raise ValueError("dtype must be a dtype, not subclass")
if dtype.itemsize != self.itemsize:
raise ValueError("views of MaskedArrays cannot change the "
"datatype's itemsize")
        # the 'type' parameter shadows the builtin and is None at this point,
        # so use builtins.type to get the class
        return builtins.type(self)(self._data.view(dtype), self._mask)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True):
result_data = self._data.astype(dtype, order, casting, subok, copy)
# force a copy of mask if data was copied
        if copy == False and result_data is not self._data:
copy = True
result_mask = self._mask.astype(bool, order, casting, subok, copy)
return type(self)(result_data, result_mask)
def tolist(self):
return [x.tolist() for x in self]
def filled(self, fill_value=np._NoValue, minmax=None, view=False):
"""
Parameters
==========
fill_value : scalar, optional
value to put in masked positions of the array. Defaults to 0
if minmax is not provided.
minmax : string 'min', 'max' or 'maxnan', optional
If 'min', fill masked elements with the minimum value for this
array's datatype. If 'max', fill with maximum value for this
datatype. If 'maxnan', fill with nan if a floating type, otherwise
same as 'max'.
view : boolean, optional
If True, then the returned array is a view of the underlying data
array rather than a copy (optimization). Be careful, as subsequent
actions on the maskedarray can put nonsense data in the view.
            If the array is not writeable, this option is ignored and a copy is
always returned.
Returns
=======
data : ndarray
Returns a copy of this MaskedArray with masked elements replaced
            by the fill value (or a view if view=True).
"""
if view and self._data.flags['WRITEABLE']:
d = self._data.view()
d[self._mask] = self._get_fill_value(fill_value, minmax)
d.flags['WRITEABLE'] = False
return d
d = self._data.copy(order='K')
d[self._mask] = self._get_fill_value(fill_value, minmax)
return d
def count(self, axis=None, keepdims=False):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the count is performed.
            The default (`axis` = `None`) is to perform the count over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
If this is a tuple of ints, the count is performed on multiple
axes, instead of a single axis or all the axes as before.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
result : ndarray or scalar
An array with the same shape as self, with the specified
axis removed. If self is a 0-d array, or if `axis` is None, a scalar
is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.X
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
return (~self._mask).sum(axis=axis, dtype=np.intp, keepdims=keepdims)
# This works inplace, unlike np.sort
def sort(self, axis=-1, kind='quicksort', order=None):
# Note: See comment in np.sort impl below for trick used here.
# This is the inplace version
self._data[self._mask] = _maxvals[self.dtype]
self._data.sort(axis, kind, order)
self._mask.sort(axis, kind)
# This works inplace, unlike np.resize, and fills with repeat instead of 0
def resize(self, new_shape, refcheck=True):
self._data.resize(new_shape, refcheck)
self._mask.resize(new_shape, refcheck)
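# Brief illustration of the MaskedArray behaviour defined above (a sketch, not
# part of the original module; outputs shown approximately):
# >>> a = MaskedArray([1, X, 3, X, 5])
# >>> a[a > 2]           # masked values in a boolean index are treated as False
# MaskedArray([3, 5])
# >>> a[0] = X           # assigning X sets the mask at that position
# >>> a.count()
# 2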
class MaskedScalar(MaskedOperatorMixin, NDArrayAPIMixin):
"An ndarray scalar ducktype allowing the value to be masked"
def __init__(self, data, mask=None, dtype=None):
"""
Construct masked scalar given a data value and mask value.
Parameters
----------
data : numpy scalar, MaskedScalar, or X
The value of the scalar. If `X` is given, `dtype` must be supplied.
mask : bool
If true, the scalar is masked. Default is false.
dtype : numpy dtype
            dtype to convert the data to
Notes
-----
To construct a masked MaskedScalar of a certain dtype, it may be
        preferable to use ``X(dtype)``.
If `data` is a MaskedScalar, do not supply a `mask`.
"""
if isinstance(data, MaskedScalar):
self._data = data._data
self._mask = data._mask
if mask is not None:
raise ValueError("don't use mask if passing a maskedscalar")
self._dtype = self._data.dtype
return
elif data is X:
if dtype is None:
raise ValueError("Must supply dtype when data is X")
if mask is not None:
raise ValueError("don't supply mask when data is X")
self._data = np.dtype(dtype).type(0)
self._mask = np.bool_(True)
self._dtype = self._data.dtype
return
# Otherwise, convert data/mask to MaskedScalar:
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is None or dtype.type is not np.object_:
if is_ndtype(data):
if dtype is not None and data.dtype != dtype:
data = data.astype(dtype, copy=False)[()]
if not is_ndscalar(data):
data = data[()]
self._data = data
else:
# next line is more complicated than desired due to struct
# types, which numpy does not have a constructor for
# convert to scalar
self._data = np.array(data, dtype=dtype)[()]
self._mask = np.bool_(mask)
self._dtype = self._data.dtype
else:
# object dtype treated specially
self._data = data
self._mask = np.bool_(mask)
self._dtype = dtype
@property
def shape(self):
return ()
@property
def dtype(self):
return self._dtype
def __getitem__(self, ind):
if (self.dtype.names and is_string_or_list_of_strings(ind) or
isinstance(ind, int)):
# like structured scalars, support string indexing and int indexing
data = self._data[ind]
mask = self._mask
return type(self)(data, mask)
if ind == ():
return self
if ind == Ellipsis or ind == (Ellipsis,):
return MaskedArray(self)
raise IndexError("invalid index to scalar variable.")
def __setitem__(self, ind, val):
# non-masked structured scalars normally allow assignment (eg, to
# individual fields), but here we disallow *all* assignment, because of
# ambiguity about what to do with mask. See discussion of .real/.imag
raise ValueError("assignment destination is read-only")
def __str__(self):
if self._mask:
return MASK_STR
return str(self._data)
def __repr__(self):
if self._mask:
return "X({})".format(str(self.dtype))
if self.dtype.type in typelessdata and self.dtype.names is None:
dtstr = ''
else:
dtstr = ', dtype={}'.format(str(self.dtype))
return "MaskedScalar({}{})".format(repr(self._data), dtstr)
def __format__(self, format_spec):
if self._mask:
return 'X'
return format(self._data, format_spec)
def __bool__(self):
if self._mask:
return False
return bool(self._data)
def __hash__(self):
if self._mask:
return 0
return hash(self._data)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True):
result_data = self._data.astype(dtype, order, casting, subok, copy)
return MaskedScalar(result_data, self._mask)
def tolist(self):
if self._mask:
return self
return self._data.item()
@property
def mask(self):
return self._mask
def filled(self, fill_value=np._NoValue, minmax=None, view=False):
# view is ignored
fill_value = self._get_fill_value(fill_value, minmax)
if self._mask:
if self.dtype.names:
# next line is more complicated than desired due to struct
# types, which numpy does not have a constructor for
return np.array(fill_value, dtype=self.dtype)[()]
return type(self._data)(fill_value)
return self._data
def count(self, axis=None, keepdims=False):
return 0 if self._mask else 1
# create a special dummy object which signifies "masked", which users can put
# in lists to pass to MaskedArray constructor, or can assign to elements of
# a MaskedArray, to set the mask.
class MaskedX:
def __repr__(self):
return 'masked_input_X'
def __str__(self):
return 'masked_input_X'
# as a convenience, can make this typed by calling with a dtype
def __call__(self, dtype):
return MaskedScalar(0, True, dtype=dtype)
# prevent X from being used as an element in np.array, to avoid
# confusing the user. X should only be used in MaskedArrays
def __array__(self):
# hack: the only Exception that numpy doesn't clear here is MemoryError
raise MemoryError("Masked X should only be used in "
"MaskedArray assignment or construction")
masked = X = MaskedX()
ducktype_link(MaskedArray, MaskedScalar, (MaskedX,))
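# Quick sketch of how the X signifier is meant to be used (illustrative only):
# >>> MaskedArray([1.0, X, 3.0])   # X marks masked elements during construction
# >>> X('f8')                      # calling X with a dtype gives a masked MaskedScalar
# >>> np.array([X, 1])             # raises: X may not be used outside MaskedArray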
def replace_X(data, dtype=None):
"""
    Takes array-like input, replaces masked values by 0 and returns the filled data &
mask. This is more-or-less a reimplementation of PyArray_DTypeFromObject to
account for masked values
Parameters
==========
    data : nested tuple/list of ndarrays/MaskedArrays/X
dtype : dtype to force for output
Returns
=======
data : ndarray (or duck)
The data array of the combined inputs
mask : ndarray (or duck)
The mask array of the combined inputs
cls : type
The most derived MaskedArray subtype seen in the inputs
"""
if isinstance(data, (list, tuple)) and len(data) == 0:
return data, [], MaskedArray
# we do two passes: First we figure out the output dtype, then we replace
# all masked values by the filler "type(0)".
def get_dtype(data, cur_dtype=X):
if isinstance(data, (list, tuple)):
dtypes = (get_dtype(d, cur_dtype) for d in data)
dtypes = [dt for dt in dtypes if dt is not X]
if not dtypes:
return cur_dtype
out_dtype = np.result_type(*dtypes)
if cur_dtype is X:
return out_dtype
else:
return np.promote_types(out_dtype, cur_dtype)
if data is X:
return X
if is_ndtype(data):
return data.dtype
# otherwise try to coerce it to an ndarray (accounts for __array__,
# __array_interface__ implementors)
return np.array(data).dtype
if dtype is None:
dtype = get_dtype(data)
if dtype is X:
raise ValueError("must supply dtype if all elements are X")
else:
dtype = np.dtype(dtype)
fill = dtype.type(0)
cls = MaskedArray
def replace(data):
nonlocal cls
if data is X:
return fill, True
if isinstance(data, (MaskedScalar, MaskedArray)):
# whenever we come across a Masked* subtype, update cls
cls = get_duck_cls(cls, data)
return data._data, data._mask
if isinstance(data, list):
return (list(x) for x in zip(*(replace(d) for d in data)))
if is_ndtype(data):
return data, np.broadcast_to(False, data.shape)
# otherwise assume it is some kind of scalar
return data, False
out_dat, out_mask = replace(data)
return out_dat, out_mask, cls
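# Illustrative behaviour of replace_X (a sketch; output shown approximately):
# >>> dat, mask, cls = replace_X([[1, X, 3], [4, 5, X]])
# >>> dat
# [[1, 0, 3], [4, 5, 0]]
# >>> mask
# [[False, True, False], [False, False, True]]
# >>> cls is MaskedArray
# True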
# used by marr.flat
class MaskedIterator:
    def __init__(self, ma):
        # remember the owning array class so results are wrapped as that
        # class (or its scalar type), not as MaskedIterator
        self._arraycls = type(ma)
        self.dataiter = ma._data.flat
        self.maskiter = ma._mask.flat
    def __iter__(self):
        return self
    def __getitem__(self, indx):
        data = self.dataiter.__getitem__(indx)
        mask = self.maskiter.__getitem__(indx)
        return maskedarray_or_scalar(data, mask, cls=self._arraycls)
    def __setitem__(self, index, value):
        if value is X or (isinstance(value, MaskedScalar) and value.mask):
            self.maskiter[index] = True
        else:
            self.dataiter[index] = getdata(value)
            self.maskiter[index] = getmask(value)
    def __next__(self):
        return maskedarray_or_scalar(next(self.dataiter), next(self.maskiter),
                                     cls=self._arraycls)
    next = __next__
_minvals = ntypes._minvals
_minvals.update([(k, -np.inf) for k in [np.float16, np.float32, np.float64]])
_maxvals = ntypes._maxvals
_maxvals.update([(k, +np.inf) for k in [np.float16, np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
_minvals.update([(np.float128, -np.inf)])
_maxvals.update([(np.float128, +np.inf)])
def is_string_or_list_of_strings(val):
if isinstance(val, str):
return True
if not isinstance(val, list):
return False
for v in val:
if not isinstance(v, str):
return False
return True
################################################################################
# Printing setup
################################################################################
def as_masked_fmt(formattercls):
# we subclass the original formatter class, and wrap the result of
# `get_format_func` to take care of masked values.
class MaskedFormatter(formattercls):
def get_format_func(self, elem, **options):
if not elem._mask.any():
default_fmt = super().get_format_func(elem._data, **options)
return lambda x: default_fmt(x._data)
masked_str = options['masked_str']
# only get fmt_func based on non-masked values
# (we take care of masked elements ourselves)
unmasked = elem._data[~elem._mask]
if unmasked.size == 0:
default_fmt = lambda x: ''
reslen = len(masked_str)
else:
default_fmt = super().get_format_func(unmasked, **options)
# default_fmt should always give back same str length.
# Figure out what this is with a test call.
# This is a bit complicated to account for struct types.
example_elem = elem._data.ravel()[0]
example_str = default_fmt(example_elem)
reslen = builtins.max(len(example_str), len(masked_str))
# pad the columns to align when including the masked string
if issubclass(elem.dtype.type, np.floating) and unmasked.size > 0:
# for floats, try to align with decimal point if present
frac = example_str.partition('.')
nfrac = len(frac[1]) + len(frac[2])
masked_str = (masked_str + ' '*nfrac).rjust(reslen)
# Would it be safer/better to simply center the X?
else:
masked_str = masked_str.rjust(reslen)
def fmt(x):
if x._mask:
return masked_str
return default_fmt(x._data).rjust(reslen)
return fmt
return MaskedFormatter
MASK_STR = 'X'
masked_formatters = [as_masked_fmt(f) for f in default_duckprint_formatters]
default_options = default_duckprint_options.copy()
default_options['masked_str'] = MASK_STR
masked_dispatcher = FormatDispatcher(masked_formatters, default_options)
################################################################################
# Ufunc setup
################################################################################
_masked_ufuncs = {}
class _Masked_UFunc:
def __init__(self, ufunc):
self.f = ufunc
self.__doc__ = ufunc.__doc__
self.__name__ = ufunc.__name__
def __str__(self):
return "Masked version of {}".format(self.f)
def getdata(a):
if isinstance(a, (MaskedArray, MaskedScalar)):
return a._data
return a
def getmask(a):
if isinstance(a, (MaskedArray, MaskedScalar)):
return a._mask
return False
class _Masked_UniOp(_Masked_UFunc):
"""
Masked version of unary ufunc. Assumes 1 output.
Parameters
----------
ufunc : ufunc
The ufunc for which to define a masked version.
"""
def __init__(self, ufunc):
super().__init__(ufunc)
def __call__(self, a, *args, **kwargs):
if a is X:
raise ValueError("must supply dtype if all inputs are X")
a = as_duck_cls(a, base=MaskedArray)
out = kwargs.get('out', ())
if not isinstance(out, tuple):
out = (out,)
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
d, m = a._data, a._mask
where = ~m
kwhere = kwargs.get('where', None)
if isinstance(kwhere, (MaskedArray, MaskedScalar)):
if kwhere.dtype.type != np.bool_:
raise ValueError("'where' only supports masks for boolean "
"dtype")
kwhere = kwhere.filled(False)
if kwhere is not None:
where &= kwhere
kwargs['where'] = where
result = self.f(d, *args, **kwargs)
if out != ():
out[0]._mask[...] = m
return out[0]
cls = get_duck_cls(a, base=MaskedArray)
if is_ndscalar(result):
return type(a)._scalartype(result, m)
return type(a)(result, m)
class _Masked_BinOp(_Masked_UFunc):
"""
Masked version of binary ufunc. Assumes 1 output.
Parameters
----------
ufunc : ufunc
The ufunc for which to define a masked version.
reduce_fill : function or scalar, optional
Determines what fill_value is used during reductions. If a function is
        supplied, it should accept a dtype as argument and return a fill value
with that dtype. A scalar value may also be supplied, which is used
for all dtypes of the ufunc.
"""
def __init__(self, ufunc, reduce_fill=None):
super().__init__(ufunc)
if reduce_fill is None:
reduce_fill = ufunc.identity
if (reduce_fill is not None and
(is_ndscalar(reduce_fill) or not callable(reduce_fill))):
self.reduce_fill = lambda dtype: reduce_fill
else:
self.reduce_fill = reduce_fill
def __call__(self, a, b, **kwargs):
# treat X as a masked value of the other array's dtype
if a is X:
a = X(b.dtype)
if b is X:
b = X(a.dtype)
a, b = as_duck_cls(a, b, base=MaskedArray)
da, db = a._data, b._data
ma, mb = a._mask, b._mask
mkwargs = {}
for k in ['where', 'order']:
if k in kwargs:
mkwargs[k] = kwargs[k]
out = kwargs.get('out', ())
if not isinstance(out, tuple):
out = (out,)
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
mkwargs['out'] = (out[0]._mask,)
m = np.logical_or(ma, mb, **mkwargs)
where = ~m
kwhere = kwargs.get('where', None)
if isinstance(kwhere, (MaskedArray, MaskedScalar)):
if kwhere.dtype.type != np.bool_:
raise ValueError("'where' only supports masks for boolean "
"dtype")
kwhere = kwhere.filled(False)
if kwhere is not None:
where &= kwhere
kwargs['where'] = where
result = self.f(da, db, **kwargs)
if out:
return out[0]
if is_ndscalar(result):
return type(a)._scalartype(result, m)
return type(a)(result, m)
def reduce(self, a, **kwargs):
if self.reduce_fill is None:
raise TypeError("reduce not supported for masked {}".format(self.f))
da, ma = getdata(a), getmask(a)
mkwargs = kwargs.copy()
for k in ['initial', 'dtype']:
if k in mkwargs:
del mkwargs[k]
out = kwargs.get('out', ())
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
mkwargs['out'] = (out[0]._mask,)
initial = kwargs.get('initial', None)
if isinstance(initial, (MaskedScalar, MaskedX)):
raise ValueError("initial should not be masked")
if 0: # two different implementations, investigate performance
wheremask = ~ma
if 'where' in kwargs:
wheremask &= kwargs['where']
kwargs['where'] = wheremask
if 'initial' not in kwargs:
kwargs['initial'] = self.reduce_fill(da.dtype)
result = self.f.reduce(da, **kwargs)
m = np.logical_and.reduce(ma, **mkwargs)
else:
if not is_ndscalar(da):
da[ma] = self.reduce_fill(da.dtype)
            # if da is a scalar, we get the correct result regardless of the fill
result = self.f.reduce(da, **kwargs)
m = np.logical_and.reduce(ma, **mkwargs)
if out:
return out[0]
cls = get_duck_cls(a, base=MaskedArray)
if is_ndscalar(result):
return cls._scalartype(result, m)
return cls(result, m)
def accumulate(self, a, axis=0, dtype=None, out=None):
if self.reduce_fill is None:
raise TypeError("accumulate not supported for masked {}".format(
self.f))
da, ma = getdata(a), getmask(a)
dataout, maskout = None, None
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
dataout = out[0]._data
maskout = out[0]._mask
if not is_ndscalar(da):
da[ma] = self.reduce_fill(da.dtype)
result = self.f.accumulate(da, axis, dtype, dataout)
m = np.logical_and.accumulate(ma, axis, out=maskout)
if out:
return out[0]
if is_ndscalar(result):
return MaskedScalar(result, m)
return type(a)(result, m)
def outer(self, a, b, **kwargs):
if self.reduce_fill is None:
raise TypeError("outer not supported for masked {}".format(self.f))
da, db = getdata(a), getdata(b)
ma, mb = getmask(a), getmask(b)
# treat X as a masked value of the other array's dtype
if da is X:
da, ma = db.dtype.type(0), np.bool_(True)
if db is X:
db, mb = da.dtype.type(0), np.bool_(True)
mkwargs = kwargs.copy()
if 'dtype' in mkwargs:
del mkwargs['dtype']
out = kwargs.get('out', ())
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
mkwargs['out'] = (out[0]._mask,)
if not is_ndscalar(da):
da[ma] = self.reduce_fill(da.dtype)
if not is_ndscalar(db):
db[mb] = self.reduce_fill(db.dtype)
result = self.f.outer(da, db, **kwargs)
m = np.logical_or.outer(ma, mb, **mkwargs)
if out:
return out[0]
if is_ndscalar(result):
return MaskedScalar(result, m)
return type(a)(result, m)
def reduceat(self, a, indices, **kwargs):
if self.reduce_fill is None:
raise TypeError("reduce not supported for masked {}".format(self.f))
da, ma = getdata(a), getmask(a)
mkwargs = kwargs.copy()
for k in ['initial', 'dtype']:
if k in mkwargs:
del mkwargs[k]
out = kwargs.get('out', ())
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
mkwargs['out'] = (out[0]._mask,)
initial = kwargs.get('initial', None)
if isinstance(initial, (MaskedScalar, MaskedX)):
raise ValueError("initial should not be masked")
if not is_ndscalar(da):
da[ma] = self.reduce_fill(da.dtype)
        # if da is a scalar, we get the correct result regardless of the fill
result = self.f.reduceat(da, indices, **kwargs)
m = np.logical_and.reduceat(ma, indices, **mkwargs)
if out:
return out[0]
if is_ndscalar(result):
return MaskedScalar(result, m)
return type(a)(result, m)
def at(self, a, indices, b=None):
if isinstance(indices, (MaskedArray, MaskedScalar)):
raise ValueError("indices should not be masked. "
"Use .filled() first")
da, ma = getdata(a), getmask(a)
db, mb = None, None
if b is not None:
db, mb = getdata(b), getmask(b)
self.f.at(da, indices, db)
np.logical_or.at(ma, indices, mb)
def _add_ufunc(ufunc, uni=False, glob=globals(), **kwargs):
if uni:
impl = _Masked_UniOp(ufunc, **kwargs)
else:
impl = _Masked_BinOp(ufunc, **kwargs)
_masked_ufuncs[ufunc] = impl
glob[ufunc.__name__] = impl
# unary funcs
for ufunc in [umath.exp, umath.conjugate, umath.sin, umath.cos, umath.tan,
umath.arctan, umath.arcsinh, umath.sinh, umath.cosh,
umath.tanh, umath.absolute, umath.fabs, umath.negative,
umath.floor, umath.ceil, umath.logical_not, umath.isfinite,
umath.isinf, umath.isnan, umath.invert, umath.sqrt, umath.log,
umath.log2, umath.log10, umath.tan, umath.arcsin,
umath.arccos, umath.arccosh, umath.arctanh]:
_add_ufunc(ufunc, uni=True)
# binary ufuncs
for ufunc in [umath.add, umath.subtract, umath.multiply,
umath.arctan2, umath.hypot, umath.equal, umath.not_equal,
umath.less_equal, umath.greater_equal, umath.less,
umath.greater, umath.logical_and, umath.logical_or,
umath.logical_xor, umath.bitwise_and, umath.bitwise_or,
umath.bitwise_xor, umath.true_divide, umath.floor_divide,
umath.remainder, umath.fmod, umath.mod, umath.power]:
_add_ufunc(ufunc)
# fill value depends on dtype
_add_ufunc(umath.maximum, reduce_fill=lambda dt: _minvals[dt])
_add_ufunc(umath.minimum, reduce_fill=lambda dt: _maxvals[dt])
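# Sketch of why the reduce fill matters (illustrative): before a maximum
# reduction the masked entries are filled with the dtype's minimum (and vice
# versa for minimum), so they can never win the comparison:
# >>> a = MaskedArray([2, X, 7, X])
# >>> np.maximum.reduce(a)         # dispatches to _Masked_BinOp.reduce
# MaskedScalar(7)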
################################################################################
# __array_function__ setup
################################################################################
implements = new_ducktype_implementation()
def get_maskedout(out):
if out is not None:
if isinstance(out, MaskedArray):
return out._data, out._mask
raise Exception("out must be a masked array")
return None, None
def maskedarray_or_scalar(data, mask, out=None, cls=MaskedArray):
if out is not None:
return out
if is_ndscalar(data):
return cls._scalartype(data, mask)
return cls(data, mask)
def _copy_mask(mask, outmask=None):
if outmask is not None:
result_mask = outmask
result_mask[...] = mask
else:
result_mask = mask.copy()
return result_mask
def _inplace_not(v):
if isinstance(v, np.ndarray):
return np.logical_not(v, out=v)
return np.logical_not(v)
################################################################################
# npy-api implementations
################################################################################
@implements(np.all)
def all(a, axis=None, out=None, keepdims=np._NoValue):
a = as_duck_cls(a, base=MaskedArray)
# out can be maskedarray or ndarray since we never return masked elements
# (or.. should we only allow ndarray out?)
if isinstance(out, MaskedArray):
np.all(a.filled(True, view=1), axis, out._data, keepdims)
out._mask[...] = False
return out
return np.all(a.filled(True, view=1), axis, out, keepdims)
    # Note: returns boolean, not MaskedArray. In case of fully masked input,
# return True, like np.all([]).
@implements(np.any)
def any(a, axis=None, out=None, keepdims=np._NoValue):
a = as_duck_cls(a, base=MaskedArray)
if isinstance(out, MaskedArray):
np.any(a.filled(False, view=1), axis, out._data, keepdims)
out._mask[...] = False
return out
return np.any(a.filled(False, view=1), axis, out, keepdims)
    # Note: returns boolean, not MaskedArray. In case of fully masked input,
# return False, like np.any([])
@implements(np.amax)
@implements(np.max)
def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=True):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
kwarg = {}
if keepdims is not np._NoValue:
kwarg['keepdims'] = keepdims
if where is not np._NoValue:
kwarg['where'] = where
initial_m = initial_d = np._NoValue
if initial is not np._NoValue:
ismasked = isinstance(initial, MaskedScalar)
if initial is X or ismasked and initial._mask:
raise ValueError("initial cannot be masked")
initial_m = False
initial_d = initial._data if ismasked else initial
filled = a.filled(minmax='min', view=1)
result_data = np.max(filled, axis, outdata, initial=initial_d, **kwarg)
result_mask = np.logical_and.reduce(a._mask, axis, out=outmask,
initial=initial_m, **kwarg)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.argmax)
def argmax(a, axis=None, out=None):
if isinstance(out, MaskedArray):
raise TypeError("out argument of argmax should be an ndarray")
a = as_duck_cls(a, base=MaskedArray)
# most of the time this is enough
filled = a.filled(minmax='min', view=1)
result_data = np.argmax(filled, axis, out)
# except if the only unmasked elem is minval. Have to check and do carefully
data_min = filled == _minvals[a.dtype]
is_min = data_min & ~a._mask
has_min = np.any(is_min, axis=axis)
if np.any(has_min):
has_no_other_data = np.all(data_min, axis=axis)
has_lonely_min = has_min & has_no_other_data
if np.any(has_lonely_min):
min_ind = np.argmax(is_min, axis=axis)
if is_ndscalar(result_data):
return min_ind
result_data[has_lonely_min] = min_ind[has_lonely_min]
# one day, might speed up with numba/extension. Or with np.take?
return result_data
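# Worked corner case for the fix-up above (illustrative): when the only unmasked
# value equals the dtype minimum, the min-filled array alone cannot distinguish
# it from masked positions, so the index is corrected afterwards:
# >>> a = MaskedArray([X, np.iinfo(np.int64).min, X])
# >>> np.argmax(a)
# 1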
@implements(np.amin)
@implements(np.min)
def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
kwarg = {}
if keepdims is not np._NoValue:
kwarg['keepdims'] = keepdims
if where is not np._NoValue:
kwarg['where'] = where
initial_m = initial_d = np._NoValue
if initial is not np._NoValue:
ismasked = isinstance(initial, MaskedScalar)
if initial is X or ismasked and initial._mask:
raise ValueError("initial cannot be masked")
initial_m = False
initial_d = initial._data if ismasked else initial
filled = a.filled(minmax='max', view=1)
result_data = np.min(filled, axis, outdata, initial=initial_d, **kwarg)
result_mask = np.logical_and.reduce(a._mask, axis, out=outmask,
initial=initial_m, **kwarg)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.argmin)
def argmin(a, axis=None, out=None):
if isinstance(out, MaskedArray):
raise TypeError("out argument of argmax should be an ndarray")
a = as_duck_cls(a, base=MaskedArray)
# most of the time this is enough
filled = a.filled(minmax='max', view=1)
result_data = np.argmin(filled, axis, out)
# except if the only unmasked elem is maxval. Have to check and do carefully
data_max = filled == _maxvals[a.dtype]
is_max = data_max & ~a._mask
has_max = np.any(is_max, axis=axis)
if np.any(has_max):
has_no_other_data = np.all(data_max, axis=axis)
has_lonely_max = has_max & has_no_other_data
if np.any(has_lonely_max):
max_ind = np.argmax(is_max, axis=axis)
if is_ndscalar(result_data):
return max_ind
result_data[has_lonely_max] = max_ind[has_lonely_max]
return result_data
@implements(np.sort)
def sort(a, axis=-1, kind='quicksort', order=None):
a = as_duck_cls(a, base=MaskedArray)
# Note: This is trickier than it looks. The first line sorts the mask
# together with any min_vals which may be present, so there appears to
# be a problem ordering mask vs min_val elements.
# But, since we know all the masked elements have to end up at the end
# of the axis, we can sort the mask too and everything works out. The
# mask-sort only swaps the mask between min_val and masked positions
# which have the same underlying data.
# np.nan should sort higher than all others, so use it as fill if floating
result_data = np.sort(a.filled(minmax='maxnan', view=1), axis, kind, order)
result_mask = np.sort(a._mask, axis, kind) #or partition for speed?
return maskedarray_or_scalar(result_data, result_mask, cls=type(a))
# Note: lexsort may be faster, but doesn't provide kind or order kwd
@implements(np.argsort)
def argsort(a, axis=-1, kind='quicksort', order=None):
a = as_duck_cls(a, base=MaskedArray)
# Similar to mask-sort trick in sort above, here after sorting data we
# re-sort based on mask. Use the property that if you argsort the index
# array produced by argsort you get the element rank, which can be
# argsorted again to get back the sort indices. However, here we
# modify the rank based on the mask before inverting back to indices.
# Uses two argsorts plus a temp array.
inds = np.argsort(a.filled(minmax='maxnan', view=1), axis, kind, order)
# next two lines "reverse" the argsort (same as double-argsort)
ranks = np.empty(inds.shape, dtype=inds.dtype)
np.put_along_axis(ranks, inds, np.arange(a.shape[axis]), axis)
# prepare to resort but make masked elem highest rank
ranks[a._mask] = _maxvals[ranks.dtype]
return np.argsort(ranks, axis, kind)
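# Tiny worked example of the double-argsort trick (illustrative): the masked
# position is pushed to the highest rank and therefore sorts last:
# >>> a = MaskedArray([3, X, 1])
# >>> np.argsort(a)
# array([2, 0, 1])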
@implements(np.partition)
def partition(a, kth, axis=-1, kind='introselect', order=None):
a = as_duck_cls(a, base=MaskedArray)
inds = np.argpartition(a, kth, axis, kind, order)
return np.take_along_axis(a, inds, axis=axis)
@implements(np.argpartition)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
# see argsort for explanation
a = as_duck_cls(a, base=MaskedArray)
filled = a.filled(minmax='maxnan', view=1)
inds = np.argpartition(filled, kth, axis, kind, order)
ranks = np.empty(inds.shape, dtype=inds.dtype)
np.put_along_axis(ranks, inds, np.arange(a.shape[axis]), axis)
ranks[a._mask] = _maxvals[ranks.dtype]
return np.argpartition(ranks, kth, axis, kind)
@implements(np.searchsorted, checked_args=('v',))
def searchsorted(a, v, side='left', sorter=None):
a = as_duck_cls(a, base=MaskedArray)
maskleft = len(a) - np.sum(a._mask)
aval = a.filled(minmax='maxnan', view=1)
inds = np.searchsorted(aval, v.filled(minmax='maxnan', view=1),
side, sorter)
# Line above treats mask and maxval as the same, we need to fix it up
if side == 'left':
# masked vals in v need to be moved right to the left end of the
# masked vals in a (which have to be to the right end of a).
inds[v._mask] = maskleft
else:
        # maxvals in v need to be moved left to the left end of the
# masked vals in a.
if issubclass(v.dtype.type, np.inexact):
maxinds = np.isnan(v._data)
else:
maxinds = v._data == _maxvals[v.dtype]
inds[maxinds & ~v._mask] = maskleft
return inds
@implements(np.digitize)
def digitize(x, bins, right=False):
x = as_duck_cls(x, base=MaskedArray)
# Original comment:
# here for compatibility, searchsorted below is happy to take this
if np.issubdtype(x.dtype, np.complexfloating):
raise TypeError("x may not be complex")
if isinstance(bins, (MaskedArray, MaskedScalar)):
raise ValueError("bins should not be masked. "
"Use .filled() first")
mono = np.lib.function_base._monotonicity(bins)
if mono == 0:
raise ValueError("bins must be monotonically "
"increasing or decreasing")
# this is backwards because the arguments below are swapped
side = 'left' if right else 'right'
if mono == -1:
# reverse the bins, and invert the results
return len(bins) - np.searchsorted(bins[::-1], x, side=side)
else:
return np.searchsorted(bins, x, side=side)
@implements(np.lexsort)
def lexsort(keys, axis=-1):
if not isinstance(keys, tuple):
keys = tuple(keys)
keys = as_duck_cls(*keys, base=MaskedArray, single=False)
# strategy: for each key, split into a mask and data key.
# So, we end up sorting twice as many keys. Mask is primary key (last).
keys = tuple(x for k in keys for x in (k._data, k._mask))
return np.lexsort(keys, axis)
@implements(np.mean)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
cls = get_duck_cls(a, base=MaskedArray)
if type(a) is not cls:
a = cls(a)
# code partly copied from _mean in numpy/core/_methods.py
is_float16_result = False
rcount = a.count(axis=axis, **kwargs)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None:
if issubclass(a.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype('f8')
elif issubclass(a.dtype.type, np.float16):
dtype = np.dtype('f4')
is_float16_result = True
ret = np.sum(a.filled(0, view=1), axis=axis, out=outdata, dtype=dtype,
**kwargs)
retmask = np.all(a._mask, axis=axis, out=outmask, **kwargs)
with np.errstate(divide='ignore', invalid='ignore'):
if is_ndarr(ret):
ret = np.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
if is_float16_result and out is None:
                ret = a.dtype.type(ret)
elif hasattr(ret, 'dtype'):
if is_float16_result:
                ret = a.dtype.type(ret / rcount)
else:
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return maskedarray_or_scalar(ret, retmask, out, type(a))
@implements(np.var)
def var(a, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
"""
Returns the variance of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.var` for full documentation.
See Also
--------
ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
# code largely copied from _methods.var
rcount = a.count(axis=axis, **kwargs)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(a.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype('f8')
# Compute the mean, keeping same dims. Note that if dtype is not of
# inexact type then arraymean will not be either.
rcount = a.count(axis=axis, keepdims=True)
arrmean = a.filled(0).sum(axis=axis, dtype=dtype, keepdims=True)
with np.errstate(divide='ignore', invalid='ignore'):
if not is_ndscalar(arrmean):
arrmean = np.true_divide(arrmean, rcount, out=arrmean,
casting='unsafe', subok=False)
else:
arrmean = arrmean.dtype.type(arrmean / rcount)
# Compute sum of squared deviations from mean
x = type(a)(a - arrmean)
if issubclass(a.dtype.type, np.complexfloating):
x = np.multiply(x, np.conjugate(x), out=x).real
else:
x = np.multiply(x, x, out=x)
ret = x.filled(0, view=1).sum(axis, dtype, out=outdata, **kwargs)
# Compute degrees of freedom and make sure it is not negative.
rcount = a.count(axis=axis, **kwargs)
rcount = np.maximum(rcount - ddof, 0)
# divide by degrees of freedom
with np.errstate(divide='ignore', invalid='ignore'):
if is_ndarr(ret):
ret = np.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
if out is not None:
out[rcount == 0] = X
return out
return maskedarray_or_scalar(ret, rcount == 0, cls=type(a))
@implements(np.std)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
a = as_duck_cls(a, base=MaskedArray)
ret = var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(ret, MaskedArray):
ret = np.sqrt(ret, out=ret)
elif hasattr(ret, 'dtype'):
ret = np.sqrt(ret).astype(ret.dtype)
else:
ret = np.sqrt(ret)
return ret
@implements(np.average, checked_args=('a',))
def average(a, axis=None, weights=None, returned=False):
a = as_duck_cls(a, base=MaskedArray)
if weights is None:
avg = a.mean(axis)
if returned:
return avg, avg.dtype.type(a.count(axis))
return avg
wgt = weights if is_ndtype(weights) else np.array(weights)
if isinstance(wgt, MaskedArray):
raise TypeError("weight must not be a MaskedArray")
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Note: No float16 special case, since ndarray.average skips it
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
if wgt.shape != a.shape:
wgt = np.broadcast_to(wgt, a.shape)
wgt = type(a)(wgt, a._mask)
scl = wgt.sum(axis=axis, dtype=result_dtype)
if np.any(scl == 0.0):
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
if returned:
return avg, scl
return avg
def _move_reduction_axis_last(a, axis=None):
"""
Modified from numpy.lib.function_base._ureduce.
Reshape/transpose array so desired axes are grouped at the end.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or iterable of ints
axes or axis to reduce
Returns
-------
arr : ndarray
Input ndarray with iteration axis/axes moved to be a single axis
at the end.
keepdims : tuple
a.shape with axis dims set to 1 which can be used to reshape the
result of a reduction to the same shape a ufunc with keepdims=True
would produce.
"""
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
axis = normalize_axis_tuple(axis, nd)
for ax in axis:
keepdim[ax] = 1
if len(axis) == 1:
# arr, with the iteration axis at the end
ax = axis[0]
dims = list(range(a.ndim))
a = np.transpose(a, dims[:ax] + dims[ax+1:] + [ax])
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
keepdim = tuple(keepdim)
else:
keepdim = (1,) * a.ndim
a = a.ravel()
return a, keepdim
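# Illustrative check of the axis regrouping (a sketch):
# >>> arr = np.zeros((2, 3, 4))
# >>> moved, kdim = _move_reduction_axis_last(arr, axis=(0, 2))
# >>> moved.shape, kdim
# ((3, 8), (1, 3, 1))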
@implements(np.median)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
return np.quantile(a, 0.5, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation='midpoint', keepdims=keepdims)
@implements(np.percentile)
def percentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
q = np.true_divide(q, 100)
q = np.asanyarray(q) # undo any decay the ufunc performed (gh-13105)
if not _quantile_is_valid(q):
raise ValueError("Percentiles must be in the range [0, 100]")
return _quantile_unchecked(
a, q, axis, out, overwrite_input, interpolation, keepdims)
@implements(np.quantile)
def quantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
q = np.asanyarray(q)
if not _quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
return _quantile_unchecked(
a, q, axis, out, overwrite_input, interpolation, keepdims)
def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
"""Assumes that q is in [0, 1], and is an ndarray"""
a = as_duck_cls(a, base=MaskedArray)
a, kdim = _move_reduction_axis_last(a, axis)
if len(q.shape) > 1:
raise ValueError("q must be a scalar or 1d array")
out_shape = (q.size,) + a.shape[:-1]
if out is None:
dt = np.promote_types(a.dtype, np.float64)
outarr = get_duck_cls(a)(np.empty(out_shape, dtype=dt))
else:
if out.shape == out_shape:
outarr = out
elif q.size == 1 and (1,)+out.shape == out_shape:
outarr = out[None,...]
else:
raise ValueError('out has wrong shape')
inds = np.ndindex(a.shape[:-1])
inds = (ind + (Ellipsis,) for ind in inds)
for ind in inds:
ai = a[ind]
dat = ai._data[~ai.mask]
oind = (slice(None),) + ind
if dat.size == 0:
outarr[oind] = X
else:
outarr[oind] = np.quantile(dat, q, interpolation=interpolation)
if out is not None:
return out
# return a scalar in simple case
if q.shape == () and axis is None:
return outarr[0]
out_dim = kdim if keepdims else a.shape[:-1]
return outarr.reshape(q.shape + out_dim)
@implements(np.cov, checked_args=('m', 'y'))
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions")
cls = get_duck_cls(m, base=MaskedArray)
if type(m) is not cls:
m = cls(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
if not is_ndtype(y):
y = cls(y)
else:
cls = get_duck_cls(m, y, base=MaskedArray)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
dtype = np.result_type(m, y, np.float64)
X = cls(m, ndmin=2, dtype=dtype)
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return cls([]).reshape(0, 0)
if y is not None:
y = cls(y, copy=False, ndmin=2, dtype=dtype)
if not rowvar and y.shape[0] != 1:
y = y.T
X = np.concatenate((X, y), axis=0)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if np.any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if np.any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg = np.average(X, axis=1, weights=w)
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = np.dot(X, X_T.conj())
# Determine the normalization
nomask = ~X.mask
wnm = nomask.astype(dtype) if w is None else w*nomask
w_sum = np.dot(wnm, nomask.T)
if ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
a_sum = np.dot(w*aweights*nomask, nomask.T)
fact = w_sum - ddof*a_sum/w_sum
nonpos_fact = fact <= 0
if np.any(nonpos_fact):
warnings.warn("Degrees of freedom <= 0 for slice",
RuntimeWarning, stacklevel=3)
fact[nonpos_fact] = X
c *= np.true_divide(1, fact)
return c.squeeze()
@implements(np.corrcoef, checked_args=('x', 'y'))
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning, stacklevel=3)
c = np.cov(x, y, rowvar)
try:
d = np.diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = np.sqrt(d.real)
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
cd = c._data
with np.errstate(invalid='ignore'):
np.clip(cd.real, -1, 1, out=cd.real)
if np.iscomplexobj(cd):
np.clip(cd.imag, -1, 1, out=cd.imag)
return c
@implements(np.clip)
def clip(a, a_min, a_max, out=None):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.clip(a._data, a_min, a_max, outdata)
result_mask = _copy_mask(a._mask, outmask)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.compress)
def compress(condition, a, axis=None, out=None):
# Note: masked values in condition treated as False
outdata, outmask = get_maskedout(out)
cls = get_duck_cls(condition, a, base=MaskedArray)
cond = cls(condition).filled(False, view=1)
a = cls(a)
result_data = np.compress(cond, a._data, axis, outdata)
result_mask = np.compress(cond, a._mask, axis, outmask)
return maskedarray_or_scalar(result_data, result_mask, out, cls)
@implements(np.copy)
def copy(a, order='K'):
a = as_duck_cls(a, base=MaskedArray)
result_data = np.copy(a._data, order=order)
result_mask = np.copy(a._mask, order=order)
return maskedarray_or_scalar(result_data, result_mask, cls=type(a))
@implements(np.product)
@implements(np.prod)
def prod(a, axis=None, dtype=None, out=None, keepdims=False):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.prod(a.filled(1, view=1), axis=axis, dtype=dtype,
out=outdata, keepdims=keepdims)
result_mask = np.all(a._mask, axis=axis, out=outmask, keepdims=keepdims)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.cumproduct)
@implements(np.cumprod)
def cumprod(a, axis=None, dtype=None, out=None):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.cumprod(a.filled(1, view=1), axis, dtype=dtype,
out=outdata)
result_mask = np.logical_or.accumulate(~a._mask, axis, out=outmask)
    result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.sum)
def sum(a, axis=None, dtype=None, out=None, keepdims=False):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.sum(a.filled(0, view=1), axis, dtype=dtype,
out=outdata, keepdims=keepdims)
result_mask = np.all(a._mask, axis, out=outmask, keepdims=keepdims)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.cumsum)
def cumsum(a, axis=None, dtype=None, out=None):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.cumsum(a.filled(0, view=1), axis, dtype=dtype,
out=outdata)
result_mask = np.logical_or.accumulate(~a._mask, axis, out=outmask)
    result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.diagonal)
def diagonal(a, offset=0, axis1=0, axis2=1):
a = as_duck_cls(a, base=MaskedArray)
result = np.diagonal(a._data, offset=offset, axis1=axis1, axis2=axis2)
rmask = np.diagonal(a._mask, offset=offset, axis1=axis1, axis2=axis2)
return maskedarray_or_scalar(result, rmask, cls=type(a))
@implements(np.diag)
def diag(v, k=0):
v = as_duck_cls(v, base=MaskedArray)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = type(v)(np.zeros((n, n), v.dtype))
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return np.diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@implements(np.diagflat)
def diagflat(v, k=0):
v = as_duck_cls(v, base=MaskedArray)
return np.diag(v.ravel(), k)
@implements(np.tril)
def tril(m, k=0):
m = as_duck_cls(m, base=MaskedArray)
mask = np.tri(*m.shape[-2:], k=k, dtype=bool)
return np.where(mask, m, np.zeros(1, m.dtype))
@implements(np.triu)
def triu(m, k=0):
m = as_duck_cls(m, base=MaskedArray)
mask = np.tri(*m.shape[-2:], k=k-1, dtype=bool)
return np.where(mask, np.zeros(1, m.dtype), m)
@implements(np.trace)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
outdata, outmask = get_maskedout(out)
a = as_duck_cls(a, base=MaskedArray)
result_data = np.trace(a.filled(0, view=1), offset=offset, axis1=axis1,
axis2=axis2, dtype=dtype, out=outdata)
result_mask = np.trace(~a._mask, offset=offset, axis1=axis1, axis2=axis2,
dtype=bool, out=outmask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.dot)
def dot(a, b, out=None):
outdata, outmask = get_maskedout(out)
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
result_data = np.dot(a.filled(0, view=1), b.filled(0, view=1),
out=outdata)
result_mask = np.dot(~a._mask, ~b._mask, out=outmask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, cls)
@implements(np.vdot)
def vdot(a, b):
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
result_data = np.vdot(a.filled(0, view=1), b.filled(0, view=1))
result_mask = np.vdot(~a._mask, ~b._mask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, cls=cls)
@implements(np.cross)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
    # Because of the mask calculation we don't support vectors of length 2;
    # convert them to length 3 if present. First do the axis manipulation as
    # in np.cross.
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
axis = None
# Check axisa and axisb are within bounds
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError(msg)
if a.shape[-1] == 2:
a = np.append(a, np.broadcast_to(0, a.shape[:-1] + (1,)), axis=-1)
if b.shape[-1] == 2:
b = np.append(b, np.broadcast_to(0, b.shape[:-1] + (1,)), axis=-1)
result_data = np.cross(a.filled(0, view=1), b.filled(0, view=1), axisa,
axisb, axisc, axis)
# trick: use nan behavior to compute mask
ma = np.where(a._mask, np.nan, 0)
mb = np.where(b._mask, np.nan, 0)
mab = np.cross(ma, mb, axisa, axisb, axisc, axis)
result_mask = np.isnan(mab)
return maskedarray_or_scalar(result_data, result_mask, cls=cls)
@implements(np.inner)
def inner(a, b):
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
result_data = np.inner(a.filled(0, view=1), b.filled(0, view=1))
result_mask = np.inner(~a._mask, ~b._mask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, cls=cls)
@implements(np.outer)
def outer(a, b, out=None):
outdata, outmask = get_maskedout(out)
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
result_data = np.outer(a.filled(0, view=1), b.filled(0, view=1),
out=outdata)
result_mask = np.outer(~a._mask, ~b._mask, out=outmask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, cls)
@implements(np.kron)
def kron(a, b):
cls = get_duck_cls(a, b, base=MaskedArray)
a = cls(a, copy=False, subok=True, ndmin=b.ndim)
nda, ndb = a.ndim, b.ndim
if (nda == 0 or ndb == 0):
return np.multiply(a, b)
a_shape = a.shape
b_shape = b.shape
nd = ndb
if ndb > nda:
a_shape = (1,)*(ndb-nda) + a_shape
elif b.ndim < a.ndim:
b_shape = (1,)*(nda-ndb) + b_shape
nd = nda
result = np.outer(a, b).reshape(a_shape + b_shape)
axis = nd-1
for _ in range(nd):
result = np.concatenate(result, axis=axis)
return result
@implements(np.tensordot)
def tensordot(a, b, axes=2):
try:
iter(axes)
except Exception:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
def nax(ax):
try:
return len(ax), list(ax)
except TypeError:
return 1, [ax]
na, axes_a = nax(axes_a)
nb, axes_b = nax(axes_b)
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
ashape, bshape = a.shape, b.shape
nda, ndb = a.ndim, b.ndim
equal = True
if na != nb:
equal = False
else:
for k in range(na):
if ashape[axes_a[k]] != bshape[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= ashape[axis]
newshape_a = (int(np.multiply.reduce([ashape[ax] for ax in notin])), N2)
olda = [ashape[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bshape[axis]
newshape_b = (N2, int(np.multiply.reduce([bshape[ax] for ax in notin])))
oldb = [bshape[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = np.dot(at, bt)
return res.reshape(olda + oldb)
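# Shape check (illustrative, not from the original source): with the default
# axes=2 the last two axes of `a` are contracted against the first two of
# `b`, matching np.tensordot on plain arrays, e.g.
#
#     a = np.ones((3, 4, 5))
#     b = np.ones((4, 5, 6))
#     np.tensordot(a, b).shape   # -> (3, 6), every entry equal to 20.0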
def _process_einsum_operands(operands):
    # operands can either start with a subscripts string, followed by the
    # operand arrays, or can alternate operand arrays and axis lists
if isinstance(operands[0], str):
arrs = operands[1:]
cls = get_duck_cls(*arrs, base=MaskedArray)
arrs = tuple(cls(x) for x in arrs)
data_ops = (operands[0],) + tuple(a.filled(0) for a in arrs)
imask_ops = (operands[0],) + tuple(~a._mask for a in arrs)
else:
cls = get_duck_cls(*operands[0::2], base=MaskedArray)
ops = tuple(o if n%2 else cls(o) for n,o in enumerate(operands))
data_ops = tuple(o if n%2 else o.filled(0) for n,o in enumerate(ops))
imask_ops = tuple(o if n%2 else ~o._mask for n,o in enumerate(ops))
return data_ops, imask_ops, cls
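# Both np.einsum calling conventions are handled above (hedged reminder, not
# from the original source): the subscripts-string form
#
#     np.einsum('ij,jk->ik', a, b)
#
# and the interleaved operand/axes form
#
#     np.einsum(a, [0, 1], b, [1, 2], [0, 2])
#
# In the second form the even positions are arrays and the odd positions are
# axis lists, which is why only every other operand is converted above.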
@implements(np.einsum)
def einsum(*operands, **kwargs):
out = kwargs.pop('out', None)
outdata, outmask = get_maskedout(out)
dtype = kwargs.pop('dtype', None)
data_ops, imask_ops, cls = _process_einsum_operands(operands)
result_data = np.einsum(*data_ops, out=outdata, dtype=dtype, **kwargs)
result_mask = np.einsum(*imask_ops, out=outmask, **kwargs)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, cls)
@implements(np.einsum_path)
def einsum_path(*operands, **kwargs):
out = kwargs.pop('out', None)
outdata, outmask = get_maskedout(out)
dtype = kwargs.pop('dtype', None)
data_ops, imask_ops, cls = _process_einsum_operands(operands)
    result_data = np.einsum_path(*data_ops, out=outdata, dtype=dtype, **kwargs)
result_mask = np.einsum_path(*imask_ops, out=outmask, **kwargs)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, cls)
@implements(np.correlate)
def correlate(a, v, mode='valid'):
    cls = get_duck_cls(a, v)
    a, v = cls(a), cls(v)
    result_data = np.correlate(a.filled(view=1), v.filled(view=1), mode)
    result_mask = np.correlate(~a._mask, ~v._mask, mode)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, cls=cls)
@implements(np.convolve)
def convolve(a, v, mode='full'):
cls = get_duck_cls(a, v)
a, v = cls(a), cls(v)
result_data = np.convolve(a.filled(view=1), v.filled(view=1), mode)
result_mask = np.convolve(~a._mask, ~v._mask, mode)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, cls=cls)
@implements(np.real)
def real(a):
result_data = np.real(a._data)
result_data.flags['WRITEABLE'] = False
result_mask = a._mask.copy()
return maskedarray_or_scalar(result_data, result_mask, cls=type(a))
@implements(np.imag)
def imag(a):
result_data = np.imag(a._data)
result_data.flags['WRITEABLE'] = False
result_mask = a._mask
return maskedarray_or_scalar(result_data, result_mask, cls=type(a))
@implements(np.ptp)
def ptp(a, axis=None, out=None, keepdims=False):
return np.subtract(
np.maximum.reduce(a, axis, None, out, keepdims),
np.minimum.reduce(a, axis, None, None, keepdims), out)
@implements(np.take)
def take(a, indices, axis=None, out=None, mode='raise'):
outdata, outmask = get_maskedout(out)
if isinstance(indices, (MaskedArray, MaskedScalar)):
raise ValueError("indices should not be masked. "
"Use .filled() first")
result_data = np.take(a._data, indices, axis, outdata, mode)
result_mask = np.take(a._mask, indices, axis, outmask, mode)
return maskedarray_or_scalar(result_data, result_mask, out, cls=type(a))
@implements(np.put)
def put(a, indices, values, mode='raise'):
data, mask, _ = replace_X(values, dtype=a.dtype)
np.put(a._data, indices, data, mode)
np.put(a._mask, indices, mask, mode)
return None
@implements(np.take_along_axis, checked_args=('arr',))
def take_along_axis(arr, indices, axis):
result_data = np.take_along_axis(arr._data, indices, axis)
result_mask = np.take_along_axis(arr._mask, indices, axis)
return maskedarray_or_scalar(result_data, result_mask, cls=type(arr))
@implements(np.put_along_axis, checked_args=('arr',))
def put_along_axis(arr, indices, values, axis):
data, mask, _ = replace_X(values, dtype=arr.dtype)
    np.put_along_axis(arr._data, indices, data, axis)
    # assumed by analogy with np.put above: update the mask in the same way
    np.put_along_axis(arr._mask, indices, mask, axis)
    return None
# coding: utf-8
# public items
__all__ = ["pca", "chopper_calibration", "r_division", "gauss_fit"]
# standard library
from logging import getLogger
# dependent packages
import decode as dc
import numpy as np
from astropy.modeling import fitting, models
from sklearn.decomposition import TruncatedSVD
def pca(onarray, offarray, n=10, exchs=None, pc=False, mode="mean"):
"""Apply Principal Component Analysis (PCA) method to estimate baselines at each time.
Args:
onarray (decode.array): Decode array of on-point observations.
offarray (decode.array): Decode array of off-point observations.
        n (int): The number of principal components.
        exchs (list of int): Indices of channels to be zeroed out before
            the fit (default: [16, 44, 46]).
pc (bool): When True, this function also returns
eigen vectors and their coefficients.
mode (None or str): The way of correcting offsets.
'mean': Mean.
'median': Median.
None: No correction.
Returns:
filtered (decode.array): Baseline-subtracted array.
When pc is True:
Ps (list(np.ndarray)): Eigen vectors.
Cs (list(np.ndarray)): Coefficients.
"""
logger = getLogger("decode.models.pca")
logger.info("n_components exchs mode")
if exchs is None:
exchs = [16, 44, 46]
logger.info("{} {} {}".format(n, exchs, mode))
offid = np.unique(offarray.scanid)
onid = np.unique(onarray.scanid)
onarray = onarray.copy() # Xarray
onarray[:, exchs] = 0
onvalues = onarray.values
onscanid = onarray.scanid.values
offarray = offarray.copy() # Xarray
offarray[:, exchs] = 0
offvalues = offarray.values
offscanid = offarray.scanid.values
Ps, Cs = [], []
Xatm = dc.full_like(onarray, onarray)
Xatmvalues = Xatm.values
model = TruncatedSVD(n_components=n)
for i in onid:
leftid = np.searchsorted(offid, i) - 1
rightid = np.searchsorted(offid, i)
Xon = onvalues[onscanid == i]
if leftid == -1:
Xoff = offvalues[offscanid == offid[rightid]]
Xoff_m = getattr(np, "nan" + mode)(Xoff, axis=0) if mode is not None else 0
Xon_m = Xoff_m
model.fit(Xoff - Xoff_m)
elif rightid == len(offid):
Xoff = offvalues[offscanid == offid[leftid]]
Xoff_m = getattr(np, "nan" + mode)(Xoff, axis=0) if mode is not None else 0
Xon_m = Xoff_m
model.fit(Xoff - Xoff_m)
else:
Xoff_l = offvalues[offscanid == offid[leftid]]
Xoff_lm = (
getattr(np, "nan" + mode)(Xoff_l, axis=0) if mode is not None else 0
)
Xoff_r = offvalues[offscanid == offid[rightid]]
Xoff_rm = (
getattr(np, "nan" + mode)(Xoff_r, axis=0) if mode is not None else 0
)
Xon_m = (
getattr(np, "nan" + mode)(np.vstack([Xoff_l, Xoff_r]), axis=0)
if mode is not None
else 0
)
model.fit(np.vstack([Xoff_l - Xoff_lm, Xoff_r - Xoff_rm]))
P = model.components_
C = model.transform(Xon - Xon_m)
Xatmvalues[onscanid == i] = C @ P + Xon_m
# Xatms.append(dc.full_like(Xon, C @ P + Xon_m.values))
Ps.append(P)
Cs.append(C)
if pc:
return Xatm, Ps, Cs
else:
return Xatm
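# Hedged sketch (not part of decode): stripped of the scan bookkeeping, the
# baseline estimate above is a truncated-SVD fit on off-point spectra followed
# by a projection of the on-point spectra, roughly:
#
#     rng = np.random.default_rng(0)
#     Xoff = rng.normal(size=(100, 64))       # off-point samples x channels
#     Xon = rng.normal(size=(50, 64))         # on-point samples x channels
#     off_m = Xoff.mean(axis=0)
#     model = TruncatedSVD(n_components=10)
#     model.fit(Xoff - off_m)
#     C = model.transform(Xon - off_m)
#     baseline = C @ model.components_ + off_m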
def chopper_calibration(onarray, offarray, rarray, Tamb, mode="mean"):
logger = getLogger("decode.models.chopper_calibration")
logger.info("mode")
logger.info("{}".format(mode))
onarray, offarray = r_division(onarray, offarray, rarray, mode=mode)
offid = np.unique(offarray.scanid)
onid = np.unique(onarray.scanid)
onarray = onarray.copy() # Xarray
onvalues = onarray.values
onscanid = onarray.scanid.values
offarray = offarray.copy() # Xarray
offvalues = offarray.values
offscanid = offarray.scanid.values
for i in onid:
oleftid = np.searchsorted(offid, i) - 1
orightid = np.searchsorted(offid, i)
Xon = onvalues[onscanid == i]
if oleftid == -1:
Xoff = offvalues[offscanid == offid[orightid]]
Xoff_m = getattr(np, "nan" + mode)(Xoff, axis=0)
elif orightid == len(offid):
Xoff = offvalues[offscanid == offid[oleftid]]
Xoff_m = getattr(np, "nan" + mode)(Xoff, axis=0)
else:
Xoff_l = offvalues[offscanid == offid[oleftid]]
Xoff_r = offvalues[offscanid == offid[orightid]]
Xoff_m = getattr(np, "nan" + mode)(np.vstack([Xoff_l, Xoff_r]), axis=0)
onvalues[onscanid == i] = Tamb * (Xon - Xoff_m) / (1 - Xoff_m)
for j in offid:
Xoff = offvalues[offscanid == j]
Xoff_m = getattr(np, "nan" + mode)(Xoff, axis=0)
offvalues[offscanid == j] = Tamb * (Xoff - Xoff_m) / (1 - Xoff_m)
return onarray, offarray
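# Worked illustration (not in decode): after R division the off-point level
# Xoff_m acts as the zero point, so a single calibrated sample is
#
#     Tamb, Xon, Xoff_m = 273.0, 0.6, 0.2
#     Tamb * (Xon - Xoff_m) / (1 - Xoff_m)   # -> 136.5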
def r_division(onarray, offarray, rarray, mode="mean"):
"""Apply R division.
Args:
onarray (decode.array): Decode array of on-point observations.
offarray (decode.array): Decode array of off-point observations.
rarray (decode.array): Decode array of R observations.
mode (str): Method for the selection of nominal R value.
'mean': Mean.
'median': Median.
Returns:
onarray_cal (decode.array): Calibrated array of on-point observations.
offarray_cal (decode.array): Calibrated array of off-point observations.
"""
logger = getLogger("decode.models.r_division")
logger.info("mode")
logger.info("{}".format(mode))
offid = np.unique(offarray.scanid)
onid = np.unique(onarray.scanid)
rid = np.unique(rarray.scanid)
onarray = onarray.copy() # Xarray
onvalues = onarray.values
onscanid = onarray.scanid.values
offarray = offarray.copy() # Xarray
offvalues = offarray.values
offscanid = offarray.scanid.values
rarray = rarray.copy() # Xarray
rvalues = rarray.values
rscanid = rarray.scanid.values
for i in onid:
rleftid = np.searchsorted(rid, i) - 1
rrightid = np.searchsorted(rid, i)
if rleftid == -1:
Xr = rvalues[rscanid == rid[rrightid]]
Xr_m = getattr(np, "nan" + mode)(Xr, axis=0)
elif rrightid == len(rid):
Xr = rvalues[rscanid == rid[rleftid]]
Xr_m = getattr(np, "nan" + mode)(Xr, axis=0)
else:
Xr_l = rvalues[rscanid == rid[rleftid]]
Xr_r = rvalues[rscanid == rid[rrightid]]
Xr_m = getattr(np, "nan" + mode)(np.vstack([Xr_l, Xr_r]), axis=0)
onvalues[onscanid == i] /= Xr_m
for j in offid:
rleftid = np.searchsorted(rid, j) - 1
rrightid = np.searchsorted(rid, j)
if rleftid == -1:
Xr = rvalues[rscanid == rid[rrightid]]
Xr_m = getattr(np, "nan" + mode)(Xr, axis=0)
elif rrightid == len(rid):
Xr = rvalues[rscanid == rid[rleftid]]
Xr_m = getattr(np, "nan" + mode)(Xr, axis=0)
else:
Xr_l = rvalues[rscanid == rid[rleftid]]
Xr_r = rvalues[rscanid == rid[rrightid]]
Xr_m = getattr(np, "nan" + mode)(np.vstack([Xr_l, Xr_r]), axis=0)
offvalues[offscanid == j] /= Xr_m
Xon_rdiv = dc.full_like(onarray, onarray)
Xoff_rdiv = dc.full_like(offarray, offarray)
Xonoff_rdiv = dc.concat([Xon_rdiv, Xoff_rdiv], dim="t")
Xonoff_rdiv_sorted = Xonoff_rdiv[np.argsort(Xonoff_rdiv.time.values)]
scantype = Xonoff_rdiv_sorted.scantype.values
newscanid = np.cumsum(np.hstack([False, scantype[1:] != scantype[:-1]]))
onmask = np.in1d(Xonoff_rdiv_sorted.scanid, onid)
offmask = np.in1d(Xonoff_rdiv_sorted.scanid, offid)
Xon_rdiv = Xonoff_rdiv_sorted[onmask]
Xoff_rdiv = Xonoff_rdiv_sorted[offmask]
Xon_rdiv.coords.update({"scanid": ("t", newscanid[onmask])})
Xoff_rdiv.coords.update({"scanid": ("t", newscanid[offmask])})
return Xon_rdiv, Xoff_rdiv
def gauss_fit(
map_data,
chs=None,
mode="deg",
amplitude=1,
x_mean=0,
y_mean=0,
x_stddev=None,
y_stddev=None,
theta=None,
cov_matrix=None,
noise=0,
**kwargs
):
"""make a 2D Gaussian model and fit the observed data with the model.
Args:
map_data (xarray.Dataarray): Dataarray of cube or single chs.
chs (list of int): in prep.
mode (str): Coordinates for the fitting
'pix'
'deg'
amplitude (float or None): Initial amplitude value of Gaussian fitting.
x_mean (float): Initial value of mean of the fitting Gaussian in x.
y_mean (float): Initial value of mean of the fitting Gaussian in y.
x_stddev (float or None): Standard deviation of the Gaussian
in x before rotating by theta.
y_stddev (float or None): Standard deviation of the Gaussian
in y before rotating by theta.
theta (float, optional or None): Rotation angle in radians.
cov_matrix (ndarray, optional): A 2x2 covariance matrix. If specified,
overrides the ``x_stddev``, ``y_stddev``, and ``theta`` defaults.
Returns:
decode cube (xarray cube) with fitting results in array and attrs.
"""
if chs is None:
chs = np.ogrid[0:63] # the number of channels would be changed
if len(chs) > 1:
for n, ch in enumerate(chs):
subdata = np.transpose(
np.full_like(map_data[:, :, ch], map_data.values[:, :, ch])
)
subdata[np.isnan(subdata)] = 0
if mode == "deg":
mX, mY = np.meshgrid(map_data.x, map_data.y)
elif mode == "pix":
mX, mY = np.mgrid[0 : len(map_data.y), 0 : len(map_data.x)]
g_init = models.Gaussian2D(
amplitude=np.nanmax(subdata),
x_mean=x_mean,
y_mean=y_mean,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
cov_matrix=cov_matrix,
**kwargs
) + models.Const2D(noise)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, mX, mY, subdata)
g_init2 = models.Gaussian2D(
amplitude=np.nanmax(subdata - g.amplitude_1),
x_mean=x_mean,
y_mean=y_mean,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
cov_matrix=cov_matrix,
**kwargs
)
fit_g2 = fitting.LevMarLSQFitter()
g2 = fit_g2(g_init2, mX, mY, subdata)
if n == 0:
results = np.array([g2(mX, mY)])
peaks = np.array([g2.amplitude.value])
x_means = np.array([g2.x_mean.value])
y_means = np.array([g2.y_mean.value])
x_stddevs = np.array([g2.x_stddev.value])
y_stddevs = np.array([g2.y_stddev.value])
thetas = np.array([g2.theta.value])
if fit_g2.fit_info["param_cov"] is None:
uncerts = np.array([0])
else:
error = np.diag(fit_g2.fit_info["param_cov"]) ** 0.5
uncerts = np.array([error[0]])
else:
results = np.append(results, [g2(mX, mY)], axis=0)
                peaks = np.append(peaks, [g2.amplitude.value], axis=0)
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Computes 2nd derivatives of power injection w.r.t. voltage.
"""
from numpy import ones, conj, arange
from scipy.sparse import csr_matrix as sparse
def d2Sbus_dV2(Ybus, V, lam):
"""Computes 2nd derivatives of power injection w.r.t. voltage.
Returns 4 matrices containing the partial derivatives w.r.t. voltage angle
and magnitude of the product of a vector C{lam} with the 1st partial
derivatives of the complex bus power injections. Takes sparse bus
admittance matrix C{Ybus}, voltage vector C{V} and C{nb x 1} vector of
multipliers C{lam}. Output matrices are sparse.
For more details on the derivations behind the derivative code used
    in PYPOWER, see:
[TN2] <NAME>, I{"AC Power Flows, Generalized OPF Costs and
their Derivatives using Complex Matrix Notation"}, MATPOWER
Technical Note 2, February 2010.
U{http://www.pserc.cornell.edu/matpower/TN2-OPF-Derivatives.pdf}
@author: <NAME> (PSERC Cornell)
"""
nb = len(V)
ib = arange(nb)
Ibus = Ybus * V
diaglam = sparse((lam, (ib, ib)))
diagV = sparse((V, (ib, ib)))
A = sparse((lam * V, (ib, ib)))
B = Ybus * diagV
C = A * conj(B)
D = Ybus.H * diagV
E = diagV.conj() * (D * diaglam - sparse((D * lam, (ib, ib))))
    F = C - A * sparse((conj(Ibus), (ib, ib)))
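# Note (illustrative, not from PYPOWER): sparse((vec, (ib, ib))) is the
# scipy.sparse CSR constructor with explicit (data, (row, col)) coordinates,
# so each such call above builds an nb x nb diagonal matrix from vec, e.g.
#
#     ib = arange(2)
#     diagV = sparse(([1.0 + 0j, 0.95 - 0.05j], (ib, ib)))
#     # diagV.toarray() -> [[1+0j, 0], [0, 0.95-0.05j]]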
# Reference: https://www.kaggle.com/c/bengaliai-cv19/discussion/123757
import numpy as np
import cv2
# ----------------------------------- Geometric -----------------------------------------
class RandomProjective():
def __init__(self, prob, magnitude=0.5):
self.prob = np.clip(prob, 0.0, 1.0)
        self.magnitude = magnitude
def __call__(self, image):
if np.random.uniform() > self.prob:
return image
mag = np.random.uniform(-1, 1) * 0.5 * self.magnitude
height, width = image.shape[:2]
x0, y0 = 0, 0
x1, y1 = 1, 0
x2, y2 = 1, 1
x3, y3 = 0, 1
mode = np.random.choice(['top', 'bottom', 'left', 'right'])
if mode == 'top':
x0 += mag
x1 -= mag
if mode == 'bottom':
x3 += mag
x2 -= mag
if mode == 'left':
y0 += mag
y3 -= mag
if mode == 'right':
y1 += mag
y2 -= mag
s = np.array([[0, 0], [1, 0], [1, 1], [0, 1]]) * [[width, height]]
d = np.array([[x0, y0], [x1, y1], [x2, y2], [x3, y3]]) * [[width, height]]
transform = cv2.getPerspectiveTransform(s.astype(np.float32), d.astype(np.float32))
image = cv2.warpPerspective(image, transform, (width, height), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
return image
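# Usage sketch (added for illustration, not in the original snippet): each
# augmentation is a callable that either returns the input unchanged or a
# warped copy of the same size, so they can be chained on HxW images:
#
#     img = np.zeros((137, 236), np.float32)
#     img[40:100, 60:180] = 1.0
#     aug = RandomProjective(prob=1.0, magnitude=0.5)
#     out = aug(img)   # same shape, projectively distorted content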
class RandomPerspective():
def __init__(self, prob, magnitude=0.5):
self.prob = np.clip(prob, 0.0, 1.0)
        self.magnitude = magnitude
def __call__(self, image):
if np.random.uniform() > self.prob:
return image
mag = np.random.uniform(-1, 1, (4, 2)) * 0.25 * self.magnitude
height, width = image.shape[:2]
s = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
d = s + mag
s *= [[width, height]]
d *= [[width, height]]
transform = cv2.getPerspectiveTransform(s.astype(np.float32), d.astype(np.float32))
image = cv2.warpPerspective(image, transform, (width, height), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
return image
class RandomRotate():
def __init__(self, prob, magnitude=0.5):
self.prob = np.clip(prob, 0.0, 1.0)
        self.magnitude = magnitude
def __call__(self, image):
if np.random.uniform() > self.prob:
return image
angle = 1 + np.random.uniform(-1, 1) * 30 * self.magnitude
height, width = image.shape[:2]
cx, cy = width // 2, height // 2
transform = cv2.getRotationMatrix2D((cx, cy), -angle, 1.0)
image = cv2.warpAffine(image, transform, (width, height), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
return image
class RandomScale():
def __init__(self, prob, magnitude=0.5):
self.prob = np.clip(prob, 0.0, 1.0)
        self.magnitude = magnitude
def __call__(self, image):
if np.random.uniform() > self.prob:
return image
s = 1 + np.random.uniform(-1, 1) * self.magnitude * 0.5
height, width = image.shape[:2]
transform = np.array([[s, 0, 0], [0, s, 0], ], np.float32)
image = cv2.warpAffine(image, transform, (width, height), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
return image
class RandomShearX():
def __init__(self, prob, magnitude=0.5):
self.prob = np.clip(prob, 0.0, 1.0)
        self.magnitude = magnitude
def __call__(self, image):
if np.random.uniform() > self.prob:
return image
sx = np.random.uniform(-1, 1) * self.magnitude
height, width = image.shape[:2]
transform = np.array([[1, sx, 0], [0, 1, 0]], np.float32)
image = cv2.warpAffine(image, transform, (width, height), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
return image
class RandomShearY():
def __init__(self, prob, magnitude=0.5):
self.prob = np.clip(prob, 0.0, 1.0)
        self.magnitude = magnitude
def __call__(self, image):
if np.random.uniform() > self.prob:
return image
sy = np.random.uniform(-1, 1) * self.magnitude
height, width = image.shape[:2]
transform = np.array([[1, 0, 0], [sy, 1, 0]], np.float32)
image = cv2.warpAffine(image, transform, (width, height), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
return image
class RandomStretchX():
def __init__(self, prob, magnitude=0.5):
        self.prob = np.clip(prob, 0.0, 1.0)
        self.magnitude = magnitude
import torch
import torch.nn as nn
import numpy as np
import cv2
from collections import deque
EPSILON = 1e-30
def ValidStructTensorIndicator(a,d,b):
# return 1-(a**2==d**2)*(b*(a+d)==0)
return ((2 * b * (a + d))**2+(a ** 2 - d ** 2)**2)>EPSILON
def Latent_channels_desc_2_num_channels(latent_channels_desc):
if isinstance(latent_channels_desc,int):
return latent_channels_desc
elif latent_channels_desc == 'STD_1dir': # Channel 0 controls STD, channel 1 controls horizontal Sobel
return 2
elif latent_channels_desc=='STD_directional' or 'structure_tensor' in latent_channels_desc:
return 3
class FilterLoss(nn.Module):
def __init__(self,latent_channels,constant_Z=None,reference_images=None,masks=None):
super(FilterLoss,self).__init__()
self.latent_channels = latent_channels
self.num_channels = Latent_channels_desc_2_num_channels(self.latent_channels)
if self.num_channels==0:
print('No control input channels z')
return
self.NOISE_STD = 1e-15#1/255
# self.model_training = constant_Z is None
self.model_training = isinstance(self.latent_channels,str)
if self.model_training:
if self.latent_channels == 'STD_1dir':#Channel 0 controls STD, channel 1 controls horizontal Sobel
DELTA_SIZE = 7
delta_im = np.zeros([DELTA_SIZE,DELTA_SIZE]); delta_im[DELTA_SIZE//2,DELTA_SIZE//2] = 1;
dir_filter = cv2.Sobel(delta_im,ddepth=cv2.CV_64F,dx=1,dy=0)
filter_margins = np.argwhere(np.any(dir_filter!=0,0))[0][0]
dir_filter = dir_filter[filter_margins:-filter_margins,filter_margins:-filter_margins]
self.filter = nn.Conv2d(in_channels=3,out_channels=3,kernel_size=dir_filter.shape,bias=False,groups=3)
self.filter.weight = nn.Parameter(data=torch.from_numpy(np.tile(np.expand_dims(np.expand_dims(dir_filter, 0), 0), reps=[3, 1, 1, 1])).type(torch.cuda.FloatTensor), requires_grad=False)
self.filter.filter_layer = True
elif 'structure_tensor' in self.latent_channels:
self.NOISE_STD = 1e-7
gradient_filters = [[[-1,1],[0,0]],[[-1,0],[1,0]]]
self.filters = []
for filter in gradient_filters:
filter = np.array(filter)
conv_layer = nn.Conv2d(in_channels=3,out_channels=3,kernel_size=filter.shape,bias=False,groups=3)
conv_layer.weight = nn.Parameter(data=torch.from_numpy(np.tile(np.expand_dims(np.expand_dims(filter, 0), 0), reps=[3, 1, 1, 1])).type(torch.cuda.FloatTensor), requires_grad=False)
conv_layer.filter_layer = True
self.filters.append(conv_layer)
else:
raise Exception('Unknown latent channel setting %s' % (self.latent_channels))
# if self.model_training:
self.collected_ratios = [deque(maxlen=10000) for i in range(self.num_channels)]
# else:
if constant_Z is not None:
self.HR_mask,self.LR_mask = masks['HR'],masks['LR']
self.constant_Z = (constant_Z.to(self.LR_mask.device)*self.LR_mask).sum(dim=(2,3))/self.LR_mask.sum()
reference_derivatives = {}
for ref_image in reference_images.keys():
reference_derivatives[ref_image] = []
for filter in self.filters:
reference_derivatives[ref_image].append(filter(reference_images[ref_image]))
reference_derivatives[ref_image] = torch.stack(reference_derivatives[ref_image], 0)
reference_derivatives[ref_image] = torch.cat([reference_derivatives[ref_image]**2,torch.prod(reference_derivatives[ref_image],dim=0,keepdim=True)],0)
reference_derivatives[ref_image] = (reference_derivatives[ref_image].mean(dim=2)*self.HR_mask[:-1,:-1]).sum(dim=(2,3))/self.HR_mask[:-1,:-1].sum()
reference_derivatives['tensor_normalizer'] = torch.sqrt(torch.prod(torch.stack([torch.mean(torch.cat([reference_derivatives[ref_image][i] for ref_image in reference_images.keys()])) for i in range(2)]))).item()
for ref_image in reference_images.keys():
reference_derivatives[ref_image] = [(val/(reference_derivatives['tensor_normalizer']+self.NOISE_STD)).item() for val in reference_derivatives[ref_image]]
self.reference_derivatives = reference_derivatives
def forward(self, data):
image_shape = list(data['SR'].size())
LOWER_PERCENTILE,HIGHER_PERCENTILE = 5,95
if self.model_training:
cur_Z = data['Z'].mean(dim=(2,3))
else:
cur_Z = self.constant_Z
if self.latent_channels == 'STD_1dir':
dir_filter_output_SR = self.filter(data['SR'])
dir_filter_output_HR = self.filter(data['HR'])
dir_magnitude_ratio = dir_filter_output_SR.abs().mean(dim=(1, 2, 3)) / (
dir_filter_output_HR.abs().mean(dim=(1, 2, 3)) + self.NOISE_STD)
STD_ratio = data['SR'].contiguous().view(tuple(image_shape[:2] + [-1])).std(dim=-1).mean(1) /\
(data['HR'].contiguous().view(tuple(image_shape[:2] + [-1])).std(dim=-1).mean(1) + self.NOISE_STD)
            # build measured_values before the loop that records it into collected_ratios
            measured_values = torch.stack([STD_ratio, dir_magnitude_ratio], 1)
            normalized_Z = []
            for ch_num in range(self.num_channels):
                self.collected_ratios[ch_num] += [val.item() for val in list(measured_values[:, ch_num])]
                upper_bound = np.percentile(self.collected_ratios[ch_num], HIGHER_PERCENTILE)
                lower_bound = np.percentile(self.collected_ratios[ch_num], LOWER_PERCENTILE)
                normalized_Z.append(
                    (cur_Z[:, ch_num]) / 2 * (upper_bound - lower_bound) + np.mean([upper_bound, lower_bound]))
            normalized_Z = torch.stack(normalized_Z, 1)
elif self.latent_channels == 'STD_directional':
horizontal_derivative_SR = (data['SR'][:,:,:,2:]-data['SR'][:,:,:,:-2])[:,:,1:-1,:].unsqueeze(1)/2
vertical_derivative_SR = (data['SR'][:, :, 2:,:] - data['SR'][:, :, :-2, :])[:,:,:,1:-1].unsqueeze(1)/2
horizontal_derivative_HR = (data['HR'][:, :, :, 2:] - data['HR'][:, :, :, :-2])[:,:,1:-1,:].unsqueeze(1)/2
vertical_derivative_HR = (data['HR'][:, :, 2:, :] - data['HR'][:, :, :-2, :])[:,:,:,1:-1].unsqueeze(1)/2
dir_normal = cur_Z[:,1:3]
dir_normal = dir_normal/torch.sqrt(torch.sum(dir_normal**2,dim=1,keepdim=True))
dir_filter_output_SR = (dir_normal.unsqueeze(2).unsqueeze(3).unsqueeze(4)*torch.cat([horizontal_derivative_SR,vertical_derivative_SR],dim=1)).sum(1)
dir_filter_output_HR = (dir_normal.unsqueeze(2).unsqueeze(3).unsqueeze(4) * torch.cat([horizontal_derivative_HR, vertical_derivative_HR],dim=1)).sum(1)
dir_magnitude_ratio = dir_filter_output_SR.abs().mean(dim=(1,2,3))/(dir_filter_output_HR.abs().mean(dim=(1,2,3))+self.NOISE_STD)
self.collected_ratios[1] += [val.item() for val in list(dir_magnitude_ratio)]
STD_ratio = (data['SR'][:,:,1:-1,1:-1]-dir_filter_output_SR).abs().mean(dim=(1,2,3))/((data['HR'][:,:,1:-1,1:-1]-dir_filter_output_HR).abs().mean(dim=(1,2,3))+self.NOISE_STD)
self.collected_ratios[0] += [val.item() for val in list(STD_ratio)]
STD_upper_bound = np.percentile(self.collected_ratios[0], HIGHER_PERCENTILE)
STD_lower_bound = np.percentile(self.collected_ratios[0],LOWER_PERCENTILE)
dir_magnitude_upper_bound = np.percentile(self.collected_ratios[1], HIGHER_PERCENTILE)
dir_magnitude_lower_bound = np.percentile(self.collected_ratios[1], LOWER_PERCENTILE)
mag_normal = (cur_Z[:,1:3]**2).sum(1).sqrt()
normalized_Z = torch.stack([cur_Z[:,0]*(STD_upper_bound-STD_lower_bound)+np.mean([STD_upper_bound,STD_lower_bound]),
mag_normal/np.sqrt(2)*(dir_magnitude_upper_bound-dir_magnitude_lower_bound)+np.mean([dir_magnitude_upper_bound,dir_magnitude_lower_bound])],1)
measured_values = torch.stack([STD_ratio, dir_magnitude_ratio], 1)
        elif 'structure_tensor' in self.latent_channels:
ZERO_CENTERED_IxIy = False
if not self.model_training:
RATIO_LOSS = 'No'
elif self.latent_channels == 'SVD_structure_tensor':
RATIO_LOSS = 'OnlyDiagonals'
elif self.latent_channels=='SVDinNormedOut_structure_tensor':
RATIO_LOSS = 'SingleNormalizer'
else:
RATIO_LOSS = 'OnlyDiagonals' #'No','All','OnlyDiagonals','Diagonals_IxIyRelative'
assert not (RATIO_LOSS=='All' and ZERO_CENTERED_IxIy),'Do I want to combine these two flags?'
derivatives_SR,derivatives_HR = [],[]
for filter in self.filters:
derivatives_SR.append(filter(data['SR']))
if RATIO_LOSS!='No':
derivatives_HR.append(filter(data['HR']))
non_squared_derivatives_SR = torch.stack(derivatives_SR,0)
derivatives_SR = torch.cat([non_squared_derivatives_SR**2,torch.prod(non_squared_derivatives_SR,dim=0,keepdim=True)],0)
if self.model_training:
derivatives_SR = derivatives_SR.mean(dim=(2, 3,4)) # In ALL configurations, I also average all values before taking ratios - should think whether it might be a problem.
else:
derivatives_SR = (derivatives_SR.mean(dim=2)*self.HR_mask[:-1,:-1]).sum(dim=(2,3))/self.HR_mask[:-1,:-1].sum()
if self.latent_channels == 'SVD_structure_tensor':
lambda0_SR,lambda1_SR,theta_SR = SVD_Symmetric_2x2(*derivatives_SR)
images_validity_4_backprop = ValidStructTensorIndicator(*derivatives_SR)
else:
measured_values = [derivatives_SR[i] for i in range(derivatives_SR.size(0))]
if RATIO_LOSS!='No':
non_squared_derivatives_HR = torch.stack(derivatives_HR, 0)
derivatives_HR = torch.cat([non_squared_derivatives_HR**2,torch.prod(non_squared_derivatives_HR,dim=0,keepdim=True)],0)
derivatives_HR = derivatives_HR.mean(dim=(2, 3, 4))
if self.latent_channels == 'SVD_structure_tensor':
lambda0_HR, lambda1_HR, theta_HR = SVD_Symmetric_2x2(*derivatives_HR)
images_validity_4_backprop = images_validity_4_backprop*ValidStructTensorIndicator(*derivatives_HR)
measured_values = [lambda0_SR/(lambda0_HR+self.NOISE_STD),lambda1_SR/(lambda1_HR+self.NOISE_STD),theta_SR]
# measured_values = [val.mean(dim=(1,2,3)).to() for val in measured_values]
elif self.latent_channels=='SVDinNormedOut_structure_tensor':
tensor_normalizer = torch.prod(torch.sqrt(derivatives_HR[:2]),dim=0)
measured_values = [measured_val/(tensor_normalizer+self.NOISE_STD) for measured_val in measured_values]
else:
measured_values = [measured_values[i]/((derivatives_HR[i]+torch.sign(measured_values[i])*self.NOISE_STD)
if (i<2 or RATIO_LOSS=='All') else 1) for i in range(derivatives_SR.size(0))]
elif not self.model_training:
measured_values = [measured_val / (self.reference_derivatives['tensor_normalizer'] + self.NOISE_STD) for measured_val in measured_values]
normalized_Z = []
for i in range(len(measured_values)):
if self.model_training:
self.collected_ratios[i] += [val.item() for val in measured_values[i]]
upper_bound = np.percentile(self.collected_ratios[i], HIGHER_PERCENTILE)
                    lower_bound = np.percentile(self.collected_ratios[i], LOWER_PERCENTILE)
from clustviz.denclue import (
gaussian_density,
gradient_gaussian_density,
square_wave_density,
FindRect,
pop_cubes,
highly_pop_cubes,
check_connection,
find_connected_cubes,
near_with_cube,
near_without_cube,
density_attractor,
assign_cluster,
DENCLUE,
)
import numpy as np
from sklearn.datasets import make_blobs
def test_gaussian_density():
D = np.array([[1, 0], [0, 0], [2, 0]])
point = np.array([0, 0])
res = gaussian_density(point, D, 1, dist="euclidean")
assert round(res, 2) == 1.74
def test_gradient_gaussian_density():
D = np.array([[1, 0], [0, 2]])
point = np.array([0, 0])
res = gradient_gaussian_density(point, D, 1, dist="euclidean")
first_component = round(res[0], 2) == 0.61
second_component = round(res[1], 2) == 0.27
assert first_component & second_component
def test_square_wave_density():
D = np.array([[1, 0], [0, 0], [2, 0]])
point = np.array([0, 0])
res = square_wave_density(point, D, 1, dist="euclidean")
assert res == 2
def test_pop_cubes():
D = np.array([[0, 0], [0, 1], [0, 2]])
res = pop_cubes(D, 1)
expected_first = {
(0, 0): {"num_points": 2, "linear_sum": np.array([0, 1]), "points_coords": np.array([[0, 0], [0, 1]])},
(0, 1): {"num_points": 1, "linear_sum": np.array([0, 2]), "points_coords": np.array([[0, 2]])},
}
expected_second = {
(0, 0): (-0.05, -0.05, 1.95, 1.95),
(0, 1): (-0.05, 1.95, 1.95, 3.95),
}
condition0 = res[0][(0, 0)]["num_points"] == expected_first[(0, 0)]["num_points"]
condition1 = (res[0][(0, 0)]["linear_sum"] == expected_first[(0, 0)]["linear_sum"]).all()
condition2 = (res[0][(0, 0)]["points_coords"] == expected_first[(0, 0)]["points_coords"]).all()
condition3 = res[0][(0, 1)]["num_points"] == expected_first[(0, 1)]["num_points"]
condition4 = (res[0][(0, 1)]["linear_sum"] == expected_first[(0, 1)]["linear_sum"]).all()
condition5 = (res[0][(0, 1)]["points_coords"] == expected_first[(0, 1)]["points_coords"]).all()
condition6 = res[1] == expected_second
assert condition0 & condition1 & condition2 & condition3 & condition4 & condition5 & condition6
def test_FindRect():
coord_dict = {
(0, 0): (-0.05, -0.05, 1.95, 1.95),
(0, 1): (-0.05, 1.95, 1.95, 3.95),
}
point = np.array([0, 2])
assert FindRect(point, coord_dict) == (0, 1)
def test_highly_pop_cubes():
z = {
(0, 0): {"num_points": 3, "linear_sum": np.array([1, 2]), "points_coords": np.array([[0, 0], [0, 1], [1, 1]])},
(0, 1): {"num_points": 1, "linear_sum": np.array([0, 2]), "points_coords": np.array([[0, 2]])}
}
res = {(0, 0): {"num_points": 3, "linear_sum": np.array([1, 2]),
"points_coords": np.array([[0, 0], [0, 1], [1, 1]])}}
res = highly_pop_cubes(z, 2)
condition0 = res[(0, 0)]["num_points"] == z[(0, 0)]["num_points"]
condition1 = (res[(0, 0)]["linear_sum"] == z[(0, 0)]["linear_sum"]).all()
condition2 = (res[(0, 0)]["points_coords"] == z[(0, 0)]["points_coords"]).all()
assert condition0 & condition1 & condition2
def test_check_connection():
hpc = {
(0, 0): {"num_points": 2, "linear_sum": np.array([0, 1]), "points_coords": np.array([[0, 0], [0, 1]])},
(0, 1): {"num_points": 1, "linear_sum": np.array([0, 2]), "points_coords": np.array([[0, 2]])}
}
assert check_connection(hpc[(0, 0)], hpc[0, 1], 1)
def test_find_connected_cubes():
z = {
(0, 0): {"num_points": 4, "linear_sum": np.array([2.1, 3.0]),
"points_coords":
|
np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.1, 1.0]])
|
numpy.array
|
"""
desispec.fiberfluxcorr
========================
Routines to compute fiber flux corrections
based on the fiber location, the exposure seeing,
and the target morphology.
"""
import numpy as np
from desiutil.log import get_logger
from desimodel.fastfiberacceptance import FastFiberAcceptance
from desimodel.io import load_platescale
def flat_to_psf_flux_correction(fibermap,exposure_seeing_fwhm=1.1) :
"""
Multiplicative factor to apply to the flat-fielded spectroscopic flux of a fiber
to calibrate the spectrum of a point source, given the current exposure seeing
Args:
fibermap: fibermap of frame, astropy.table.Table
exposure_seeing_fwhm: seeing FWHM in arcsec
Returns: 1D numpy array with correction factor to apply to fiber fielded fluxes, valid for point sources.
"""
log = get_logger()
for k in ["FIBER_X","FIBER_Y"] :
if k not in fibermap.dtype.names :
log.warning("no column '{}' in fibermap, cannot do the flat_to_psf correction, returning 1")
return np.ones(len(fibermap))
#- Compute point source flux correction and fiber flux correction
fa = FastFiberAcceptance()
x_mm = fibermap["FIBER_X"]
y_mm = fibermap["FIBER_Y"]
bad = np.isnan(x_mm)|np.isnan(y_mm)
x_mm[bad]=0.
y_mm[bad]=0.
if "DELTA_X" in fibermap.dtype.names :
dx_mm = fibermap["DELTA_X"] # mm
else :
log.warning("no column 'DELTA_X' in fibermap, assume DELTA_X=0")
dx_mm = np.zeros(len(fibermap))
if "DELTA_Y" in fibermap.dtype.names :
dy_mm = fibermap["DELTA_Y"] # mm
else :
log.warning("no column 'DELTA_Y' in fibermap, assume DELTA_Y=0")
dy_mm = np.zeros(len(fibermap))
bad = np.isnan(dx_mm)|np.isnan(dy_mm)
dx_mm[bad]=0.
dy_mm[bad]=0.
ps = load_platescale()
isotropic_platescale = np.interp(x_mm**2+y_mm**2,ps['radius']**2,np.sqrt(ps['radial_platescale']*ps['az_platescale'])) # um/arcsec
sigmas_um = exposure_seeing_fwhm/2.35 * isotropic_platescale # um
offsets_um = np.sqrt(dx_mm**2+dy_mm**2)*1000. # um
fiber_frac = fa.value("POINT",sigmas_um,offsets_um)
# at large r,
# isotropic_platescale is larger
# fiber angular size is smaller
# fiber flat is smaller
# fiber flat correction is larger
# have to divide by isotropic_platescale^2
ok = (fiber_frac>0.01)
point_source_correction = np.zeros(x_mm.shape)
point_source_correction[ok] = 1./fiber_frac[ok]/isotropic_platescale[ok]**2
# normalize to one because this is a relative correction here
point_source_correction[ok] /= np.mean(point_source_correction[ok])
return point_source_correction
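# Hedged usage sketch (not part of desispec): only the FIBER_X/FIBER_Y (and
# optionally DELTA_X/DELTA_Y) columns are required, so a minimal call could
# look like this, assuming the desimodel data files are installed:
#
#     from astropy.table import Table
#     fibermap = Table()
#     fibermap["FIBER_X"] = np.array([0.0, 100.0])    # mm
#     fibermap["FIBER_Y"] = np.array([0.0, -50.0])    # mm
#     corr = flat_to_psf_flux_correction(fibermap, exposure_seeing_fwhm=1.1)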
def psf_to_fiber_flux_correction(fibermap,exposure_seeing_fwhm=1.1) :
"""
Multiplicative factor to apply to the psf flux of a fiber
to obtain the fiber flux, given the current exposure seeing.
The fiber flux is the flux one would collect for this object in a fiber of 1.5 arcsec diameter,
for a 1 arcsec seeing, FWHM (same definition as for the Legacy Surveys).
Args:
fibermap: fibermap of frame, astropy.table.Table
exposure_seeing_fwhm: seeing FWHM in arcsec
Returns: 1D numpy array with correction factor to apply to fiber fielded fluxes, valid for any sources.
"""
log = get_logger()
for k in ["FIBER_X","FIBER_Y"] :
if k not in fibermap.dtype.names :
log.warning("no column '{}' in fibermap, cannot do the flat_to_psf correction, returning 1".format(k))
return np.ones(len(fibermap))
# compute the seeing and plate scale correction
fa = FastFiberAcceptance()
x_mm = fibermap["FIBER_X"]
y_mm = fibermap["FIBER_Y"]
    bad = np.isnan(x_mm)|np.isnan(y_mm)
import torch
import os
import logging
import math
from pathlib import Path
import warnings
from rasterio import Affine, MemoryFile
from rasterio.profiles import DefaultGTiffProfile
from rasterio.warp import calculate_default_transform, reproject, Resampling
import numpy as np
from scipy.spatial import distance
from functools import reduce
import rasterio
from rasterio.mask import mask
from rasterio.crs import CRS
import rasterio.warp
import rasterio.shutil
from rasterio.enums import Resampling
from rasterio import shutil as rio_shutil
from rasterio.vrt import WarpedVRT
from rasterio.coords import disjoint_bounds
from rasterio.enums import Resampling
from rasterio.windows import Window
from deepbiosphere.scripts import new_window
from deepbiosphere.scripts import GEOCLEF_Utils as utils
from rasterio.transform import Affine
import rasterio.transform as transforms
import matplotlib as mpl
import matplotlib.patches as patches
import math
import matplotlib.cm as cm
import matplotlib as mpl
import time
# deepbio packages
from deepbiosphere.scripts.GEOCLEF_Config import paths
import deepbiosphere.scripts.GEOCLEF_Config as config
from deepbiosphere.scripts.GEOCLEF_Run import setup_dataset
import deepbiosphere.scripts.GEOCLEF_Utils as utils
import deepbiosphere.scripts.GEOCLEF_CNN as cnn
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, Polygon, MultiPolygon, LineString
import shapely.speedups
# Standard packages
import tempfile
import warnings
import urllib
import shutil
import os
# Less standard, but still pip- or conda-installable
import matplotlib.pyplot as plt
import numpy as np
import rasterio
import re # regex
import rtree
import shapely
import pickle
# pip install progressbar2, not progressbar
import progressbar
from geopy.geocoders import Nominatim
from rasterio.merge import merge
from tqdm import tqdm
import fiona
import fiona.transform
import requests
import json
import torch
import numpy as np
## fields
# not all the NAIP tiles are in the same coordinate reference system
# this is WGS84, which the VRTs are converted to
NAIP_CRS='EPSG:4326'
ALPHA_NODATA = 9999
class DownloadProgressBar():
"""
https://stackoverflow.com/questions/37748105/how-to-use-progressbar-module-with-urlretrieve
"""
def __init__(self):
self.pbar = None
def __call__(self, block_num, block_size, total_size):
if not self.pbar:
self.pbar = progressbar.ProgressBar(max_value=total_size)
self.pbar.start()
downloaded = block_num * block_size
if downloaded < total_size:
self.pbar.update(downloaded)
else:
self.pbar.finish()
# very important class!
class NAIPTileIndex:
"""
Utility class for performing NAIP tile lookups by location.
"""
tile_rtree = None
tile_index = None
base_path = None
def __init__(self, index_blob_root, index_files, base_path=None, temp_dir=None):
temp_dir = os.path.join(tempfile.gettempdir(),'naip') if temp_dir is None else temp_dir
if base_path is None:
base_path = temp_dir
os.makedirs(base_path,exist_ok=True)
for file_path in index_files:
download_url(index_blob_root + file_path, base_path + '/' + file_path,
progress_updater=DownloadProgressBar())
self.base_path = base_path
        # tile_rtree is an rtree storing bounding boxes for the tifs
self.tile_rtree = rtree.index.Index(base_path + "/tile_index")
self.tile_index = pickle.load(open(base_path + "/tiles.p", "rb"))
def lookup_tile(self, lat, lon):
""""
Given a lat/lon coordinate pair, return the list of NAIP tiles that contain
that location.
Returns a list of COG file paths.
"""
point = shapely.geometry.Point(float(lon),float(lat))
intersected_indices = list(self.tile_rtree.intersection(point.bounds)) # oh wow so this rtree does ALL the heavy lifting, phew....
intersected_files = []
tile_intersection = False
for idx in intersected_indices:
intersected_file = self.tile_index[idx][0]
intersected_geom = self.tile_index[idx][1]
if intersected_geom.contains(point): # Ohh I see, so it might be an rtree miss so still have to check to be sure
tile_intersection = True
intersected_files.append(intersected_file)
if not tile_intersection and len(intersected_indices) > 0: # How can this be??
print('''Error: there are overlaps with tile index,
but no tile completely contains selection''')
return None
elif len(intersected_files) <= 0:
print("No tile intersections")
return None
else:
return intersected_files
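# Hedged usage sketch (the URL and index file names below are placeholders,
# not values from this module):
#
#     index = NAIPTileIndex(index_blob_root="https://example.com/naip-index/",
#                           index_files=["tile_index.dat", "tile_index.idx",
#                                        "tiles.p"],
#                           base_path="/tmp/naip")
#     cogs = index.lookup_tile(37.87, -122.27)   # list of COG paths, or None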
def download_url(url, destination_filename=None, progress_updater=None, force_download=False):
"""
Download a URL to a temporary file
"""
# This is not intended to guarantee uniqueness, we just know it happens to guarantee
# uniqueness for this application.
if destination_filename is None:
url_as_filename = url.replace('://', '_').replace('/', '_')
destination_filename = \
os.path.join(temp_dir,url_as_filename)
if (not force_download) and (os.path.isfile(destination_filename)):
print('Bypassing download of already-downloaded file {}'.format(os.path.basename(url)))
return destination_filename
print('Downloading file {} to {}'.format(os.path.basename(url),destination_filename),end='')
urllib.request.urlretrieve(url, destination_filename, progress_updater)
assert(os.path.isfile(destination_filename))
nBytes = os.path.getsize(destination_filename)
print('...done, {} bytes.'.format(nBytes))
return destination_filename
def display_naip_tile(filename):
"""
Display a NAIP tile using rasterio.
"""
# NAIP tiles are enormous; downsize for plotting in this notebook
dsfactor = 10
with rasterio.open(filename) as raster:
# NAIP imagery has four channels: R, G, B, IR
#
# Stack RGB channels into an image; we won't try to render the IR channel
#
# rasterio uses 1-based indexing for channels.
h = int(raster.height/dsfactor)
w = int(raster.width/dsfactor)
print('Resampling to {},{}'.format(h,w))
r = raster.read(1, out_shape=(1, h, w))
g = raster.read(2, out_shape=(1, h, w))
b = raster.read(3, out_shape=(1, h, w))
        rgb = np.dstack((r,g,b))
"""
Tests for FitBenchmarking object
"""
from __future__ import absolute_import, division, print_function
import inspect
import os
import unittest
import numpy as np
from fitbenchmarking import mock_problems
from fitbenchmarking.cost_func.nlls_cost_func import NLLSCostFunc
from fitbenchmarking.hessian.analytic_hessian import \
Analytic as AnalyticHessian
from fitbenchmarking.jacobian.scipy_jacobian import Scipy
from fitbenchmarking.parsing.parser_factory import parse_problem_file
from fitbenchmarking.utils.fitbm_result import FittingResult
from fitbenchmarking.utils.options import Options
class FitbmResultTests(unittest.TestCase):
"""
Tests for FitBenchmarking results object
"""
def setUp(self):
"""
Setting up FitBenchmarking results object
"""
self.options = Options()
mock_problems_dir = os.path.dirname(inspect.getfile(mock_problems))
problem_dir = os.path.join(mock_problems_dir, "cubic.dat")
self.problem = parse_problem_file(problem_dir, self.options)
self.problem.correct_data()
self.chi_sq = 10
self.minimizer = "test_minimizer"
self.runtime = 0.01
self.params = np.array([1, 3, 4, 4])
self.initial_params = np.array([0, 0, 0, 0])
self.cost_func = NLLSCostFunc(self.problem)
self.jac = Scipy(self.cost_func)
self.jac.method = "2-point"
self.hess = AnalyticHessian(self.cost_func.problem, self.jac)
self.result = FittingResult(
options=self.options, cost_func=self.cost_func, jac=self.jac,
hess=self.hess, chi_sq=self.chi_sq, runtime=self.runtime,
minimizer=self.minimizer, initial_params=self.initial_params,
params=self.params, error_flag=0)
self.min_chi_sq = 0.1
self.result.min_chi_sq = self.min_chi_sq
self.min_runtime = 1
self.result.min_runtime = self.min_runtime
def test_fitting_result_str(self):
"""
Test that the fitting result can be printed as a readable string.
"""
self.assertEqual(str(self.result),
"+================================+\n"
"| FittingResult |\n"
"+================================+\n"
"| Cost Function | NLLSCostFunc |\n"
"+--------------------------------+\n"
"| Problem | cubic |\n"
"+--------------------------------+\n"
"| Software | None |\n"
"+--------------------------------+\n"
"| Minimizer | test_minimizer |\n"
"+--------------------------------+\n"
"| Jacobian | Scipy |\n"
"+--------------------------------+\n"
"| Hessian | Analytic |\n"
"+--------------------------------+\n"
"| Chi Squared | 10 |\n"
"+--------------------------------+\n"
"| Runtime | 0.01 |\n"
"+--------------------------------+")
def test_init_with_dataset_id(self):
"""
Tests to check that the multifit id is setup correctly
"""
chi_sq = [10, 5, 1]
minimizer = "test_minimizer"
runtime = 0.01
params = [np.array([1, 3, 4, 4]),
np.array([2, 3, 57, 8]),
np.array([4, 2, 5, 1])]
initial_params = np.array([0, 0, 0, 0])
        self.problem.data_x = [np.array([3, 2, 1, 4])]
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
# Import the .fits file which contains all the pec dat and
# extract the information.
hdulist = fits.open('pec_dat_650_950_lowdens.fits')
pec_temps = hdulist[0].data # The array of temps in eV
n_meta = hdulist[1].data # Array listing the metastable #
pec_dens = hdulist[2].data # Density array
pec_wave = hdulist[3].data # Wavelengths corresponiding to each PEC
pec_pec = hdulist[4].data.T # 3-D array containing all PEC's
data = np.loadtxt('temp_5_dens2.5e16.dat')
ALEXIS_wavelengths = data[0]
ALEXIS_spect = data[1]
wavelist = np.zeros((np.size(pec_wave),2))
for i in range(0,np.size(pec_wave)):
wavelist[i,0] = i
wavelist[i,1] = pec_wave[i]
templist = np.zeros((np.size(pec_temps),2))
for i in range(0,np.size(pec_temps)):
templist[i,0] = i
templist[i,1] = pec_temps[i]
denslist = np.zeros((np.size(pec_dens),2))
for i in range(0,np.size(pec_dens)):
denslist[i,0] = i
denslist[i,1] = pec_dens[i]
# Declare some variables
size_meta = n_meta.size # Number of metastables
n_pec = pec_wave.size # Number of PEC's for each metastable
wl_min = int(pec_wave[0]) # smallest wavelength
wl_max = pec_wave[n_pec - 1] # largest wavelength
# Spectrometer Resolution (nm)
lambda_d = 2.0
fwhm = 2.0*(np.log(2.0))**0.5*lambda_d
# These variables will be needed for Doppler Broadening
dwavel = np.amin(fwhm)/20.0
n_wavel = int(((wl_max-wl_min)/dwavel))
flux_lam = np.linspace(wl_min, wl_max, n_wavel)
# Create dictionaries to hold the individual metastable PECs and the broadened flux
pec_meta = dict()
flux = dict()
for i in range(size_meta):
pec_meta[i] = pec_pec[:, :, n_pec*i:n_pec*(i+1)]
flux[i] = np.zeros(n_wavel)
# Choose a temp and density
t_indx = 8
d_indx = 18
print("t_e = {:6.2e} eV".format(templist[t_indx,1]))
print("n_e = {:6.2e} cm^-3 ".format(denslist[d_indx,1]))
# %% Doppler Broadening
# Doppler Broaden the array for the specified t_indx and d_indx
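# Each discrete PEC line below is spread onto the wavelength grid with a
# Gaussian-like profile of width lambda_d,
#   PEC * (2*pi)**(-0.5)/lambda_d * exp(-((lambda - lambda_0)/lambda_d)**2),
# and contributions are only accumulated within +/- 5*fwhm of each line centre.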
for i in range(n_wavel):
for j in range(pec_wave.size):
for k in range(size_meta):
if flux_lam[i] > (pec_wave[j]-fwhm*5.0) and flux_lam[i] < \
(pec_wave[j]+fwhm*5.0):
flux[k][i] = flux[k][i] + pec_meta[k][t_indx, d_indx, j] * \
(2 * np.pi) ** (-0.5)/lambda_d * np.exp(-abs((flux_lam[i] \
- pec_wave[j]) / lambda_d) ** 2.0)
# %%
# %%
from astropy.io import fits
from scipy.optimize import curve_fit
from scipy import stats
import os
os.chdir(spect_dir)
import datetime
now = datetime.datetime.now()
import tkinter
from tkinter import filedialog
# we don't want a full GUI, so keep the root window from appearing
root = tkinter.Tk()
root.withdraw()
# show an "Open" dialog box and return the path to the selected file
filename = filedialog.askopenfilename()
hdulist = fits.open(filename)
# hdu_info = hdulist.info()
def spect_get():
hdu_size = np.size(hdulist)
scan_lambda = hdulist[hdu_size - 2].data.T
wavelengths =
|
np.array(scan_lambda[0])
|
numpy.array
|
import pandas as pd
import numpy as np
from .. import extensions as ext
import pytest
import itertools
def _as_2d_waterfall_signal(arg, repeat_n=3):
return np.transpose(np.tile(arg, (repeat_n, 1)))
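# For example, _as_2d_waterfall_signal(np.array([1, 2]), repeat_n=3) returns the
# (2, 3) array [[1, 1, 1], [2, 2, 2]], i.e. the 1-D signal repeated across 3 columns.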
@pytest.fixture
def butterworth_test_signals():
sr = 100
t = np.linspace(0, 1, sr, False)
mix_10_20_sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
single_10_sig = np.sin(2*np.pi*10*t)
constant_sig = np.ones(t.shape)
zero_sig = np.zeros(t.shape)
sigs = [mix_10_20_sig,
_as_2d_waterfall_signal(mix_10_20_sig),
constant_sig,
_as_2d_waterfall_signal(constant_sig)
]
low_pass_expects = [single_10_sig,
_as_2d_waterfall_signal(single_10_sig),
constant_sig,
_as_2d_waterfall_signal(constant_sig), ]
low_pass_cases = list(
itertools.starmap(
lambda sig, expected: (
{'X': sig, 'sr': sr, 'cut_offs': 15, 'order': 4,
'filter_type': 'low'},
expected
),
zip(sigs, low_pass_expects)
)
)
band_pass_expects = [zero_sig,
_as_2d_waterfall_signal(zero_sig),
zero_sig,
_as_2d_waterfall_signal(zero_sig)]
band_pass_cases = list(
itertools.starmap(
lambda sig, expected: (
{'X': sig, 'sr': sr, 'cut_offs': [13, 17], 'order': 4,
'filter_type': 'pass'},
expected),
zip(sigs, band_pass_expects)
)
)
return low_pass_cases + band_pass_cases
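# The cases above encode what TestNumpy.test_butterworth checks: a 15 Hz low-pass
# keeps only the 10 Hz component of the 10 Hz + 20 Hz mixture (and leaves constant
# signals untouched), while a 13-17 Hz band-pass of these signals is close to zero.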
@pytest.fixture
def resample_test_signals():
list_sr = [100, 80, 50, 30]
test_cases = []
signals = []
for sr in list_sr:
t = np.linspace([0, 0], [1, 1], sr, False, axis=0)
mix_2_5_sig = np.sin(2*np.pi*2*t) + np.sin(2*np.pi*5*t)
signals.append(mix_2_5_sig)
sr_signal_pairs = zip(list_sr, signals)
test_cases = itertools.combinations(sr_signal_pairs, 2)
return test_cases
@pytest.fixture
def regularize_test_signals():
results = []
for et, expected in zip([1, 1.1, 1.11, 1.01], [80, 88, 88, 80]):
t = np.linspace(0, et, num=85)
X = np.random.rand(len(t), 3)
results.append((t, X, 80, expected))
return results
@pytest.fixture(scope='module')
def test_sensor_data(spades_lab_data):
dw_sensor_file = spades_lab_data['subjects']['SPADES_1']['sensors']['DW'][0]
da_sensor_file = spades_lab_data['subjects']['SPADES_1']['sensors']['DA'][0]
dw_data = pd.read_csv(dw_sensor_file, parse_dates=[0])
da_data = pd.read_csv(da_sensor_file, parse_dates=[0])
st = max([dw_data.iloc[0, 0], da_data.iloc[0, 0]])
dw_data = ext.pandas.segment_by_time(
dw_data, st, st + pd.Timedelta(12.8 * 5, unit='seconds'))
da_data = ext.pandas.segment_by_time(
da_data, st, st + pd.Timedelta(12.8 * 5, unit='seconds'))
return dw_data, da_data
class TestPandas:
def test_merge_all(self):
df1 = pd.DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5], 'group': [1, 1, 2, 2]})
df2 = pd.DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8], 'group': [1, 1, 2, 2]})
df3 = pd.DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [9, 10, 11, 12], 'group': [1, 1, 2, 2]})
dfs = [df1, df2, df3]
merged, cols_with_suffixes = ext.pandas.merge_all(*dfs, suffix_names=['DW', 'DA', 'DT'], suffix_cols=[
'value'], on=['key', 'group'], how='inner', sort=False)
np.testing.assert_array_equal(
cols_with_suffixes, ['value_DW', 'value_DA', 'value_DT'])
np.testing.assert_array_equal(
merged[['key', 'group']], df1[['key', 'group']]
)
np.testing.assert_array_equal(
set(merged.columns), set(['key', 'value_DW', 'value_DA', 'value_DT', 'group']))
merged, cols_with_suffixes = ext.pandas.merge_all(df1, suffix_names=['DW'], suffix_cols=[
'value'], on=['key', 'group'], how='inner', sort=False)
np.testing.assert_array_equal(merged.values, df1.values)
np.testing.assert_array_equal(
set(merged.columns), set(['key', 'value_DW', 'group']))
def test_filter_column(self):
df1 = pd.DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5], 'group': [1, 1, 2, 2]})
filtered = ext.pandas.filter_column(
df1, col='key', values_to_filter_out=['foo'])
np.testing.assert_array_equal(filtered['value'].values, [2, 3])
np.testing.assert_array_equal(filtered['key'].values, ['bar', 'baz'])
filtered = ext.pandas.filter_column(
df1, col='value', values_to_filter_out=[2, 5])
np.testing.assert_array_equal(filtered['value'].values, [1, 3])
np.testing.assert_array_equal(filtered['key'].values, ['foo', 'baz'])
@pytest.mark.parametrize('start_time', [None, "2015-09-24 14:17:00.000"])
@pytest.mark.parametrize('stop_time', [None, "2015-09-24 14:17:00.000"])
def test_get_common_timespan(self, test_sensor_data, start_time, stop_time):
dw_data, da_data = test_sensor_data
st, et = ext.pandas.get_common_timespan(
dw_data, da_data, st=start_time, et=stop_time)
if start_time is None:
assert st.timestamp() == pd.Timestamp(
'2015-09-24 14:17:48.013').timestamp()
else:
assert st.timestamp() == pd.Timestamp(start_time).timestamp()
if stop_time is None:
assert et.timestamp() == pd.Timestamp(
'2015-09-24 14:18:52.000').timestamp()
else:
assert et.timestamp() == pd.Timestamp(stop_time).timestamp()
@pytest.mark.parametrize('start_time', [None, "2015-09-24 14:17:48.000"])
@pytest.mark.parametrize('stop_time', [None, "2015-09-24 14:17:00.000"])
def test_split_into_windows(self, test_sensor_data, start_time, stop_time):
dw_data, da_data = test_sensor_data
window_start_markers = ext.pandas.split_into_windows(
dw_data, da_data, step_size=12.8, st=start_time, et=stop_time)
if start_time is None and stop_time is None:
assert len(window_start_markers) == 5
assert np.all(
np.diff(window_start_markers)[:-1].astype('timedelta64[ms]').astype(int) == 12800)
assert window_start_markers[0].timestamp() == pd.Timestamp(
'2015-09-24 14:17:48.013').timestamp()
elif start_time is not None and stop_time is None:
assert len(window_start_markers) == 5
assert np.all(
np.diff(window_start_markers)[:-1].astype('timedelta64[ms]').astype(int) == 12800)
assert window_start_markers[0].timestamp() == pd.Timestamp(
start_time).timestamp()
elif stop_time is not None:
assert len(window_start_markers) == 0
def test_fixed_window_slider(self, test_sensor_data):
def sample_counter(*dfs, st, et, placements):
result = {
'START_TIME': [st],
'STOP_TIME': [et]
}
for df, p in zip(dfs, placements):
result[p] = [df.shape[0]]
return pd.DataFrame.from_dict(result)
dw_df, da_df = test_sensor_data
count_df = ext.pandas.fixed_window_slider(
*[dw_df, da_df], slider_fn=sample_counter, window_size=12.8, step_size=None, placements=['DW', 'DA'])
assert count_df.shape[1] == 4
assert count_df.shape[0] == 5
assert (count_df.iloc[:, 2] == 1024).all()
class TestNumpy:
def test_mutate_nan(self):
# test signal without nan
X = np.random.rand(10, 3)
X_new = ext.numpy.mutate_nan(X)
np.testing.assert_array_almost_equal(X, X_new)
X = np.random.rand(10, 1)
X_new = ext.numpy.mutate_nan(X)
np.testing.assert_array_almost_equal(X, X_new)
# test signal with single sample without nan
X = np.array([[0.]])
X_new = ext.numpy.mutate_nan(X)
np.testing.assert_array_almost_equal(X, X_new)
X = np.array([[0., 0, 0, ]])
X_new = ext.numpy.mutate_nan(X)
np.testing.assert_array_almost_equal(X, X_new)
# test signal with nan
X = np.atleast_2d(np.sin(2*np.pi * 1 * np.arange(0, 1, 1.0 / 100))).T
X_nan = np.copy(X)
X_nan[5:10, 0] = np.nan
X_new = ext.numpy.mutate_nan(X_nan)
np.testing.assert_array_almost_equal(X, X_new, decimal=4)
X = np.tile(np.sin(2*np.pi * 1 *
np.arange(0, 1, 1.0 / 100.)), (3, 1)).T
X_nan = np.copy(X)
X_nan[5:10, 0:3] = np.nan
X_new = ext.numpy.mutate_nan(X_nan)
np.testing.assert_array_almost_equal(X, X_new, decimal=4)
def test_butterworth(self, butterworth_test_signals):
for args, expected in butterworth_test_signals:
result = ext.numpy.butterworth(**args)
diff = expected[10:90, ] - result[10:90, ]
assert expected.shape == result.shape
assert np.all(diff < 0.1)
def test_resample(self, resample_test_signals):
for test_signal_pair_1, test_signal_pair_2 in resample_test_signals:
sr1 = test_signal_pair_1[0]
signal1 = test_signal_pair_1[1]
sr2 = test_signal_pair_2[0]
signal2 = test_signal_pair_2[1]
result12 = ext.numpy.resample(signal1, sr1, sr2)
expected12 = signal2
st = int(sr2/10)
et = int(sr2 - sr2/10)
diff12 = expected12[st:et, ] - \
result12[st:et, ]
print('{}, {}'.format(sr1, sr2))
assert expected12.shape == result12.shape
assert np.all(diff12 < 0.1)
result21 = ext.numpy.resample(signal2, sr2, sr1)
expected21 = signal1
st = int(sr1/10)
et = int(sr1 - sr1/10)
diff21 = expected21[st:et, ] - \
result21[st:et, ]
print('{}, {}'.format(sr2, sr1))
assert expected21.shape == result21.shape
assert np.all(diff21 < 0.1)
def test_apply_over_subwins(self):
func = np.mean
# test on single row array with subwins and subwin_samples not set
X = ext.numpy.atleast_float_2d(np.array([1., 1., 1.]))
result = ext.numpy.apply_over_subwins(
X, func, subwin_samples=None, subwins=None, axis=0)
assert np.array_equal(result, X)
# test on single row array with subwin_samples not set
X = ext.numpy.atleast_float_2d(np.array([1., 1., 1.]))
result = ext.numpy.apply_over_subwins(
X, func, subwin_samples=None, subwins=1, axis=0)
assert np.array_equal(result, X)
# test on single row array with subwins not set
X = ext.numpy.atleast_float_2d(np.array([1., 1., 1.]))
result = ext.numpy.apply_over_subwins(
X, func, subwin_samples=1, subwins=None, axis=0)
assert np.array_equal(result, X)
# test on single row array with subwins equal to zero
X = ext.numpy.atleast_float_2d(np.array([1., 1., 1.]))
result = ext.numpy.apply_over_subwins(
X, func, subwin_samples=2, subwins=None, axis=0)
assert np.array_equal(result, X)
result = ext.numpy.apply_over_subwins(
X, func, subwin_samples=None, subwins=0, axis=0)
assert np.array_equal(result, X)
# test on single row array with subwin_samples equal to zero
X = ext.numpy.atleast_float_2d(np.array([1., 1., 1.]))
result = ext.numpy.apply_over_subwins(
X, func, subwin_samples=0, subwins=None, axis=0)
assert np.array_equal(result, X)
result = ext.numpy.apply_over_subwins(
X, func, subwin_samples=None, subwins=2, axis=0)
assert np.array_equal(result, X)
# test on 2d array
X = ext.numpy.atleast_float_2d(
|
np.ones((10, 3))
|
numpy.ones
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 10:20:27 2019
Metropolis Hastings step for Gorkha case
@author: duttar
"""
# %%
# load libraries
import numpy as np
import scipy.io as sio
import random
import sys
import os
from scipy.optimize import lsq_linear
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.tri as mtri
from collections import namedtuple
sys.path.append('additional_scripts/')
sys.path.append('additional_scripts/geompars/')
sys.path.append('additional_scripts/greens/')
sys.path.append('additional_scripts/SMC-python/')
from Gorkhamakemesh import *
from greenfunction import *
from posteriorGorkha import *
from SMC import *
import time
#%% Define functions
# %%
mat_c1 = sio.loadmat('additional_scripts/GPS_subsampledGorkha.mat')
covall = mat_c1['covall']
subdisp = mat_c1['subdisp']
subloc1 = mat_c1['subloc']
sublos = mat_c1['sublos']
numdis = subloc1.shape[0]
subloc = np.hstack((subloc1,np.zeros((numdis,1))))
for i in range(numdis):
covall[i,i] = 1.19*covall[i,i]
invcov = np.linalg.inv(covall)
W = np.linalg.cholesky(invcov).T
surf_pts = np.array([[215.8972,3.0950e+03],[442.8739,3.0097e+03]])
bestgeo = np.array([1.0927 ,0.0241, 5.6933,-20.0000, 2.1345, \
0.3529, 4.4643, 0.0336, -12, -2.8494, 0.0043])
disct_x = 20; disct_z = 12
NT1 = namedtuple('NT1', \
'trired p q r xfault yfault zfault disct_x disct_z surfpts model')
mesh = NT1(None, None, None, None, None, None, None, \
disct_x, disct_z, surf_pts, bestgeo)
# get the geometry
start = time.time()
finalmesh = Gorkhamesh(mesh, NT1)
end = time.time()
print(end - start)
################# make a plot #################################
#fig = plt.figure(figsize=plt.figaspect(0.5))
#ax = fig.add_subplot(1, 1, 1, projection='3d')
#ax.plot_trisurf(finalmesh.p, finalmesh.q, finalmesh.r, \
# triangles=finalmesh.trired, cmap=plt.cm.Spectral)
#plt.axis('equal')
#ax.set(xlim=(200, 500), ylim=(2900, 3400), zlim=(-25, -5))
#plt.show()
##############################################################
# run the greens function
start = time.time()
#grn1, obsdata = grn_func(subloc, subdisp, sublos, finalmesh.trired, \
# finalmesh.p, finalmesh.q, finalmesh.r)
end = time.time()
print(end - start)
##############################################################
lowslip1 = np.zeros((1,finalmesh.trired.shape[0]))
lowslip2 = -10*np.ones((1,finalmesh.trired.shape[0]))
maxslip1 = 25*np.ones((1,finalmesh.trired.shape[0]))
maxslip2 = 10*np.ones((1,finalmesh.trired.shape[0]))
LBslip = np.append(lowslip1, lowslip2)
UBslip = np.append(maxslip1, maxslip2)
LB = np.append(
|
np.array([-5, -.5, 3, -25, -5, -.5, -5, -.5, -13, -8, -8])
|
numpy.array
|
import unittest
import matplotlib
import matplotlib.pyplot
matplotlib.use("Agg")
matplotlib.pyplot.switch_backend("Agg")
class Test(unittest.TestCase):
def test_cantilever_beam(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import CantileverBeam
ndim = 3
problem = CantileverBeam(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(0.01, 0.05, num)
x[:, 1] = 0.5
x[:, 2] = 0.5
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_robot_arm(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import RobotArm
ndim = 2
problem = RobotArm(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(0.0, 1.0, num)
x[:, 1] = np.pi
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_rosenbrock(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import Rosenbrock
ndim = 2
problem = Rosenbrock(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(-2, 2.0, num)
x[:, 1] = 0.0
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_sphere(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import Sphere
ndim = 2
problem = Sphere(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(-10, 10.0, num)
x[:, 1] = 0.0
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_branin(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import Branin
ndim = 2
problem = Branin(ndim=ndim)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(-5.0, 10.0, num)
x[:, 1] = np.linspace(0.0, 15.0, num)
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_lp_norm(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import LpNorm
ndim = 2
problem = LpNorm(ndim=ndim, order=2)
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(-1.0, 1.0, num)
x[:, 1] = np.linspace(-1.0, 1.0, num)
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_tensor_product(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import TensorProduct
ndim = 2
problem = TensorProduct(ndim=ndim, func="cos")
num = 100
x = np.ones((num, ndim))
x[:, 0] = np.linspace(-1, 1.0, num)
x[:, 1] = 0.0
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_torsion_vibration(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import TorsionVibration
ndim = 15
problem = TorsionVibration(ndim=ndim)
num = 100
x = np.ones((num, ndim))
for i in range(ndim):
x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
x[:, 0] = np.linspace(1.8, 2.2, num)
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_water_flow(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import WaterFlow
ndim = 8
problem = WaterFlow(ndim=ndim)
num = 100
x = np.ones((num, ndim))
for i in range(ndim):
x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
x[:, 0] = np.linspace(0.05, 0.15, num)
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_welded_beam(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import WeldedBeam
ndim = 3
problem = WeldedBeam(ndim=ndim)
num = 100
x = np.ones((num, ndim))
for i in range(ndim):
x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
x[:, 0] = np.linspace(5.0, 10.0, num)
y = problem(x)
yd = np.empty((num, ndim))
for i in range(ndim):
yd[:, i] = problem(x, kx=i).flatten()
print(y.shape)
print(yd.shape)
plt.plot(x[:, 0], y[:, 0])
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_wing_weight(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.problems import WingWeight
ndim = 10
problem = WingWeight(ndim=ndim)
num = 100
x = np.ones((num, ndim))
for i in range(ndim):
x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
x[:, 0] = np.linspace(150.0, 200.0, num)
y = problem(x)
yd =
|
np.empty((num, ndim))
|
numpy.empty
|
"""An implementation of a distributed EP algorithm described in an article
"Expectation propagation as a way of life" (arXiv:1412.4869).
This implementation works with parallel EP but the calculations are done
serially with shared memory between workers.
The most recent version of the code can be found on GitHub:
https://github.com/gelman/ep-stan
"""
# Licensed under the 3-clause BSD license.
# http://opensource.org/licenses/BSD-3-Clause
#
# Copyright (C) 2014 <NAME>
# All rights reserved.
__all__ = ['Worker', 'Master']
import sys
import time
import multiprocessing
import numpy as np
from scipy import linalg
# LAPACK qr routine
dgeqrf_routine = linalg.get_lapack_funcs('geqrf')
from .util import (
invert_normal_params,
olse,
get_last_fit_sample,
load_stan,
copy_fit_samples,
stan_sample_time
)
from pystan.constants import MAX_UINT as pystan_max_uint
def _sample_stan(queue, path, data, stan_params, other_params=None):
"""Load and fit Stan model in a subprocess.
Implemented for multiprocessing.
Parameters
----------
queue : multiprocessing.Queue
Queue into which the results are put (see Returns).
path : str
Path to the stan model.
data : dict
Data for the sampling.
stan_params : dict
Keyword arguments passed to the Stan sampling.
other_params : sequence of str, optional
List of additional parameter names. If provided, the associated samples
are also returned.
Returns
-------
samps : ndarray
samples of phi
lastsamp : dict
the last sample of the chains for next iteration initialisation
duration : float
sampling time
msteps : float
mean stepsize
mrhat : float
max Rhat
other_samp : dict
additional requested samples (returned only if such are requested)
"""
# Sample from the model
sm = load_stan(path)
fit, duration = stan_sample_time(sm, data=data, **stan_params)
# Extract samples
dphi = data['mu_phi'].shape[0]
samp = copy_fit_samples(fit, 'phi')
# Get the last sample of all
lastsamp = get_last_fit_sample(fit)
# Mean stepsize
msteps = np.mean([
np.mean(p['stepsize__'])
for p in fit.get_sampler_params()
])
# Max Rhat (from all but last row in the last column)
mrhat = np.max(fit.summary()['summary'][:-1,-1])
# Returned values
ret = [samp, lastsamp, duration, msteps, mrhat]
# Extract other params
if other_params:
other_samp = {
par : fit.extract(pars=par)[par]
for par in other_params
}
ret.append(other_samp)
# Put returns into the queue
queue.put(ret)
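# Illustrative call pattern for _sample_stan (hypothetical model path and data;
# this mirrors how Worker.tilted invokes it below):
#   q = multiprocessing.Queue()
#   p = multiprocessing.Process(
#       target=_sample_stan,
#       args=(q, 'site_model.stan', data, {'chains': 4, 'iter': 1000}))
#   p.start()
#   samp, lastsamp, duration, msteps, mrhat = q.get()
#   p.join()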
class Worker(object):
"""Worker responsible of calculations for each site.
Parameters
----------
index : integer
The index of this site
stan_model : StanModel or str
The StanModel instance responsible for the MCMC sampling.
dphi : int
The length of the parameter vector phi.
X, y: ndarray
The data included in this site.
A : dict, optional
Additional data included in this site.
Other parameters
----------------
See the class DistributedEP
"""
DEFAULT_OPTIONS = {
'init_prev' : True,
'prec_estim' : 'sample',
'prec_estim_skip' : 0,
'verbose' : False
}
DEFAULT_STAN_PARAMS = {
'chains' : 4,
'iter' : 1000,
'warmup' : None,
'thin' : 1,
'init' : 'random'
}
# Available values for option `prec_estim`
PREC_ESTIM_OPTIONS = ('sample', 'olse')
RESERVED_STAN_PARAMETER_NAMES = ['X', 'y', 'N', 'D', 'mu_phi', 'Omega_phi']
def __init__(
self, index, stan_model, dphi, X, y, A=None, **options):
# Parse options
# Set missing options to defaults
for (kw, default) in self.DEFAULT_OPTIONS.items():
if kw not in options:
options[kw] = default
for (kw, default) in self.DEFAULT_STAN_PARAMS.items():
if kw not in options:
options[kw] = default
# Extract stan parameters
self.stan_params = {}
for (kw, val) in options.items():
if kw in self.DEFAULT_STAN_PARAMS:
self.stan_params[kw] = val
elif kw not in self.DEFAULT_OPTIONS:
# Unrecognised option
raise TypeError("Unexpected option '{}'".format(kw))
if A is None:
A = {}
# Allocate space for calculations
# After calling the method cavity, self.Mat holds the precision matrix
# and self.vec holds the mean of the cavity distribution. After calling
# the method tilted, self.Mat holds the unnormalised covariance matrix
# and self.vec holds the mean of the tilted distributions.
self.Mat = np.empty((dphi,dphi), order='F')
self.vec = np.empty(dphi)
# The instance variable self.phase indicates if self.Mat and self.vec
# contains the cavity or tilted distribution parameters:
# 0: neither
# 1: cavity
# 2: tilted
self.phase = 0
# In the case of tilted distribution, the instance variable self.nsamp
# indicates how many samples have contributed to the unnormalised
# covariance matrix in self.Mat
self.nsamp = None
# Current iteration global approximations
self.Q = None
self.r = None
# Temporary arrays for calculations
self.temp_M = np.empty((dphi,dphi), order='F')
self.temp_v = np.empty(dphi)
# Data for stan model in method tilted
self.data = dict(
N=X.shape[0],
X=X,
y=y,
mu_phi=self.vec,
Omega_phi=self.Mat.T, # Mat transposed in order to get C-order
**A
)
# Add param `D` only if `X` is two dimensional
if len(X.shape) == 2:
self.data['D'] = X.shape[1]
# Store other instance variables
self.index = index
self.stan_model = stan_model
self.dphi = dphi
self.iteration = 0
# The last elapsed time
self.last_time = None
self.last_msteps = None
self.last_mrhat = None
# The samples saved from the last fit
self.saved_samples = None
# Initialisation
self.init_prev = options['init_prev']
if self.init_prev:
# Store the original init method so that it can be reset, when
# an iteration fails
self.init_orig = self.stan_params['init']
if not isinstance(self.init_orig, str):
# If init_prev is used, init option has to be a string
raise ValueError("Arg. `init` has to be a string if "
"`init_prev` is True")
# Tilted precision estimate method
self.prec_estim = options['prec_estim']
if not self.prec_estim in self.PREC_ESTIM_OPTIONS:
raise ValueError("Invalid value for option `prec_estim`")
if self.prec_estim != 'sample':
self.prec_estim_skip = options['prec_estim_skip']
else:
self.prec_estim_skip = 0
# Verbose option
self.verbose = options['verbose']
def cavity(self, Q, r, Qi, ri):
"""Form the cavity distribution and convert them to moment parameters.
Parameters
----------
Q, r : ndarray
Natural parameters of the global approximation
Qi, ri : ndarray
Natural site parameters
Returns
-------
pos_def
True if the cavity distribution covariance matrix is positive
definite. False otherwise.
"""
self.Q = Q
self.r = r
np.subtract(self.Q, Qi, out=self.Mat)
np.subtract(self.r, ri, out=self.vec)
# Check if positive definite and solve the mean
try:
np.copyto(self.temp_M, self.Mat)
cho = linalg.cho_factor(self.temp_M, overwrite_a=True)
linalg.cho_solve(cho, self.vec, overwrite_b=True)
except linalg.LinAlgError:
# Not positive definite
self.phase = 0
return False
else:
self.phase = 1
return True
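# Note on the algebra above: in natural parameters the cavity distribution is a
# plain subtraction, Q_cav = Q - Q_i and r_cav = r - r_i, and the cavity mean used
# as the prior for the tilted sampling is m_cav = Q_cav^{-1} r_cav, which the
# Cholesky solve computes in place into self.vec.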
def tilted(self, dQi, dri, save_samples=None, seed=None):
"""Estimate the tilted distribution parameters.
This method estimates the tilted distribution parameters and calculates
the resulting site parameter updates into the given arrays. The cavity
distribution has to be calculated before this method is called, i.e. the
method cavity has to be run before this.
After calling this method the instance variables self.Mat and self.vec
hold the tilted distribution moment parameters (note however that the
covariance matrix is unnormalised and the number of samples contributing
to this matrix is stored in the instance variable self.nsamp).
Parameters
----------
dQi, dri : ndarray
Output arrays where the site parameter updates are placed.
save_samples : sequence of str, optional
Additional parameter names, whose samples are to be saved in
instance variable `saved_samples` (dict with {pname:samples}).
seed : np.random.RandomState or int, optional
Seed for the Stan sampling
Returns
-------
pos_def
True if the estimated tilted distribution covariance matrix is
positive definite. False otherwise.
"""
if self.phase != 1:
raise RuntimeError('Cavity has to be calculated before tilted.')
# set next seed for the sampling
if isinstance(seed, np.random.RandomState):
rng = seed
else:
rng = np.random.RandomState(seed)
self.stan_params['seed'] = rng.randint(0, pystan_max_uint)
# Sample from the model
if isinstance(self.stan_model, str):
# run in a subprocess
q = multiprocessing.Queue()
args = [q, self.stan_model, self.data, self.stan_params]
if save_samples:
args.append(save_samples)
p = multiprocessing.Process(target=_sample_stan, args=args)
p.start()
if save_samples:
samp, lastsamp, dur, msteps, mrhat, saved_samp = q.get()
self.saved_samples = saved_samp
else:
samp, lastsamp, dur, msteps, mrhat = q.get()
samp = np.copy(samp, order='F') # Needs to be copied for `owndata`
p.join()
# store info
self.last_time = dur
self.last_msteps = msteps
self.last_mrhat = mrhat
else:
# run in the same process
fit, max_sampling_time = stan_sample_time(
self.stan_model, data=self.data, **self.stan_params)
# store info
# runtime
self.last_time = max_sampling_time
# mean stepsize
self.last_msteps = np.mean([
np.mean(p['stepsize__'])
for p in fit.get_sampler_params()
])
# max Rhat (from all but last row in the last column)
self.last_mrhat = np.max(fit.summary()['summary'][:-1,-1])
# Extract samples
samp = copy_fit_samples(fit, 'phi')
lastsamp = get_last_fit_sample(fit)
if save_samples:
# Extract other params
self.saved_samples = {
par : fit.extract(pars=par)[par]
for par in save_samples
}
# Dereference the fit
fit = None
if self.verbose:
print('\n sampling runtime: {:.4}'.format(self.last_time))
print(' mean stepsize: {:.4}'.format(self.last_msteps))
print(' max Rhat: {:.4}'.format(self.last_mrhat))
if self.init_prev:
# Store the last sample of each chain
self.stan_params['init'] = lastsamp
self.nsamp = samp.shape[0]
# Estimate precision matrix
try:
# Basic sample estimate
if self.prec_estim == 'sample' or self.prec_estim_skip > 0:
# Mean
mt = np.mean(samp, axis=0, out=self.vec)
# Center samples
samp -= mt
# Use QR-decomposition for obtaining Cholesky of the scatter
# matrix (only R needed, Q-less algorithm would be nice)
_, _, _, info = dgeqrf_routine(samp, overwrite_a=True)
if info:
raise linalg.LinAlgError(
"dgeqrf LAPACK routine failed with error code {}"
.format(info)
)
# Copy the relevant part of the array into contiguous memory
np.copyto(self.Mat, samp[:self.dphi,:])
invert_normal_params(
self.Mat, mt, out_A=dQi, out_b=dri,
cho_form=True
)
# Unbiased (for normal distr.) natural parameter estimates
unbias_k = (self.nsamp - self.dphi - 2)
dQi *= unbias_k
dri *= unbias_k
if self.prec_estim_skip > 0:
self.prec_estim_skip -= 1
# Optimal linear shrinkage estimate
elif self.prec_estim == 'olse':
# Mean
mt = np.mean(samp, axis=0, out=self.vec)
# Center samples
samp -= mt
# Sample covariance
np.dot(samp.T, samp, out=self.Mat.T)
# Normalise self.Mat into dQi
np.divide(self.Mat, self.nsamp, out=dQi)
# Estimate
olse(dQi, self.nsamp, P=self.Q, out='in-place')
np.dot(dQi, mt, out=dri)
else:
raise ValueError("Invalid value for option `prec_estim`")
# Calculate the difference into the output arrays
np.subtract(dQi, self.Q, out=dQi)
np.subtract(dri, self.r, out=dri)
except linalg.LinAlgError:
# Precision estimate failed
pos_def = False
self.phase = 0
dQi.fill(0)
dri.fill(0)
if self.init_prev:
# Reset initialisation method
self.stan_params['init'] = self.init_orig
else:
# Set return and phase flag
pos_def = True
self.phase = 2
self.iteration += 1
return pos_def
class Master(object):
"""Manages the distributed EP algorithm.
Parameters
----------
site_model : StanModel or string
Model for sampling from the tilted distribution of a site. Can be
provided either directly as a PyStan model instance or as filename
string pointing to a pickled model or stan source code. The model has a
restricted structure (see Notes).
X : ndarray
Explanatory variable data in an ndarray of shape (N,D), where N is the
number of observations and D is the number of variables. `X` should be
C-contiguous (copy made if not). N.B. One dimensional array of shape
(N,) is also acceptable, in which case D is not provided to the stan
model.
y : ndarray
Response variable data in an ndarray of shape (N,), where N is the
number of observations (same N as for X).
A : dict, optional
Additional data for the site model. The keys in the dict are the names
of the variables and the values are the corresponding objects, e.g.
integers or ndarrays. These arrays are distributed as a whole for each
site. Example: {'var':[3,2]} distributes var=[3,2] to each site.
A_k : dict, optional
Additional data for the site model. The keys in the dict are the names
of the variables and the values are array-likes of length K, where K is
the number of sites. The first element of the array-likes are
distributed to the first site etc. Example: {'var':[3,2]} distributes
var=3 to the first site and var=2 to the second site.
A_n : dict, optional
Additional sliced data arrays provided for the site model. The keys in
the dict are the names of the variables and the values are the
corresponding ndarrays of size (N, ...). These arrays are sliced for each
site (similarly to `X` and `y`).
site_ind, site_ind_ord, site_sizes : ndarray, optional
Arrays indicating which sample belong to which site. Providing one of
these keyword arguments is enough. If none of these are provided, a
clustering is performed. Description of individual arguments:
site_ind : Array of length N containing the site number
(non-negative integer) of each point.
site_ind_ord : Similar to `site_ind` but the sites are in order,
i.e. the samples are sorted.
site_sizes : Array of size K, where K is the number of sites,
indicating the number of samples in each site.
When this argument is provided, the samples are
assumed to be in order (similarly to argument
`site_ind_ord`).
Providing `site_ind_ord` or `site_sizes` is preferable over
`site_ind` because then the data arrays `X` and `y` do not have to be
copied.
dphi : int, optional
Number of parameters for the site model, i.e. the length of phi
(see Notes). Has to be given if prior is not provided.
prior : dict, optional
The parameters of the multivariate normal prior distribution for phi
provided in a dict containing either:
1) moment parameters with keys 'm' and 'S'
2) natural parameters with keys 'r' and 'Q'.
The matrix 'Q' should be F contiguous (copy made if not). Argument
`dphi` can be omitted if a prior is given. If prior is not given, the
standard normal distribution is used.
Other parameters
----------------
init_site : scalar or ndarray, optional
The initial site precision matrix. If not provided, improper uniform
N(0,inf I), i.e. Q is all zeroes, is used. If scalar, N(0,A^2/K I),
where A = `init_site`, is used.
overwrite_model : bool, optional
If a string for `site_model` is provided, the model is compiled even
if a precompiled model is found (see util.load_stan).
chains : int, optional
The number of chains in the site_model mcmc sampling. Default is 4.
iter : int, optional
The number of samples in the site_model mcmc sampling. Default
is 1000.
warmup : int, optional
The number of samples to be discarded from the beginning of each chain
in the site_model mcmc sampling. Default is nsamp//2.
thin : int, optional
Thinning parameter for the site_model mcmc sampling. Default is 2.
init_prev : bool, optional
Indicates if the last sample of each chain in the site mcmc sampling is
used as the starting point for the next iteration sampling. Default is
True.
init : {'random', '0', 0, function returning dict, list of dict}, optional
Specifies how the initialisation is performed for the sampler (see
StanModel.sampling). If `init_prev` is True, this parameter affects only
the sampling on the first iteration, and strings 'random' and '0' are
the only acceptable values for this argument.
prec_estim : {'sample', 'olse', 'glassocv'}
Method for estimating the precision matrix from the tilted distribution
samples. The available methods are:
'sample' : basic sample estimate
'olse' : optimal linear shrinkage estimate (see util.olse)
'glassocv' : graphical lasso estimate with cross validation
prec_estim_skip : int
Non-negative integer indicating for how many iterations from the beginning
the tilted distribution precision matrix is estimated using the basic
sample estimate instead of the method selected with `prec_estim`.
df0 : float or function, optional
The initial damping factor for each iteration. Must be a number in the
range (0,1]. If a number is given, a constant initial damping factor for
each iteration is used. If a function is given, it must return the
desired initial damping factor when called with the iteration number.
If not provided, damping factor of 1/K is used.
df_decay : float, optional
The decay multiplier for the damping factor used if the resulting
posterior covariance or cavity distributions are not positive definite.
Default value is 0.8.
df_treshold : float, optional
The threshold value for the damping factor. If the damping factor decays
below this value, the algorithm is stopped. Default is 1e-6.
Notes
-----
TODO: Describe the structure of the site model.
"""
# Return codes for method run
INFO_OK = 0
INFO_INVALID_PRIOR = 1
INFO_DF_TRESHOLD_REACHED_GLOBAL = 2
INFO_DF_TRESHOLD_REACHED_CAVITY = 3
INFO_ALL_SITES_FAIL = 4
# Constrain pos.def. min eig.val.
MIN_EIG_TRESHOLD = 1e-5
MIN_EIG = 0.5
# List of constructor default keyword arguments
DEFAULT_KWARGS = dict(
A = {},
A_n = {},
A_k = {},
site_ind = None,
site_ind_ord = None,
site_sizes = None,
dphi = None,
prior = None,
init_site = None,
df0 = None,
df_decay = 0.8,
df_treshold = 1e-6,
overwrite_model = False
)
def __init__(self, site_model, X, y, **kwargs):
# Parse keyword arguments
self.worker_options = {}
for (kw, val) in kwargs.items():
if ( kw in Worker.DEFAULT_OPTIONS
or kw in Worker.DEFAULT_STAN_PARAMS
):
self.worker_options[kw] = val
elif kw not in self.DEFAULT_KWARGS:
# Unrecognised keyword argument
raise TypeError("Unexpected keyword argument '{}'".format(kw))
# Set missing kwargs to defaults
for (kw, default) in self.DEFAULT_KWARGS.items():
if kw not in kwargs:
kwargs[kw] = default
# Set missing worker options to defaults
for (kw, default) in Worker.DEFAULT_OPTIONS.items():
if kw not in self.worker_options:
self.worker_options[kw] = default
for (kw, default) in Worker.DEFAULT_STAN_PARAMS.items():
if kw not in self.worker_options:
self.worker_options[kw] = default
# Stan model source (or instance)
self.site_model = site_model
# Validate X
self.N = X.shape[0]
if len(X.shape) == 2:
self.D = X.shape[1]
elif len(X.shape) == 1:
self.D = None
else:
raise ValueError("Argument `X` should be one or two dimensional")
self.X = X
# Validate y
if len(y.shape) != 1:
raise ValueError("Argument `y` should be one dimensional")
if y.shape[0] != self.N:
raise ValueError("The shapes of `y` and `X` does not match")
self.y = y
# Process site indices
# K : number of sites
# Nk : number of samples per site
# k_ind : site index of each sample
# k_lim : sample index limits
if not kwargs['site_sizes'] is None:
# Size of each site provided
self.Nk = kwargs['site_sizes']
self.K = len(self.Nk)
self.k_lim = np.concatenate(([0], np.cumsum(self.Nk)))
self.k_ind = np.empty(self.N, dtype=np.int64)
for k in range(self.K):
self.k_ind[self.k_lim[k]:self.k_lim[k+1]] = k
elif not kwargs['site_ind_ord'] is None:
# Sorted array of site indices provided
self.k_ind = kwargs['site_ind_ord']
self.Nk = np.bincount(self.k_ind)
self.K = len(self.Nk)
self.k_lim = np.concatenate(([0], np.cumsum(self.Nk)))
elif not kwargs['site_ind'] is None:
# Unsorted array of site indices provided
k_ind = kwargs['site_ind']
k_sort = k_ind.argsort(kind='mergesort') # Stable sort
self.k_ind = k_ind[k_sort]
self.Nk = np.bincount(self.k_ind)
self.K = len(self.Nk)
self.k_lim = np.concatenate(([0], np.cumsum(self.Nk)))
# Copy X and y to a new sorted array
self.X = self.X[k_sort]
self.y = self.y[k_sort]
else:
raise NotImplementedError("Auto clustering not yet implemented")
if self.k_lim[-1] != self.N:
raise ValueError("Site definition does not match with `X`")
if np.any(self.Nk == 0):
raise ValueError("Empty sites: {}. Index the sites from 1 to K-1"
.format(np.nonzero(self.Nk==0)[0]))
if self.K < 2:
raise ValueError("Distributed EP should be run with at least "
"two sites.")
# Ensure that X and y are C contiguous
self.X = np.ascontiguousarray(self.X)
self.y = np.ascontiguousarray(self.y)
# Process A
self.A = kwargs['A']
# Check for name clashes
for key in self.A.keys():
if key in Worker.RESERVED_STAN_PARAMETER_NAMES:
raise ValueError("Additional data name {} clashes.".format(key))
# Process A_n
self.A_n = kwargs['A_n'].copy()
for (key, val) in kwargs['A_n'].items():
if val.shape[0] != self.N:
raise ValueError("The shapes of `A_n[{}]` and `X` does not "
"match".format(repr(key)))
# Check for name clashes
if ( key in Worker.RESERVED_STAN_PARAMETER_NAMES
or key in self.A
):
raise ValueError("Additional data name {} clashes.".format(key))
# Ensure C-contiguous
if not val.flags['CARRAY']:
self.A_n[key] = np.ascontiguousarray(val)
# Process A_k
self.A_k = kwargs['A_k']
for (key, val) in self.A_k.items():
# Check for length
if len(val) != self.K:
raise ValueError("Array-like length mismatch in `A_k` "
"(should be: {}, found: {})"
.format(self.K, len(val)))
# Check for name clashes
if ( key in Worker.RESERVED_STAN_PARAMETER_NAMES
or key in self.A
or key in self.A_n
):
raise ValueError("Additional data name {} clashes.".format(key))
# Initialise prior
prior = kwargs['prior']
self.dphi = kwargs['dphi']
if prior is None:
# Use default prior
if self.dphi is None:
raise ValueError("If arg. `prior` is not provided, "
"arg. `dphi` has to be given")
self.Q0 = np.eye(self.dphi).T # Transposed for F contiguous
self.r0 = np.zeros(self.dphi)
else:
# Use provided prior
if not isinstance(prior, dict):
raise TypeError("Argument `prior` is of wrong type")
if 'Q' in prior and 'r' in prior:
# In a natural form already
self.Q0 =
|
np.asfortranarray(prior['Q'])
|
numpy.asfortranarray
|
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from math import pi
import numpy as np
def estimate_mode(img, lo=None, hi=None, plo=1, phi=70, bins1=30,
flo=0.5, fhi=0.8, bins2=30,
return_fit=False, raiseOnWarn=False):
# Estimate sky level by: compute the histogram within [lo,hi], fit
# a parabola to the log-counts, return the argmax of that parabola.
# Coarse bin to find the peak (mode)
if lo is None:
lo = np.percentile(img, plo)
if hi is None:
hi = np.percentile(img, phi)
binedges1 = np.linspace(lo, hi, bins1+1)
counts1,e = np.histogram(img.ravel(), bins=binedges1)
bincenters1 = binedges1[:-1] + (binedges1[1]-binedges1[0])/2.
maxbin = np.argmax(counts1)
maxcount = counts1[maxbin]
mode = bincenters1[maxbin]
# Search for bin containing < {flo,fhi} * maxcount
ilo = maxbin
while ilo > 0:
ilo -= 1
if counts1[ilo] < flo*maxcount:
break
ihi = maxbin
while ihi < bins1-1:
ihi += 1
if counts1[ihi] < fhi*maxcount:
break
lo = bincenters1[ilo]
hi = bincenters1[ihi]
binedges = np.linspace(lo, hi, bins2)
counts,e = np.histogram(img.ravel(), bins=binedges)
bincenters = binedges[:-1] + (binedges[1]-binedges[0])/2.
b = np.log10(np.maximum(1, counts))
xscale = 0.5 * (hi - lo)
x0 = (hi + lo) / 2.
x = (bincenters - x0) / xscale
A = np.zeros((len(x), 3))
A[:,0] = 1.
A[:,1] = x
A[:,2] = x**2
res = np.linalg.lstsq(A, b)
X = res[0]
mx = -X[1] / (2. * X[2])
mx = (mx * xscale) + x0
warn = None
if not (mx > lo and mx < hi):
if raiseOnWarn:
raise ValueError('sky estimate not bracketed by peak: lo %f, sky %f, hi %f' % (lo, mx, hi))
warn = 'WARNING: sky estimate not bracketed by peak: lo %f, sky %f, hi %f' % (lo, mx, hi)
if return_fit:
bfit = X[0] + X[1] * x + X[2] * x**2
return (x * xscale + x0, b, bfit, mx, warn, binedges1,counts1)
return mx
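# Note on the fit above: with the log-counts modelled as b ~ X[0] + X[1]*x + X[2]*x**2,
# the parabola peaks at x = -X[1]/(2*X[2]); rescaling via mx*xscale + x0 converts this
# back to image units to give the returned sky estimate.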
def parse_ranges(s):
'''
Parse PBS job array-style ranges: NNN,MMM-NNN,PPP
*s*: string
Returns: [ int, int, ... ]
'''
tiles = []
words = s.split()
for w in words:
for a in w.split(','):
if '-' in a:
aa = a.split('-')
if len(aa) != 2:
raise RuntimeError('With an arg containing a dash, expect two parts, in word "%s"' % a)
start = int(aa[0])
end = int(aa[1])
for i in range(start, end+1):
tiles.append(i)
else:
tiles.append(int(a))
return tiles
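# For example (hypothetical input), parse_ranges('1,5-7 9') returns [1, 5, 6, 7, 9]:
# whitespace separates words, commas separate items, and 'a-b' expands inclusively.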
def patch_image(img, mask, dxdy = [(-1,0),(1,0),(0,-1),(0,1)],
required=None):
'''
Patch masked pixels by iteratively averaging non-masked neighboring pixels.
WARNING: this modifies BOTH the "img" and "mask" arrays!
mask: True for good pixels
required: if non-None: True for pixels you want to be patched.
dxdy: Pixels to average in, relative to pixels to be patched.
Returns True if patching was successful.
'''
assert(img.shape == mask.shape)
assert(len(img.shape) == 2)
h,w = img.shape
Nlast = -1
while True:
needpatching = np.logical_not(mask)
if required is not None:
needpatching *= required
I = np.flatnonzero(needpatching)
if len(I) == 0:
break
if len(I) == Nlast:
return False
#print 'Patching', len(I), 'pixels'
Nlast = len(I)
iy,ix = np.unravel_index(I, img.shape)
psum = np.zeros(len(I), img.dtype)
pn = np.zeros(len(I), int)
for dx,dy in dxdy:
ok = True
if dx < 0:
ok = ok * (ix >= (-dx))
if dx > 0:
ok = ok * (ix <= (w-1-dx))
if dy < 0:
ok = ok * (iy >= (-dy))
if dy > 0:
ok = ok * (iy <= (h-1-dy))
# darn, NaN * False = NaN, not zero.
finite = np.isfinite(img [iy[ok]+dy, ix[ok]+dx])
ok[ok] *= finite
psum[ok] += (img [iy[ok]+dy, ix[ok]+dx] *
mask[iy[ok]+dy, ix[ok]+dx])
pn[ok] += mask[iy[ok]+dy, ix[ok]+dx]
# print 'ix', ix
# print 'iy', iy
# print 'dx,dy', dx,dy
# print 'ok', ok
# print 'psum', psum
# print 'pn', pn
img.flat[I] = (psum / np.maximum(pn, 1)).astype(img.dtype)
mask.flat[I] = (pn > 0)
#print 'Patched', np.sum(pn > 0)
return True
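# Illustrative usage sketch (hypothetical 2x2 image with one bad pixel):
#   img = np.array([[1., 0.], [3., 4.]])
#   mask = np.array([[True, False], [True, True]])  # False marks the pixel to patch
#   patch_image(img, mask)
# fills img[0, 1] with the mean of its good neighbours, (1. + 4.)/2 = 2.5, and sets
# mask[0, 1] to True; note that both arrays are modified in place.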
def clip_wcs(wcs1, wcs2, makeConvex=True, pix1=None, pix2=None):
'''
Returns a pixel-space polygon in WCS1 after it is clipped by WCS2.
Returns an empty list if the two WCS headers do not intersect.
Note that due to a weakness in the clip_polygon method, wcs2 must be convex.
If makeConvex=True, we find the convex hull and clip with that.
If pix1 is not None, pix1=(xx,yy), xx and yy lists of pixel coordinates defining
the boundary of the image, in CLOCKWISE order; default is the edges and midpoints.
'''
if pix1 is None:
w1,h1 = wcs1.get_width(), wcs1.get_height()
x1,x2,x3 = 0.5, w1/2., w1+0.5
y1,y2,y3 = 0.5, h1/2., h1+0.5
xx = [x1, x1, x1, x2, x3, x3, x3, x2]
yy = [y1, y2, y3, y3, y3, y2, y1, y1]
else:
xx,yy = pix1
#rr,dd = wcs1.pixelxy2radec(xx, yy)
if pix2 is None:
w2,h2 = wcs2.get_width(), wcs2.get_height()
x1,x2,x3 = 0.5, w2/2., w2+0.5
y1,y2,y3 = 0.5, h2/2., h2+0.5
XX = [x1, x1, x1, x2, x3, x3, x3, x2]
YY = [y1, y2, y3, y3, y3, y2, y1, y1]
else:
XX,YY = pix2
rr,dd = wcs2.pixelxy2radec(XX, YY)
ok,XX,YY = wcs1.radec2pixelxy(rr, dd)
# XX,YY is the clip polygon in wcs1 pixel coords.
# Not necessarily clockwise at this point!
#print 'XX,YY', XX, YY
if makeConvex:
from scipy.spatial import ConvexHull
points = np.vstack((XX,YY)).T
#print 'points', points.shape
hull = ConvexHull(points)
#print 'Convex hull:', hull
#print hull.vertices
# ConvexHull returns the vertices in COUNTER-clockwise order. Reverse.
v = np.array(list(reversed(hull.vertices))).astype(int)
XX = XX[v]
YY = YY[v]
#plt.plot(XX, YY, 'm-')
#plt.plot(XX[0], YY[0], 'mo')
#plt.savefig('clip2.png')
else:
# Ensure points are listed in CLOCKWISE order.
crosses = []
for i in range(len(XX)):
j = (i + 1) % len(XX)
k = (i + 2) % len(XX)
xi,yi = XX[i], YY[i]
xj,yj = XX[j], YY[j]
xk,yk = XX[k], YY[k]
dx1, dy1 = xj - xi, yj - yi
dx2, dy2 = xk - xj, yk - yj
cross = dx1 * dy2 - dy1 * dx2
#print 'cross', cross
crosses.append(cross)
crosses = np.array(crosses)
#print 'cross products', crosses
if np.all(crosses >= 0):
# Reverse
#print 'Reversing wcs2 points'
XX = np.array(list(reversed(XX)))
YY = np.array(list(reversed(YY)))
clip = clip_polygon(list(zip(xx, yy)), list(zip(XX, YY)))
clip = np.array(clip)
if False:
import pylab as plt
plt.clf()
plt.plot(xx, yy, 'b.-')
plt.plot(xx[0], yy[0], 'bo')
plt.plot(XX, YY, 'r.-')
plt.plot(XX[0], YY[0], 'ro')
if len(clip) > 0:
plt.plot(clip[:,0], clip[:,1], 'm.-', alpha=0.5, lw=2)
plt.savefig('clip1.png')
return clip
def polygon_area(poly):
'''
NOTE, unlike many of the other methods in this module, takes:
poly = (xx,yy)
where xx,yy MUST repeat the starting point at the end of the polygon.
'''
xx,yy = poly
x,y = np.mean(xx), np.mean(yy)
area = 0.
for dx0,dy0,dx1,dy1 in zip(xx-x, yy-y, xx[1:]-x, yy[1:]-y):
# area: 1/2 cross product
area += np.abs(dx0 * dy1 - dx1 * dy0)
return 0.5 * area
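# For example, for the unit square with the starting point repeated at the end,
#   xx = np.array([0., 1., 1., 0., 0.]); yy = np.array([0., 0., 1., 1., 0.])
#   polygon_area((xx, yy))   # -> 1.0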
def clip_polygon(poly1, poly2):
'''
Returns a new polygon resulting from taking poly1 and clipping it
to lie inside poly2.
WARNING, the polygons must be listed in CLOCKWISE order.
WARNING, the clipping polygon, poly2, must be CONVEX.
'''
# from clipper import Clipper, Point, PolyType, ClipType, PolyFillType
# '''
# '''
# c = Clipper()
# p1 = [Point(x,y) for x,y in poly1]
# p2 = [Point(x,y) for x,y in poly2]
# c.AddPolygon(p1, PolyType.Subject)
# c.AddPolygon(p2, PolyType.Clip)
# solution = []
# pft = PolyFillType.EvenOdd
# result = c.Execute(ClipType.Intersection, solution, pft, pft)
# if len(solution) > 1:
# raise RuntimeError('Polygon clipping results in non-simple polygon')
# assert(result)
# #print 'Result:', result
# #print 'Solution:', solution
# return [(s.x, s.y) for s in solution[0]]
# Sutherland-Hodgman algorithm -- thanks, Wikipedia!
N2 = len(poly2)
# clip by each edge in turn.
for j in range(N2):
# target "left_right" value
clip1 = poly2[j]
clip2 = poly2[(j+1)%N2]
LRinside = _left_right(clip1, clip2, poly2[(j+2)%N2])
# are poly vertices inside or outside the clip polygon?
isinside = [_left_right(clip1, clip2, p) == LRinside
for p in poly1]
# the resulting clipped polygon
clipped = []
N1 = len(poly1)
for i in range(N1):
S = poly1[i]
E = poly1[(i+1)%N1]
Sin = isinside[i]
Ein = isinside[(i+1)%N1]
if Ein:
if not Sin:
clipped.append(line_intersection(clip1, clip2, S, E))
clipped.append(E)
else:
if Sin:
clipped.append(line_intersection(clip1, clip2, S, E))
poly1 = clipped
return poly1
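# Illustrative example: clipping a clockwise unit square by a larger clockwise
# square that fully contains it returns the unit square unchanged:
#   sq = [(0, 0), (0, 1), (1, 1), (1, 0)]
#   big = [(-1, -1), (-1, 2), (2, 2), (2, -1)]
#   clip_polygon(sq, big)   # -> [(0, 0), (0, 1), (1, 1), (1, 0)]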
def polygons_intersect(poly1, poly2):
'''
Determines whether the given 2-D polygons intersect.
poly1, poly2: np arrays with shape (N,2)
'''
# Check whether any points in poly1 are inside poly2,
# or vice versa.
for (px,py) in poly1:
if point_in_poly(px,py, poly2):
return (px,py)
for (px,py) in poly2:
if point_in_poly(px,py, poly1):
return (px,py)
# Check for intersections between line segments. O(n^2) brutish
N1 = len(poly1)
N2 = len(poly2)
for i in range(N1):
for j in range(N2):
xy = line_segments_intersect(poly1[i % N1, :], poly1[(i+1) % N1, :],
poly2[j % N2, :], poly2[(j+1) % N2, :])
if xy:
return xy
return False
def line_segments_intersect(xy1, xy2, xy3, xy4):
'''
Determines whether the two given line segments intersect;
(x1,y1) to (x2,y2)
and
(x3,y3) to (x4,y4)
'''
(x1,y1) = xy1
(x2,y2) = xy2
(x3,y3) = xy3
(x4,y4) = xy4
x,y = line_intersection((x1,y1),(x2,y2),(x3,y3),(x4,y4))
if x is None:
# Parallel lines
return False
if x1 == x2:
p1,p2 = y1,y2
p = y
else:
p1,p2 = x1,x2
p = x
if not ((p >= min(p1,p2)) and (p <= max(p1,p2))):
return False
if x3 == x4:
p1,p2 = y3,y4
p = y
else:
p1,p2 = x3,x4
p = x
if not ((p >= min(p1,p2)) and (p <= max(p1,p2))):
return False
return (x,y)
def line_intersection(xy1, xy2, xy3, xy4):
'''
Determines the point where the lines described by
(x1,y1) to (x2,y2)
and
(x3,y3) to (x4,y4)
intersect.
Note that this may be beyond the endpoints of the line segments.
Returns (None, None) if the lines are parallel.
'''
(x1,y1) = xy1
(x2,y2) = xy2
(x3,y3) = xy3
(x4,y4) = xy4
# This code started with the equation from Wikipedia,
# then I added special-case handling.
# bottom = ((x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4))
# if bottom == 0:
# raise RuntimeError("divide by zero")
# t1 = (x1 * y2 - y1 * x2)
# t2 = (x3 * y4 - y3 * x4)
# px = (t1 * (x3 - x4) - t2 * (x1 - x2)) / bottom
# py = (t1 * (y3 - y4) - t2 * (y1 - y2)) / bottom
# From http://wiki.processing.org/w/Line-Line_intersection
bx = float(x2) - float(x1)
by = float(y2) - float(y1)
dx = float(x4) - float(x3)
dy = float(y4) - float(y3)
b_dot_d_perp = bx*dy - by*dx
if b_dot_d_perp == 0:
return None,None
cx = float(x3) - float(x1)
cy = float(y3) - float(y1)
t = (cx*dy - cy*dx) / b_dot_d_perp
return x1 + t*bx, y1 + t*by
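# For example, line_intersection((0, 0), (1, 1), (0, 1), (1, 0)) returns (0.5, 0.5);
# parallel inputs return (None, None).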
def _left_right(xy1, xy2, xy3):
'''
is (x3,y3) to the 'left' or 'right' of the line from (x1,y1) to (x2,y2) ?
'''
(x1,y1) = xy1
(x2,y2) = xy2
(x3,y3) = xy3
dx2,dy2 = x2-x1, y2-y1
dx3,dy3 = x3-x1, y3-y1
return (dx2 * dy3 - dx3 * dy2) > 0
def point_in_poly(x, y, poly):
'''
Performs a point-in-polygon test for numpy arrays of *x* and *y*
values, and a polygon described as 2-d numpy array (with shape (N,2))
poly: N x 2 array
Returns a numpy array of bools.
'''
x = np.atleast_1d(x)
y =
|
np.atleast_1d(y)
|
numpy.atleast_1d
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import os
dimensions = range(1,11)
for dim in dimensions:
file0_name = "gauss_data/gaussian_same_projection_on_each_axis_{0}D_10000_0.0_1.0_1.0_0_euclidean.txt".format(dim)
file1_name = "gauss_data/gaussian_same_projection_on_each_axis_{0}D_10000_0.0_0.95_0.95_0_euclidean.txt".format(dim)
name = "gaussian_same_projection_on_each_axis_{0}D_10000_0.0_0.95_0.95_0".format(dim)
data0 = np.loadtxt(file0_name)[:,None]
data1 = np.loadtxt(file1_name)[:,None]
xmin = np.min( [np.min(data0[:,0]),np.min(data1[:,0])])
xmax = np.max( [
|
np.max(data0[:,0])
|
numpy.max
|
# -*- coding: utf-8 -*-
""" Research topics related to conference designs
<NAME> <<EMAIL>>
"""
from collections import OrderedDict
from typing import Union
import json_tricks
import copy
import json
import collections
import itertools
import numpy as np
import os
import sys
import time
import pickle
import shutil
from typing import Tuple, List, Any
from functools import reduce
import operator
from functools import lru_cache
from joblib import Parallel, delayed
try:
import matplotlib.pyplot as plt
except BaseException:
pass
import oapackage
from oapackage.oahelper import create_pareto_element
import oapackage.conference
import oapackage.markup as markup
import oapackage.oahelper as oahelper
from oapackage.markup import oneliner as e
import oaresearch
from oaresearch.research import citation
import oaresearch.filetools
from oapackage.conference import momentMatrix, modelStatistics, conferenceProjectionStatistics
#%%
class hashable_array(object):
r'''Hashable wrapper for ndarray objects.
Instances of ndarray are not hashable, meaning they cannot be added to
sets, nor used as keys in dictionaries. This is by design - ndarray
objects are mutable, and therefore cannot reliably implement the
__hash__() method.
The hashable class allows a way around this limitation. It implements
the required methods for hashable objects in terms of an encapsulated
ndarray object. This can be either a copied instance (which is safer)
or the original object (which requires the user to be careful enough
not to modify it).
'''
def __init__(self, wrapped, tight=False):
r'''Creates a new hashable object encapsulating an ndarray.
wrapped
The wrapped ndarray.
tight
Optional. If True, a copy of the input ndarray is created.
Defaults to False.
'''
self.__tight = tight
self.__wrapped = np.array(wrapped) if tight else wrapped
self.__hash = hash(self.__wrapped.tostring())
def __eq__(self, other):
return np.all(self.__wrapped == other.__wrapped)
def __hash__(self):
return self.__hash
def unwrap(self):
r'''Returns the encapsulated ndarray.
If the wrapper is "tight", a copy of the encapsulated ndarray is
returned. Otherwise, the encapsulated ndarray itself is returned.
'''
if self.__tight:
return np.array(self.__wrapped)
return self.__wrapped
def make_hashable_array(design):
return hashable_array(np.array(design))
# %%
DesignType = Union[oapackage.array_link, np.ndarray]
def conference_design_extensions(array: DesignType, verbose: int = 0, filter_symmetry : bool = True, conference_generator = None) -> List:
""" Return list of all extensions of a conference design
All possible extensions are generated, reduced by symmetry conditions.
For details see `oapackage.generateSingleConferenceExtensions`
Args:
array: Conference design to be extended
verbose: Verbosity level
filter_symmetry: If True then discard any designs that are not minimal according to the row permutations
from the row symmetry group of the input design
Returns:
List of extensions
"""
j1zero = 0
if not isinstance(array, oapackage.array_link):
array = oapackage.makearraylink(array)
conference_type = oapackage.conference_t(
array.n_rows, array.n_columns, j1zero)
zero_index = -1
filterj2 = 1
filterj3 = 0
filter_symmetry = True # we can use symmetry reduction, since the other filtering is not related to the symmetry of the design
if conference_generator is None:
extensions = oapackage.generateSingleConferenceExtensions(
array, conference_type, zero_index, verbose >= 2, filter_symmetry, filterj2, filterj3, filter_symmetry)
else:
# TODO assert proper conference class and symmetry types
extensions = conference_generator.generateCandidates(array)
extensions = [oapackage.hstack(array, extension)
for extension in extensions]
return list(extensions)
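# Illustrative usage (hypothetical input): for a conference design given either as an
# oapackage.array_link or as a numpy array `design_np` with entries in {-1, 0, +1},
#   extensions = conference_design_extensions(design_np)
# returns the designs obtained by appending one admissible extra column.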
def conference_design_has_extension(design: DesignType) -> bool:
    """ Return True if the specified conference design has an extension with an additional column

    Args:
        design: Conference design
    Returns:
        True if the design has extensions
    """
    design_np = np.array(design)
    ee = conference_design_extensions(design_np)
    return len(ee) > 0
#%%
from oaresearch.misc_utils import index_sorted_array
from collections import namedtuple
#%%
DesignStack = namedtuple('DesignStack', ['designs', 'nauty_designs', 'nauty_sort_index', 'nauty_designs_sorted'])
def reduce_minimal_form(design, design_stack):
    """ Reduce design to minimal form using full stack of designs """
    all_data, all_data_nauty, sort_indices, nauty_designs_sorted = design_stack
    nauty_form = oapackage.reduceConference(design)
    k = nauty_form.shape[1]
    sort_idx = sort_indices[k]
    if nauty_designs_sorted is None:
        nauty_sorted = [all_data_nauty[k][idx] for idx in sort_idx]
    else:
        nauty_sorted = nauty_designs_sorted[k]
    sorted_idx = index_sorted_array(nauty_sorted, nauty_form)
    idx = sort_idx[sorted_idx]
    return all_data[k][idx]
@lru_cache(maxsize=400000)
def cached_conference_design_has_maximal_extension(design, verbose=0, Nmax=None, conference_generator=None):
    design_stack = conference_design_has_maximal_extension.design_stack
    if design_stack is None:
        raise Exception('initialize the design stack first!')
    if isinstance(design, hashable_array):
        design = design.unwrap()
    design_np = np.array(design)
    N = design_np.shape[0]
    if Nmax is None:
        Nmax = N
    k = design_np.shape[1]
    if N != design_stack[0][4][0].shape[0]:
        raise Exception(f'N {N} does not match design stack')
    if k == Nmax:
        return True
    ee = conference_design_extensions(design_np, conference_generator=conference_generator)
    if verbose:
        print(f'design {N} {k}: check {len(ee)} extensions')
    result = False
    for subidx, extension_design in enumerate(ee):
        if verbose:
            print(f'design {N} {k}: subidx {subidx}')
        extension_design_link = oapackage.makearraylink(extension_design)
        md = reduce_minimal_form(extension_design_link, design_stack)
        if cached_conference_design_has_maximal_extension(make_hashable_array(md), verbose=verbose, Nmax=Nmax,
                                                          conference_generator=conference_generator):
            result = True
            break
    if verbose:
        print(f'design {N} {k}: {result}')
    return result
@oahelper.static_var('design_stack', None)
def conference_design_has_maximal_extension(design, verbose=0, Nmax=None, conference_generator=None) -> bool:
    """ Determine whether a design has an extension to a maximal design

    The static variable design_stack needs to be initialized with a dictionary containing all designs
    with the number of rows specified by the design.

    Args:
        design: Conference design
        Nmax: Maximum number of columns; defaults to the number of rows of the design
    Returns:
        True if the design can be extended to the full number of columns
    """
    if not isinstance(design, hashable_array):
        design = make_hashable_array(design)
    result = cached_conference_design_has_maximal_extension(design, verbose=verbose, Nmax=Nmax,
                                                            conference_generator=conference_generator)
    return result
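# Hedged usage sketch: before calling conference_design_has_maximal_extension the
# static design_stack attribute must be set. The field layout follows the
# DesignStack namedtuple defined below; the variables designs, nauty_designs,
# nauty_sort_index and nauty_designs_sorted are assumed to be dictionaries keyed
# by the number of columns.
#
# conference_design_has_maximal_extension.design_stack = DesignStack(
#     designs, nauty_designs, nauty_sort_index, nauty_designs_sorted)
# conference_design_has_maximal_extension(designs[4][0])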
def _flatten(data):
if len(data) == 0:
return data
return reduce(operator.concat, data)
def _conference_design_extensions_numpy(array, verbose=0):
lst = conference_design_extensions(array, verbose)
return [np.array(array) for array in lst]
def extend_conference_designs_full(extensions: List, number_parallel_jobs=4) -> List:
""" Extend a list of conference designs with all possible extensions minus symmetry """
if number_parallel_jobs > 1:
extensions_numpy = _flatten(Parallel(n_jobs=number_parallel_jobs)(
delayed(_conference_design_extensions_numpy)(np.array(array)) for array in extensions))
extensions = [oapackage.makearraylink(
array) for array in extensions_numpy]
else:
extensions = _flatten([conference_design_extensions(array)
for array in extensions])
return extensions
def maximal_extension_size(array: oapackage.array_link, verbose: int = 1) -> Tuple[int, list]:
    """ Calculate the maximum number of columns in an extension of the specified design

    Args:
        array: Conference design to be extended
    Returns:
        Tuple with the maximum number of columns and the list of extensions with that number of columns
    """
maximum_number_of_columns = array.n_columns
N = array.n_rows
extensions0 = [array]
extensions = extensions0
t0 = time.time()
for c in range(extensions[0].n_columns, N):
extensions2 = extend_conference_designs_full(extensions)
if verbose >= 2:
print(f'maximal_extension_size: N {N} columns {c}->{c+1}: {len(extensions)}->{len(extensions2)}')
extensionsr = oapackage.selectConferenceIsomorpismClasses(
extensions2, verbose=0)
if verbose:
dt = time.time() - t0
print(
f'maximal_extension_size: N {N} columns {c}->{c+1}: {len(extensions)}->{len(extensions2)}->{len(extensionsr)}, {dt:.1f} [s]')
if len(extensionsr) == 0:
break
extensions = extensionsr
maximum_number_of_columns = c + 1
return maximum_number_of_columns, extensions
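# Illustrative usage (sketch): determine how far a conference design can be
# extended; oapackage.exampleArray(45) is assumed to be a conference design.
#
# max_columns, maximal_designs = maximal_extension_size(oapackage.exampleArray(45))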
# %%
def select_even_odd_conference_designs(cfile: str) -> Tuple:
""" Select the even-odd conference designs from a file with designs """
na = oapackage.nArrayFile(cfile)
eolist: List[Any] = []
if na > 100000:
af = oapackage.arrayfile_t(cfile)
for ii in range(na):
if ii % (200 * 1e3) == 0 or ii == na - 1:
print('select_even_odd_conference_designs: %s: %d/%d' %
(cfile, ii, af.narrays))
al = af.readnext()
if ii == 0:
if al.min() > -1:
raise Exception('not a conference matrix?!')
if not oapackage.isConferenceFoldover(al):
eolist += [al]
af.closefile()
else:
ll = oapackage.readarrayfile(cfile)
na = len(ll)
if len(ll) > 0:
if ll[0].min() > -1:
raise Exception('not a conference matrix?!')
eolist = [al for al in ll if not oapackage.isConferenceFoldover(al)]
return na, eolist
# %%
def cdesignTag(N, kk, page, outputdir, tdstyle='', tags=['cdesign', 'cdesign-diagonal', 'cdesign-diagonal-r'],
tagtype=['full', 'r', 'r'], verbose=1, ncache=None, subpage=None, generated_result=None, conference_html_dir=None):
""" Create html tag for oa page
Args:
N (int): number of rows
kk (int): number of columns
page (object):
outputdir (str):
tdstyle (str):
tags (list):
tagtype (list):
verbose (int):
ncache (dict): store results
"""
cfile, nn, mode = conferenceResultsFile(
N, kk, outputdir, tags=tags, tagtype=tagtype, verbose=1)
if ncache is not None:
if 'full' not in ncache:
ncache['full'] = {}
ncache['full']['N%dk%d' % (N, kk)] = nn
cfilex = oapackage.oahelper.checkOAfile(cfile)
if cfilex is not None:
cfilebase = os.path.basename(cfilex)
else:
cfilebase = None
if generated_result is not None and not len(generated_result) == 0:
if generated_result['pareto_results']['full_results']:
nn = generated_result['pareto_results']['narrays']
cfile = None
mode = 'full'
if page is not None:
if subpage:
hreflink = os.path.join('conference', subpage)
if verbose:
print('cdesignTag: hreflink: %s' % subpage)
else:
hreflink = 'conference/%s' % cfilebase
txt, hyper_link = htmlTag(nn, kk, N, mode=mode,
href=hreflink, ncache=ncache, verbose=verbose >= 2)
if verbose >= 2:
print('cdesignTag: N %d, k %d, html txt %s' % (N, kk, txt,))
if hyper_link and (
cfilex is not None) and not hreflink.endswith('html'):
# no html page, just copy OA file
if verbose >= 2:
print('cdesignTag: N %d, ncols %d: copy OA file' % (N, kk))
shutil.copyfile(cfilex, os.path.join(
conference_html_dir, cfilebase))
page.td(txt, style=tdstyle)
else:
if verbose >= 2:
print(cfile)
return cfile
def generate_even_odd_conference_designs(outputdir):
""" Select even-odd conference designs from generated double conference designs """
Nrange = range(0, 82, 2) # full range
# Nrange=range(44, 45, 2)
# Nrange=range(4, 48, 2)
Nrange = range(74, 82, 2)
tag = 'dconferencej1j3'
for Ni, N in enumerate(Nrange):
kmax = int(np.ceil(N / 2) + 1)
krange = range(2, kmax)
for ki, kk in enumerate(krange):
cfile = cdesignTag(N, kk, page=None, outputdir=outputdir, tags=[
tag, tag + '-r'], tagtype=['full', 'r'], conference_html_dir=None)
na, eolist = select_even_odd_conference_designs(cfile)
cfileout = cfile.replace(tag, tag + '-eo')
print(' out: %s: %d -> %d' % (cfileout, na, len(eolist)))
if 1:
oapackage.writearrayfile(
cfileout, eolist, oapackage.ABINARY_DIFF, N, kk)
xfile = cfileout + '.gz'
if os.path.exists(xfile):
print('removing file %s' % (xfile))
os.remove(xfile)
if 1:
if len(eolist) > 100:
cmd = 'gzip -f %s' % cfileout
os.system(cmd)
# %%
def reduce_single_conference(arrays, verbose=0):
    """ Reduce a list of double conference arrays to single conference arrays

    Arrays that are not foldover arrays are discarded.

    Args:
        arrays (list): list of double conference designs
    Returns:
        list: list containing the corresponding single conference designs
    """
narrays = len(arrays)
arrays = [
array for array in arrays if oapackage.isConferenceFoldover(array)]
if verbose:
print('reduce_single_conference: reduce %d arrays to %d single conference designs' % (
narrays, len(arrays)))
def reduce_single(array):
Nsingle = int(array.n_rows / 2)
perm = oapackage.double_conference_foldover_permutation(array)
return oapackage.array_link(np.array(array)[perm[0:Nsingle], :])
arrays = [reduce_single(array) for array in arrays]
return arrays
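# Illustrative usage (sketch): double_designs is assumed to be a list of double
# conference designs, for example read with oapackage.readarrayfile.
#
# single_designs = reduce_single_conference(double_designs, verbose=1)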
class SingleConferenceParetoCombiner:
def __init__(self, outputdir, cache_dir, cache=False,
verbose=1, pareto_method_options=None):
""" Class to generate statistics and Pareto optimality results for a conference design class from double conference designs """
self.outputdir = outputdir
self.cache_dir = cache_dir
self.cache = cache
self.verbose = verbose
if pareto_method_options is None:
pareto_method_options = {}
self._pareto_method_options = pareto_method_options
def append_basepath(self, afile):
return os.path.join(self.outputdir, afile)
def pareto_file(self, filename):
pfile = os.path.join(self.cache_dir, filename)
oapackage.mkdirc(os.path.split(pfile)[0])
return pfile
def stats_file(self, filename):
pfile = os.path.join(self.cache_dir, filename).replace('.oa', '.json')
oapackage.mkdirc(os.path.split(pfile)[0])
return pfile
def combined_results_file(self, number_columns):
pfile = os.path.join(
self.cache_dir, 'combined-single-conference-pareto-results-k%d.json' % number_columns)
oapackage.mkdirc(os.path.split(pfile)[0])
return pfile
def pre_calculate(self, arrayfiles):
for ii, afile in enumerate(arrayfiles):
outputfile = self.pareto_file(afile)
outputfile_stats = self.stats_file(afile)
if os.path.exists(outputfile) and self.cache:
continue
oapackage.oahelper.tprint(
'ParetoCalculator: pre_calculate file %d/%d: %s' % (ii, len(arrayfiles), afile))
arrays = oapackage.readarrayfile(self.append_basepath(afile))
number_arrays = len(arrays)
arrays = reduce_single_conference(arrays, verbose=1)
presults, _ = oaresearch.research_conference.calculateConferencePareto(
arrays, **self._pareto_method_options)
pareto_designs = [oapackage.array_link(
array) for array in presults['pareto_designs']]
print('generate %s: %d arrays' % (outputfile, len(pareto_designs)))
oapackage.writearrayfile(outputfile, oapackage.arraylist_t(
pareto_designs), oapackage.ABINARY)
with open(outputfile_stats, 'wt') as fid:
json.dump({'number_arrays': number_arrays,
'number_conference_arrays': len(arrays)}, fid)
@staticmethod
def combine_statistics(stats, extra_stats):
if stats is None:
return copy.copy(extra_stats)
combined_stats = copy.copy(stats)
for field in ['number_arrays', 'number_conference_arrays']:
combined_stats[field] = stats[field] + extra_stats[field]
return combined_stats
def write_combined_results(self, number_columns, results):
results['pareto_designs'] = [
np.array(array) for array in results['pareto_designs']]
with open(self.combined_results_file(number_columns), 'wt') as fid:
json_tricks.dump(results, fid, indent=4)
def load_combined_results(self, number_columns):
with open(self.combined_results_file(number_columns), 'rt') as fid:
results = json_tricks.load(fid)
return results
def calculate(self, arrayfiles):
""" Calculate statistics over generated designs
Args:
lst (list): list of files with designs
"""
pareto_arrays = []
combined_stats = None
for afile in arrayfiles:
oapackage.oahelper.tprint('ParetoCalculator: calculate %s' % afile)
outputfile = self.pareto_file(afile)
outputfile_stats = self.stats_file(afile)
arrays = oapackage.readarrayfile(outputfile)
pareto_arrays += list(arrays)
stats = json.load(open(outputfile_stats, 'rt'))
combined_stats = self.combine_statistics(combined_stats, stats)
presults, _ = oaresearch.research_conference.calculateConferencePareto(
pareto_arrays, **self._pareto_method_options)
# remove invalid fields
for tag in ['B4', 'F4']:
if tag + '_max' in presults:
presults.pop(tag + '_max')
for tag in ['rankinteraction', 'ranksecondorder']:
if tag + '_min' in presults:
presults.pop(tag + '_min')
presults['combined_statistics'] = combined_stats
presults['_pareto_method_options'] = self._pareto_method_options
return presults
# %%
def generate_or_load_conference_results(N, number_of_columns, outputdir, dc_outputdir,
double_conference_cases=(), addExtensions=True, addMaximumExtensionColumns=False):
""" Calculate results for conference designs class
In data is either calculated directly, or loaded from pre-generated data gathered from double conference designs.
"""
pareto_results = OrderedDict(
{'N': N, 'ncolumns': number_of_columns, 'full_results': 0, 'no_results': True})
from_double_conference = N in double_conference_cases
addExtensions = N <= 26
pareto_method_options = {
'verbose': 1,
'addProjectionStatistics': None,
'addExtensions': addExtensions,
'addMaximumExtensionColumns': addMaximumExtensionColumns}
if from_double_conference:
if number_of_columns > N:
return pareto_results, None
print(
'generate_or_load_conference_results: N %d: loading from doubleconference results' %
(N,))
dc_dir = os.path.join(dc_outputdir, 'doubleconference-%d' % (2 * N))
if not os.path.exists(dc_dir):
return {}, None
cache_dir = oapackage.mkdirc(os.path.join(dc_dir, 'sc_pareto_cache'))
pareto_calculator = SingleConferenceParetoCombiner(
dc_dir, cache_dir=cache_dir, cache=True, pareto_method_options=None)
pareto_results = pareto_calculator.load_combined_results(
number_of_columns)
pareto_results['narrays'] = pareto_results['combined_statistics']['number_conference_arrays']
pareto_results['idstr'] = 'cdesign-%d-%d' % (
pareto_results['N'], pareto_results['ncolumns'])
pareto_results['full'] = True
pareto_results['full_results'] = True
pareto_results['_from_double_conference'] = True
cfile = None
else:
cfile, nn, mode = conferenceResultsFile(N, number_of_columns, outputdir,
tags=[
'cdesign', 'cdesign-diagonal', 'cdesign-diagonal-r'],
tagtype=['full', 'r', 'r'], verbose=1)
ll = oapackage.readarrayfile(cfile)
narrays = len(ll)
if mode == 'full' or narrays < 1000:
presults, pareto = calculateConferencePareto(
ll, N=N, k=number_of_columns, **pareto_method_options)
pareto_results = generateConferenceResults(
presults, ll, ct=None, full=(mode == 'full'))
pareto_results['arrayfile'] = cfile
else:
cfile = None
return pareto_results, cfile
# %%
def generateConference(N, kmax=None, verbose=1, diagc=False, nmax=None, selectmethod='random', tag='cdesign',
outputdir=None):
""" Generate sequece of conference designs
Arguments:
N : integer
number of rows in the array
kmax : integer
maximum number of columns to compute
verbose : integer
output level
diagc : boolean
the default value is False. If True, then only the diagonal
matrices will be computed (e.g. all zeros are on the diagonal)
"""
if kmax is None:
kmax = N
ctype = oapackage.conference_t(N, N, 0)
if diagc:
ctype.ctype = oapackage.conference_t.CONFERENCE_DIAGONAL
tag += '-diagonal'
if nmax is not None:
tag += '-r'
al = ctype.create_root()
ll = oapackage.arraylist_t()
ll.push_back(al)
LL = [[]] * (kmax)
LL[1] = ll
print('generateConference: start: %s' % ctype)
if outputdir is not None:
_ = oapackage.writearrayfile(
os.path.join(outputdir, 'cdesign-%d-%d.oa' % (N, 2)), LL[1], oapackage.ATEXT, N, 2)
for extcol in range(2, kmax):
if verbose:
print('generateConference: extcol %d: %d designs' %
(extcol, len(LL[extcol - 1])))
sys.stdout.flush()
LL[extcol] = oapackage.extend_conference(
LL[extcol - 1], ctype, verbose=verbose >= 2)
LL[extcol] = oapackage.selectConferenceIsomorpismClasses(
LL[extcol], verbose >= 1)
LL[extcol] = oapackage.sortLMC0(LL[extcol])
if nmax is not None:
na = min(nmax, len(LL[extcol]))
if na > 0:
if selectmethod == 'random':
idx = np.random.choice(len(LL[extcol]), na, replace=False)
LL[extcol] = [LL[extcol][i] for i in idx]
elif selectmethod == 'first':
LL[extcol] = [LL[extcol][i] for i in range(na)]
else:
# mixed case
raise Exception('not implemented')
afmode = oapackage.ATEXT
if (len(LL[extcol]) > 1000):
afmode = oapackage.ABINARY
if outputdir is not None:
_ = oapackage.writearrayfile(os.path.join(
outputdir, '%s-%d-%d.oa' % (tag, N, extcol + 1)), LL[extcol], afmode, N, extcol + 1)
ll = [len(l) for l in LL]
if verbose:
print('generated sequence: %s' % ll)
return LL
# %%
def conferenceJ4(al, jj=4):
""" Calculate J4 values for a conference matrix """
al = oapackage.makearraylink(al)
return oapackage.Jcharacteristics_conference(al, number_of_columns=jj)
@oapackage.oahelper.deprecated
def conferenceSecondOrder(al, include_so=False):
""" Calculate second-order interaction matrix for a conference matrix """
x = np.array(al)
k = al.n_columns
if include_so:
offset = 0
m = int(k * (k + 1) / 2)
else:
offset = 1
m = int(k * (k - 1) / 2)
y = np.zeros((x.shape[0], m))
idx = 0
for ii in range(k):
for jj in range(ii + offset, k):
y[:, idx] = x[:, ii] * x[:, jj]
idx = idx + 1
return y
def conferenceStatistics(al, verbose=0):
""" Calculate statistics for a conference design
Args:
al (array): design to use
Returns:
list: f4, b4, rank X2, rank X2 with quadratics
"""
f4 = al.FvaluesConference(4)
N = al.n_rows
j4 = conferenceJ4(al)
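    # B4 is the normalized sum of squared J4-characteristics: B4 = sum(J4**2) / N**2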
b4 = np.sum(np.array(j4) ** 2) / N ** 2
ncols = al.n_columns
modelmatrix = oapackage.array2modelmatrix(al, 'i')[:, (1 + ncols):]
rank = np.linalg.matrix_rank(modelmatrix)
modelmatrix_quadratic = oapackage.array2modelmatrix(al, 'q')[
:, (1 + ncols):]
rankq = np.linalg.matrix_rank(modelmatrix_quadratic)
if verbose:
print('f4: %s' % (f4,))
print('j4: %s' % (j4,))
print('rank X2: %s' % (rank,))
print('rank X2+quadratics: %s' % (rankq,))
return [f4, b4, rank, rankq]
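# Illustrative usage (sketch; the returned values depend on the design and are
# not asserted here):
#
# f4, b4, rank2fi, rank2fi_quadratic = conferenceStatistics(oapackage.exampleArray(18), verbose=1)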
def test_confJ4():
al = oapackage.exampleArray(18)
J = conferenceJ4(al)
assert (np.sum(np.abs(np.array(J)) == 12) == 1)
assert (np.sum(np.abs(np.array(J)) == 0) == 23)
def createConferenceParetoElement(al, addFoldover=True, addProjectionStatistics=True,
addMaximality=False, addMaximumExtensionColumns=False, pareto=None, rounding_decimals=3):
""" Create Pareto element from conference design
Returns:
Tuple with pareto_element, data
"""
rr = conferenceStatistics(al, verbose=0)
[f4, b4, rankinteraction, ranksecondorder] = rr[0:4]
f4minus = [-float(x) for x in f4]
values = [[float(ranksecondorder)], [float(
rankinteraction)], list(f4minus), [-float(b4)]]
data = OrderedDict(ranksecondorder=ranksecondorder)
data['rankinteraction'] = rankinteraction
data['F4'] = f4
data['B4'] = b4
if addProjectionStatistics:
proj_data = np.zeros((2, 3))
proj_data[0] = oapackage.conference.conferenceProjectionStatistics(
al, ncolumns=4)
proj_data[1] = oapackage.conference.conferenceProjectionStatistics(
al, ncolumns=5)
proj_data = np.around(proj_data, rounding_decimals)
for tag_index, tag in enumerate(['PEC', 'PIC', 'PPC']):
for ni, kk in enumerate([4, 5]):
values += [[proj_data[ni, tag_index]]]
data[tag + '%d' % kk] = proj_data[ni, tag_index]
else:
for tag in ['PEC', 'PIC', 'PPC']:
for kk in [4, 5]:
data[tag + '%d' % kk] = None
if addFoldover:
foldover = oapackage.isConferenceFoldover(al)
values += [[int(foldover)], [int(not foldover)]]
data['foldover'] = int(foldover)
data['notfoldover'] = int(not foldover)
if addProjectionStatistics:
assert (len(values) == len(data.keys()))
if addMaximality:
data['has_extensions'] = conference_design_has_extensions(al)
if addMaximumExtensionColumns:
data['maximum_extension_size'] = maximal_extension_size(al)[0]
if pareto is None:
pareto = oapackage.ParetoMultiDoubleLong()
pareto_element = create_pareto_element(values, pareto=pareto)
return pareto_element, data
@oapackage.oahelper.deprecated
def makePareto(presults, addFoldover=True):
pareto = oapackage.ParetoMultiDoubleLong()
for ii in range(len(presults.ranks)):
f4minus = tuple([-x for x in presults.f4s[ii]])
values = [[int(presults.ranks[ii])], list(
f4minus), [-presults.b4s[ii]]]
if addFoldover:
values += [[int(presults.foldover[ii])],
[int(not presults.foldover[ii])]]
val = create_pareto_element(values, pareto=pareto)
pareto.addvalue(val, ii)
return pareto
class pareto_results_structure(collections.OrderedDict):
""" Class to hold results of Pareto calculations """
def add_value(self, tag, value):
mintag = tag + '_min'
maxtag = tag + '_max'
        if mintag not in self:
            self[mintag] = value
        if maxtag not in self:
            self[maxtag] = value
self[mintag] = min(value, self[mintag])
self[maxtag] = max(value, self[maxtag])
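# Small illustration (sketch) of the running min/max bookkeeping in
# pareto_results_structure.add_value: after the two calls below the structure
# holds B4_min == 1.0 and B4_max == 2.5.
#
# stats = pareto_results_structure()
# stats.add_value('B4', 2.5)
# stats.add_value('B4', 1.0)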
def conference_design_has_extensions(array, verbose=0):
""" Return True if a single conference design has extensions """
j1zero = 0
conference_type = oapackage.conference_t(
array.n_rows, array.n_columns, j1zero)
zero_index = -1
filterj2 = 1
filterj3 = 0
    filter_symmetry = 1  # we can use symmetry reduction, since the other filtering is not related to the symmetry of the design
extensions = oapackage.generateSingleConferenceExtensions(
array, conference_type, zero_index, verbose >= 2, filter_symmetry, filterj2, filterj3, filter_symmetry)
result = len(extensions) > 0
if verbose:
print('conference_design_has_extensions: %s, found %d extensions' %
(result, len(extensions)))
if verbose >= 2:
oapackage.showCandidates(extensions)
return result
def conferenceParetoIdentifier():
return '0.4'
def calculateConferencePareto(ll, N=None, k=None, verbose=1, add_data=True, addProjectionStatistics=None,
addExtensions=False, addMaximumExtensionColumns=False, number_parallel_jobs=1):
""" Calculate Pareto optimal designs from a list of designs
Args:
ll (list): list of designs
N (int)
k (int)
verbose (int)
add_data (bool)
addProjectionStatistics (None or bool)
addExtensions (bool)
Returns:
presults, pareto
"""
t0 = time.time()
if verbose:
print('calculateConferencePareto: analysing %d arrays, addProjectionStatistics %s, addExtensions %s' % (
len(ll), addProjectionStatistics, addExtensions))
if len(ll) > 0:
N = ll[0].n_rows
k = ll[0].n_columns
presults = pareto_results_structure({'pareto_designs': []})
pareto = oapackage.ParetoMultiDoubleLong()
if N is None:
presults['N'] = None
return presults, pareto
if addProjectionStatistics is None:
if (N <= 20):
addProjectionStatistics = True
else:
addProjectionStatistics = False
data = None
t0 = time.time()
def pareto_worker(al):
al = oapackage.makearraylink(al)
pareto_element, data = createConferenceParetoElement(al, addFoldover=False,
addProjectionStatistics=addProjectionStatistics,
pareto=None)
pareto_element = [list(e.values) for e in list(pareto_element)]
return pareto_element, data
block_size = 200
blocks = [(ii, min(len(ll), ii + block_size))
for ii in np.arange(0, len(ll), block_size)]
def add_extra_data(presults, data, addProjectionStatistics):
if add_data:
for tag in ['ranksecondorder', 'rankinteraction', 'B4', 'F4']:
presults.add_value(tag, data[tag])
if addProjectionStatistics:
for tag in ['PEC4', 'PIC4']:
presults.add_value(tag, data[tag])
if number_parallel_jobs > 1:
for blockidx, block in enumerate(blocks):
dt = time.time() - t0
oapackage.oahelper.tprint(f'calculateConferencePareto: N {N} column {k}: block {blockidx}/{len(blocks)}: ({dt:.1f} [s]): {str(pareto).strip()}')
xx = Parallel(n_jobs=number_parallel_jobs)(
[delayed(pareto_worker)(np.array(al)) for al in ll[block[0]:block[1]]])
for jj, al in enumerate(ll[block[0]:block[1]]):
pareto_element, data = xx[jj]
pareto_element = oapackage.vector_mvalue_t_double(
[oapackage.mvalue_t_double(x) for x in pareto_element])
ii = int(jj + block[0])
pareto.addvalue(pareto_element, ii)
add_extra_data(presults, data, addProjectionStatistics)
else:
for ii, al in enumerate(ll):
oapackage.oahelper.tprint('calculateConferencePareto: N %s column %s: array %d/%d (%.1f [s]): %s' %
(str(N), str(k), ii, len(ll), time.time() - t0, str(pareto).strip()), dt=2)
pareto_element, data = createConferenceParetoElement(al, addFoldover=False,
addProjectionStatistics=addProjectionStatistics,
pareto=None)
pareto.addvalue(pareto_element, ii)
add_extra_data(presults, data, addProjectionStatistics)
presults['N'] = N
presults['ncolumns'] = k
if len(ll) > 0:
presults.N = ll[0].n_rows
presults.ncolumns = ll[0].n_columns
if data is None:
presults['pareto_type'] = 'no design'
else:
presults['pareto_type'] = ', '.join(
[key for key in data.keys() if data[key] is not None])
presults['pareto_type'] = presults['pareto_type'].replace(
'ranksecondorder', 'r(2FI, QE)')
presults['pareto_type'] = presults['pareto_type'].replace(
'rankinteraction', 'r(2FI)')
pareto.show()
presults['pareto_indices'] = pareto.allindices()
presults['nclasses'] = pareto.number()
presults['npareto'] = pareto.numberindices()
presults['_version'] = conferenceParetoIdentifier()
presults['pareto_designs'] = [ll[ii] for ii in presults['pareto_indices']]
presults['pareto_data'] = []
def pareto_worker(al):
al = oapackage.makearraylink(al)
pareto_element, data = createConferenceParetoElement(
al, addFoldover=True, addMaximality=addExtensions, addMaximumExtensionColumns=addMaximumExtensionColumns)
return data
if number_parallel_jobs > 1:
data_results = Parallel(n_jobs=number_parallel_jobs)(
[delayed(pareto_worker)(np.array(al)) for al in presults['pareto_designs']])
for ii, data in enumerate(data_results):
presults['pareto_data'].append(data)
else:
for ii, al in enumerate(presults['pareto_designs']):
data = pareto_worker(al)
presults['pareto_data'].append(data)
presults['_pareto_processing_time'] = time.time() - t0
presults = OrderedDict(presults)
return presults, pareto
def test_calculateConferencePareto():
ll = [oapackage.exampleArray(idx) for idx in [45, 46, 47, 45]]
presults, _ = calculateConferencePareto(
ll, N=None, k=None, verbose=1, add_data=True)
if __name__ == '__main__':
test_calculateConferencePareto()
def showMaxZ(LL):
""" For a list of generated designs show the maximum zero position """
N = LL[3][0].n_rows
for ii, L in enumerate(LL):
k = ii + 1
s = [oapackage.maxz(al) for al in L]
mm, _ = np.histogram(s, range(N + 1))
print('%d cols: maxz seq %s' % (k, list(mm)))
def generate_conference_latex_tables(htmlsubdir, verbose=1):
""" Generate LaTeX results tables from pre-generated result files """
for N in range(8, 25, 2):
lst = oapackage.findfiles(htmlsubdir, 'conference-N%d.*pickle' % N)
if verbose:
print('latex table: N %d: %d files' % (N, len(lst)))
table = None
kk = [oapackage.scanf.sscanf(
file, 'conference-N%dk%d')[1] for file in lst]
lst = [lst[idx] for idx in np.argsort(kk)]
for file in (lst):
r = pickle.load(open(os.path.join(htmlsubdir, file), 'rb'))
ncolumns = r['ncolumns']
rtable = r['rtable']
if rtable.size == 0:
continue
column = np.vstack(
(['k'], ncolumns * np.ones((rtable.shape[0] - 1, 1), dtype=int)))
rtable = np.hstack((column, rtable))
if table is None:
table = rtable
else:
rtable = rtable[1:]
table = np.vstack((table, rtable))
# r['ncolumns']
print(table)
if len(lst) == 0:
print('no results for N=%d' % N)
continue
offset_columns = [1, 2]
for row in range(1, table.shape[0]):
for col in offset_columns:
table[row, col] = str(int(table[row, col]) + 1)
        latextable = oapackage.array2latex(table, hlines=[0],
                                           comment=['conference designs N=%d' % (N), 'offset for indices is 1'])
if verbose:
print(latextable)
with open(os.path.join(htmlsubdir, 'conference-N%d-overview.tex' % (N,)), 'wt') as fid:
fid.write(latextable)
def conferenceResultsFile(N, kk, outputdir, tags=['cdesign', 'cdesign-diagonal', 'cdesign-diagonal-r'],
                          tagtype=['full', 'r', 'r'], verbose=1):
    """ Locate the result file with conference designs for the specified case

    Args:
        N (int): number of rows
        kk (int): number of columns
        outputdir (str): directory with generated result files
        tags (list): tags of the result files to try, in order of preference
        tagtype (list): result type ('full' or 'r') corresponding to each tag
        verbose (int): verbosity level
    Returns:
        Tuple with the filename, the number of designs in the file and the result mode
    """
for ii, tag in enumerate(tags):
cfile0 = '%s-%d-%d.oa' % (tag, N, kk)
cfile = os.path.join(outputdir, cfile0)
gfile = os.path.join(outputdir, cfile0 + '.gz')
if verbose >= 2:
print('cdesignTag: try file %s' % cfile0)
if os.path.exists(os.path.join(outputdir, cfile0)
) and os.path.exists(gfile):
nn1 = oapackage.nArrayFile(cfile)
nn2 = oapackage.nArrayFile(gfile)
raise Exception('both .oa and .oa.gz exist: %s' % cfile)
nn = oapackage.nArrays(cfile)
mode = tagtype[ii]
cfilex = oapackage.oahelper.checkOAfile(cfile)
if cfilex is not None:
cfilebase = os.path.basename(cfilex)
else:
cfilebase = None
if nn >= 0:
break
if verbose:
print('cdesignTag: N %d, kk %d: selected tag %s: nn %d' %
(N, kk, tag, nn))
# special case
if kk == N and tag == 'cdesign-diagonal':
mode = 'full'
if verbose >= 2:
print(cfile)
return cfile, nn, mode
def generateConferenceResults(presults, ll, ct=None, full=None):
pareto_results = presults
pareto_results['type'] = 'conference designs'
pareto_results['arrayfile'] = None
pareto_results['presults'] = None
pareto_results['full'] = full
pareto_results['full_results'] = full
pareto_results['idstr'] = 'cdesign-%d-%d' % (
pareto_results['N'], pareto_results['ncolumns'])
if ct is not None:
pareto_results['ctidstr'] = ct.idstr()
assert (ct.N == pareto_results['N'])
assert (ct.ncols == pareto_results['ncolumns'])
pareto_results['narrays'] = len(ll)
pareto_results['pareto_designs'] = [
np.array(array) for array in presults['pareto_designs']]
return pareto_results
# %% Webpage generation
def nprevzero(N, k, ncache):
""" Return true if any previous result was zero """
for ix in range(k - 1, 2, -1):
# print(ix)
p = ncache['full'].get('N%dk%d' % (N, ix), -1)
if p == 0:
return True
return False
def htmlTag(nn, kk, N, mode='full', href=None, ncache=None, verbose=0):
""" Create html tag for number of designs
Args:
nn (int): number of arrays
kk (int): number of columns
N (int): number of rows
mode (str)
href (None or str): hyperlink to subpage
Returns:
txt (str): link text
hyper_link (bool): True if the txt is a hyperlink
"""
hyper_link = False
if nn >= 0:
if mode == 'full':
txt = '%d' % nn
else:
if nn == 0:
txt = '?'
else:
txt = '≥ %d' % nn
if href is not None:
        if (0 < nn < 6000) or href.endswith('html'):
ss = e.a(txt, href=href, style='text-decoration: none;')
hyper_link = True
txt = ss
else:
pass
else:
if verbose:
print('htmlTag: nn is negative')
if kk <= N:
if ncache is None:
txt = '?'
else:
if verbose >= 1:
print('htmlTag: mode %s, N %d, k %d' % (mode, N, kk))
if nprevzero(N, kk, ncache):
if verbose >= 1:
print(
'htmlTag: nprevzero(%d, %d, ncache) is True' %
(N, kk))
txt = ''
else:
txt = '?'
else:
txt = ''
return txt, hyper_link
def latexResults(outputdir):
X = []
print('make latex results table...')
NN = range(4, 31, 2)
kk = range(0, np.max(NN) + 1)
X = np.zeros((1 + 1 + len(kk) - 2, 1 + len(NN)), dtype=object)
X[:] = ''
X[0, 0] = ''
X[1, 0] = '$k$'
for ii, N in enumerate(NN):
X[1, 1 + ii] = N
X[0, 1 + ii] = ''
for ki, k in enumerate(range(2, N + 1)):
if k > N:
X[1 + 1 + ki, 1 + ii] = ''
else:
cfile0 = 'cdesign-%d-%d.oa' % (N, k)
nn = oapackage.nArrays(os.path.join(outputdir, cfile0))
if nn < 0 and k == N:
cfile0 = 'cdesign-diagonal-%d-%d.oa' % (N, k)
nn = oapackage.nArrays(os.path.join(outputdir, cfile0))
if nn < 0:
cfile0 = 'cdesign-diagonal-%d-%d.oa' % (N, k)
nnm = oapackage.nArrays(os.path.join(outputdir, cfile0))
if nnm > 0:
X[1 + 1 + ki, 1 + ii] = r'$\ge %d$' % nnm
else:
X[1 + 1 + ki, 1 + ii] = '?'
else:
X[1 + 1 + ki, 1 + ii] = nn
X[1 + 1 + ki, 0] = '%d' % k
X[0, 1] = '$N$'
X[2, 0] = X[2, 0] + r'\rule{0pt}{2.9ex}'
return X
def createConferenceDesignsPageHeader(
page, makeheader, conference_class, ncolumns, full_results=False):
xstr = 'C(%d, %d)' % (conference_class.N, ncolumns)
xstrplain = xstr
if makeheader:
page.init(title="Class %s" % xstrplain,
css=('../oastyle.css'),
lang='en', htmlattrs=dict({'xmlns': 'http://www.w3.org/1999/xhtml', 'xml:lang': 'en'}),
header="<!-- Start of page -->",
htmlheader=oaresearch.research.oaCssStyle(addframe=True),
bodyattrs=dict({'style': 'padding-left: 3px;'}),
doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
metainfo=({'text/html': 'charset=utf-8', 'keywords': 'conference designs',
'robots': 'index, follow', 'description': 'conference designs'}),
footer="<!-- End of page -->")
if full_results:
page.h1('Conference designs %s ' % xstr)
else:
page.h1('Conference designs %s (<b>partial results</b>) ' % xstr)
oap = e.a('Orthogonal Array package', href=r'http://www.pietereendebak.nl/oapackage/index.html')
pstr = 'This page contains information about conference designs. '
pstr += 'The results have been generated with the %s.' % oap
pstr += ' If you use these results, please cite the paper ' + \
citation('conference', style='full') + '.'
page.p(pstr)
def createConferenceDesignsPageResultsTable(page, pareto_results, verbose=0):
full_results = pareto_results.get('full')
if full_results:
page.h2('Results')
else:
page.h2('Results (partial results)')
page.table()
page.tr(style='font-weight: bold;')
page.td('Statistic', style='padding-right:30px;')
page.td(('Results'), style='padding-right:8px;')
page.tr.close()
def simpleRow(a, b):
page.tr(style='')
page.td(a, style='padding-right:30px;')
page.td(b, style='padding-right:8px;')
page.tr.close()
narrays = pareto_results['narrays']
simpleRow('Number of non-isomorphic designs',
str(pareto_results['narrays']))
if narrays > 0:
if 'ranksecondorder_min' in pareto_results:
simpleRow('Minimum/Maximum rank of model matrix with 2FI and QE',
'%d/%d' % (pareto_results['ranksecondorder_min'], pareto_results['ranksecondorder_max']))
simpleRow('Minimum/Maximum rank of model matrix for 2FI',
'%d/%d' % (pareto_results['rankinteraction_min'], pareto_results['rankinteraction_max']))
else:
simpleRow('Maximum rank of model matrix with 2FI and QE',
'%d' % (pareto_results['ranksecondorder_max']))
simpleRow('Maximum rank of model matrix for 2FI', '%d' %
(pareto_results['rankinteraction_max']))
if 'B4_min' in pareto_results:
simpleRow('Minimum B4', '%.4f' % pareto_results['B4_min'])
if 'B4_max' in pareto_results:
simpleRow('Maximum B4', '%.4f' % pareto_results['B4_max'])
if 'F4_min' in pareto_results:
simpleRow('Minimum F4', '%s' % (pareto_results['F4_min'],))
if 'F4_max' in pareto_results:
simpleRow('Maximum F4', '%s' % (pareto_results['F4_max'],))
if 'totaltime' in list(pareto_results.keys()):
simpleRow('Processing time', str(pareto_results['totaltime']) + 's')
if pareto_results.get('datafile_tag', None) is not None:
simpleRow('Data', pareto_results['datafile_tag'])
page.table.close()
page.p(style='font-size: smaller;')
page.add('Note: 2FI: two-factor interaction; QE: quadratic effect')
page.p.close()
def createConferenceDesignsPageLoadDesignsFile(
pareto_results, htmlsubdir=None, verbose=1):
havearrayfile = 0
if 'arrayfile' in list(pareto_results.keys()):
if not pareto_results['arrayfile'] is None:
havearrayfile = 1
if verbose:
print('createConferenceDesignsPageLoadDesignsFile: havearrayfile %d' %
havearrayfile)
if havearrayfile:
iafile = pareto_results['arrayfile']
outfile0 = pareto_results['idstr'] + '.oa'
na = oapackage.nArrayFile(iafile)
if verbose >= 2:
print('conferenceDesignsPage: read arrayfile %s: na %d' %
(iafile, na))
if na < 5000 and na >= 0:
if htmlsubdir is None:
raise Exception('need html subdirectory to copy .oa file')
outfilefinal = oaresearch.filetools.copyOAfile(iafile, htmlsubdir, outfile0, convert='T', zipfile=None,
verbose=1, cache=0)
if pareto_results.get('full', False):
if verbose:
print('conferenceDesignsPage: full results')
pareto_results['datafilestr'] = 'all arrays'
htag = oaresearch.htmltools.formatArrayHyperlink(
pareto_results['datafilestr'], outfile0, iafile)
pareto_results['datafilestr'] = 'all arrays'
pareto_results['datafile_tag'] = htag
else:
na = oapackage.nArrayFile(
os.path.join(htmlsubdir, outfilefinal))
pareto_results['datafilestr'] = '%d array(s)' % na
htag = oaresearch.htmltools.formatArrayHyperlink(
pareto_results['datafilestr'], outfile0, iafile)
pareto_results['datafile_tag'] = htag
else:
if verbose:
print('conferenceDesignsPage: no datafile (na %d)' % na)
pareto_results['datafilestr'] = '-'
pareto_results['datafile_tag'] = None
def createConferenceDesignsPageParetoTable(
page, pareto_results, verbose=0, htmlsubdir=None):
""" Create table with Pareto results and add to the markup object
Args:
page (markup.page): html page to add table to
Returns:
rtable (array): generated table
"""
if verbose:
print('createConferenceDesignsPageParetoTable: start')
pareto_indices = pareto_results['pareto_indices']
pareto_data = pareto_results['pareto_data']
add_extension_information = False
add_maximum_extension_size = False
if len(pareto_data) > 0:
if pareto_data[0].get('has_extensions', None) is not None:
add_extension_information = True
if pareto_data[0].get('maximum_extension_size', None) is not None:
add_maximum_extension_size = True
if pareto_results['narrays'] > 0 and pareto_results.get('full_results'):
add_extra = True
if verbose:
print('createConferenceDesignsPageParetoTable: do statistics2htmltable')
header = ['Index Pareto file', 'Index design file',
'Rank 2FI and QE', 'Rank 2FI', 'F4', 'B4']
if add_extra:
for tag in ['PEC', 'PIC', 'PPC']:
for kk in [4, 5]:
header += [tag + '%d' % kk]
if add_extension_information:
header += ['Extensions']
if add_maximum_extension_size:
header += ['Max. columns']
rtable = np.zeros(
(1 + len(pareto_results['pareto_indices']), len(header)), dtype='|U208')
rtable[:] = ' '
for ii, h in enumerate(header):
rtable[0, ii] = header[ii]
sort_indices = oapackage.sortrows(
np.array([p['F4'] for p in pareto_results['pareto_data']]))
for ii, sort_index in enumerate(sort_indices):
pareto_idx = sort_index
array_list_idx = pareto_indices[sort_index]
rank_secondorder = str(pareto_data[pareto_idx]['ranksecondorder'])
rank_interaction = str(pareto_data[pareto_idx]['rankinteraction'])
rowdata = ['%d' % pareto_idx, '%d' % array_list_idx, rank_secondorder, rank_interaction,
str(pareto_data[pareto_idx]['F4']), '%.2f' % ((pareto_data[pareto_idx]['B4']))]
rtable[ii + 1, 0:len(rowdata)] = rowdata
column_offset = len(rowdata)
if add_extra:
for tag in ['PEC', 'PIC', 'PPC']:
for kk in [4, 5]:
rtable[ii + 1, column_offset] = '%.3f' % (
pareto_data[pareto_idx][tag + '%d' % kk])
column_offset = column_offset + 1
if add_extension_information:
rtable[ii + 1, column_offset] = 'Yes' if pareto_data[pareto_idx]['has_extensions'] > 0 else 'No'
column_offset = column_offset + 1
if add_maximum_extension_size:
rtable[ii + 1, column_offset] = pareto_data[pareto_idx]['maximum_extension_size']
column_offset = column_offset + 1
subpage = oaresearch.research.array2html(rtable, header=1, tablestyle='border-collapse: collapse;',
trclass='', tdstyle='padding-right:1em;', trstyle='',
thstyle='text-align:left; padding-right: 1em;', comment=None)
page.br(clear='both')
page.h2('Pareto optimal designs')
page.p()
if pareto_results['nclasses'] == 1:
pareto_classes_text = 'in %d class' % pareto_results['nclasses']
else:
pareto_classes_text = 'in %d classes' % pareto_results['nclasses']
if pareto_results['npareto'] == 1:
page.add('There is %d Pareto optimal design %s.' %
(pareto_results['npareto'], pareto_classes_text))
else:
page.add('There are %d Pareto optimal designs %s.' %
(pareto_results['npareto'], pareto_classes_text))
pareto_type = pareto_results['pareto_type']
if ',' in pareto_type:
            k = pareto_type.rfind(", ")
            pareto_type = pareto_type[:k] + ", and " + pareto_type[k + 2:]
page.add(
'Pareto optimality is according to %s (any other statistics are ignored).' % pareto_type)
page.p.close()
if pareto_results.get('pareto_designs', None) is not None:
pdesigns = pareto_results.get('pareto_designs', None)
pfile0 = pareto_results['idstr'] + '-pareto.oa'
if htmlsubdir is not None:
pfile = os.path.join(htmlsubdir, pfile0)
oapackage.writearrayfile(
pfile, [oapackage.array_link(array) for array in pdesigns])
page.p('All %s' % e.a('Pareto optimal designs', href=pfile0) + '.')
page.add(str(subpage))
page.br(clear='both')
else:
        rtable = np.zeros((0, 0))

    return rtable
""" Random Interval Spectral Forest (RISE).
Implementation of Deng's Time Series Forest, with minor changes
"""
__author__ = "<NAME>"
__all__ = ["RandomIntervalSpectralForest","acf","matrix_acf","ps"]
import numpy as np
import pandas as pd
import math
from numpy import random
from copy import deepcopy
from sklearn.ensemble.forest import ForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.multiclass import class_distribution
class RandomIntervalSpectralForest(ForestClassifier):
"""Random Interval Spectral Forest (RISE).
    Random Interval Spectral Forest: stripped down implementation of RISE from Lines 2018:
    @article
    {lines17hive-cote,
      author = {<NAME>, <NAME> and <NAME>},
      title = {Time Series Classification with HIVE-COTE: The Hierarchical Vote Collective of Transformation-Based Ensembles},
      journal = {ACM Transactions on Knowledge Discovery from Data},
      volume = {12},
      number = {5},
      year = {2018}
    }
Overview: Input n series length m
for each tree
sample a random intervals
take the ACF and PS over this interval, and concatenate features
build tree on new features
ensemble the trees through averaging probabilities.
Need to have a minimum interval for each tree
This is from the python github.
Parameters
----------
    n_trees : int, ensemble size, optional (default = 200)
    random_state : int, seed for the random number generator, optional (default to no seed)
    min_interval : int, minimum width of an interval, optional (default = 16)
    acf_lag : int, maximum number of autocorrelation terms to use (default = 100)
    acf_min_values : int, never use fewer than this number of terms to find a correlation (default = 4)
Attributes
----------
n_classes : int, extracted from the data
classifiers : array of shape = [n_trees] of DecisionTree classifiers
intervals : array of shape = [n_trees][2] stores indexes of start and end points for all classifiers
TO DO: handle missing values, unequal length series and multivariate problems
"""
def __init__(self,
n_trees=200,
random_state=None,
min_interval=16,
acf_lag=100,
acf_min_values=4
):
super(RandomIntervalSpectralForest, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_trees)
self.n_trees=n_trees
self.random_state = random_state
random.seed(random_state)
self.min_interval=min_interval
self.acf_lag=acf_lag
self.acf_min_values=acf_min_values
# These are all set in fit
self.n_classes = 0
self.series_length = 0
self.classifiers = []
self.intervals=[]
self.lags=[]
self.classes_ = []
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y) using random intervals and spectral features
Parameters
----------
        X : array-like or sparse matrix of shape = [n_instances, series_length] or shape = [n_instances, n_columns]
            The training input samples. If a Pandas data frame is passed it must have a single column (i.e. univariate
            classification). RISE has no bespoke method for multivariate classification as yet.
y : array-like, shape = [n_instances] The class labels.
Returns
-------
self : object
"""
if isinstance(X, pd.DataFrame):
if X.shape[1] > 1:
raise TypeError("RISE cannot handle multivariate problems yet")
elif isinstance(X.iloc[0, 0], pd.Series):
X = np.asarray([a.values for a in X.iloc[:, 0]])
else:
            raise TypeError(
                "Input should either be a 2d numpy array, or a pandas dataframe with a single column of Series objects (RISE cannot yet handle multivariate problems)")
n_instances, self.series_length = X.shape
self.n_classes = np.unique(y).shape[0]
self.classes_ = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
self.intervals=np.zeros((self.n_trees, 2), dtype=int)
self.intervals[0][0] = 0
self.intervals[0][1] = self.series_length
for i in range(1, self.n_trees):
self.intervals[i][0]=random.randint(self.series_length - self.min_interval)
self.intervals[i][1]=random.randint(self.intervals[i][0] + self.min_interval, self.series_length)
# Check lag against global properties
if self.acf_lag > self.series_length-self.acf_min_values:
self.acf_lag = self.series_length - self.acf_min_values
if self.acf_lag < 0:
self.acf_lag = 1
self.lags=np.zeros(self.n_trees, dtype=int)
for i in range(0, self.n_trees):
temp_lag=self.acf_lag
if temp_lag > self.intervals[i][1]-self.intervals[i][0]-self.acf_min_values:
temp_lag = self.intervals[i][1] - self.intervals[i][0] - self.acf_min_values
if temp_lag < 0:
temp_lag = 1
self.lags[i] = int(temp_lag)
acf_x = np.empty(shape=(n_instances, self.lags[i]))
ps_len = (self.intervals[i][1] - self.intervals[i][0]) / 2
ps_x = np.empty(shape=(n_instances, int(ps_len)))
for j in range(0, n_instances):
acf_x[j] = acf(X[j,self.intervals[i][0]:self.intervals[i][1]], temp_lag)
ps_x[j] = ps(X[j, self.intervals[i][0]:self.intervals[i][1]])
transformed_x = np.concatenate((acf_x,ps_x),axis=1)
# transformed_x=acf_x
tree = deepcopy(self.base_estimator)
tree.fit(transformed_x, y)
self.classifiers.append(tree)
return self
def predict(self, X):
"""
Find predictions for all cases in X. Built on top of predict_proba
Parameters
----------
X : array-like or sparse matrix of shape = [n_instances, n_columns] or a data frame.
If a Pandas data frame is passed,
Returns
-------
output : array of shape = [n_instances]
"""
probs=self.predict_proba(X)
return [self.classes_[np.argmax(prob)] for prob in probs]
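    # Illustrative usage (sketch; X_train, y_train and X_test are assumptions:
    # 2d numpy arrays of shape [n_instances, series_length] and a label vector):
    #
    # rise = RandomIntervalSpectralForest(n_trees=50, random_state=0)
    # rise.fit(X_train, y_train)
    # predictions = rise.predict(X_test)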
def predict_proba(self, X):
"""
Find probability estimates for each class for all cases in X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_instances, n_columns]
The training input samples. If a Pandas data frame is passed,
Local variables
----------
n_samps : int, number of cases to classify
n_columns : int, number of attributes in X, must match _num_atts determined in fit
Returns
-------
output : array of shape = [n_instances, n_classes] of probabilities
"""
if isinstance(X, pd.DataFrame):
if X.shape[1] > 1:
raise TypeError("RISE cannot handle multivariate problems yet")
elif isinstance(X.iloc[0, 0], pd.Series):
X = np.asarray([a.values for a in X.iloc[:, 0]])
else:
            raise TypeError(
                "Input should either be a 2d numpy array, or a pandas dataframe with a single column of Series objects (RISE cannot yet handle multivariate problems)")
rows,cols=X.shape
#HERE Do transform again
n_cases, n_columns = X.shape
if n_columns != self.series_length:
raise TypeError(" ERROR number of attributes in the train does not match that in the test data")
sums = np.zeros((X.shape[0],self.n_classes), dtype=np.float64)
for i in range(0, self.n_trees):
            acf_x = np.empty(shape=(n_cases, self.lags[i]))
import numpy as np
#--------------------------------------------------------------------------
# Evalute the gradient and objective of QSP function, provided that
# phi is symmetric
#
# Input:
# phi --- Variables
# delta --- Samples
# opts --- Options structure with fields
# target: target function
# parity: parity of phi (0 -- even, 1 -- odd)
#
# Output:
# grad --- Gradient of obj function
# obj --- Objective function value
#
#--------------------------------------------------------------------------
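# Illustrative call (sketch): the shape of the options dictionary follows the
# comment block above; the target function and parity used here are assumptions.
#
# options = {"target": lambda x: 0.5 * x, "parity": 0}
# grad, obj, hess = QSPHess_sym(phi, delta, options)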
def QSPHess_sym(phi, delta, options) -> (object, object, object):
m = len(delta)
d = len(phi)
obj = np.zeros((m, 1))
grad = np.zeros((m, d))
hess = np.zeros((d, d, m))
sigmaz = np.array([[1, 0],
[0, -1]])
temp_sig = 1j * sigmaz
    gate = np.array([[np.exp(1j * np.pi / 4), 0],
                     [0, np.exp(-1j * np.pi / 4)]])
exp_theta = np.exp(1j * phi)
targetx = options["target"]
parity = options["parity"]
# Start Hessian
for i in range(m):
x = delta[i]
Wx = np.array([[x, 1j * np.sqrt(1 - x**2)],
                       [1j * np.sqrt(1 - x**2), x]])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 14:28:46 2020
@author: metzgern
"""
import os
import h5py
import torch
import numpy as np
import csv
from tqdm import tqdm
from datetime import datetime
import lib.utils as utils
from lib.utils import FastTensorDataLoader
import pdb
class SwissCrops(object):
# Complete list
label = ['0_unknown', 'Barley', 'Beets', 'Berries', 'Biodiversity', 'Chestnut', 'Fallow', 'Field bean', 'Forest', 'Gardens',
'Grain', 'Hedge', 'Hemp', 'Hops', 'Linen', 'Maize', 'Meadow', 'MixedCrop', 'Multiple', 'Oat', 'Orchards', 'Pasture',
'Potatoes', 'Rapeseed', 'Rye', 'Sorghum', 'Soy', 'Spelt', 'Sugar_beets', 'Sunflowers', 'Vegetables', 'Vines', 'Wheat',
'unknownclass1', 'unknownclass2', 'unknownclass3']
#Updated list after selection of most frequent ones....
label = ['Meadow','Potatoes', 'Pasture', 'Maize', 'Sugar_beets', 'Sunflowers', 'Vegetables', 'Vines', 'Wheat', 'WinterBarley', 'WinterRapeseed', 'WinterWheat']
label_dict = {k: i for i, k in enumerate(label)}
reverse_label_dict = {v: k for k, v in label_dict.items()}
def __init__(self, root, mode='train', device = torch.device("cpu"),
neighbourhood=3, cloud_thresh=0.05,
nsamples=float("inf"),args=None,
step=1, trunc=9, datatype="2",
singlepix=False, noskip=False, validation_from_train_split=0.15):
self.datatype = datatype
self.normalize = True
self.shuffle = True
self.singlepix = singlepix
self.validation_from_train_split = validation_from_train_split
self.root = root
self.nb = neighbourhood
self.cloud_thresh = cloud_thresh
self.device = device
self.n = nsamples
self.mode = mode
self.noskip = noskip
if noskip:
raise Exception("--noskip option not supported for swissdata.")
        if args is None:
argsdict = {"dataset": "swisscrop", "sample_tp": None, "cut_tp": None, "extrap": False}
self.args = utils.Bunch(argsdict)
# calculated from 50k samples
#self.means = [0.40220731, 0.2304579, 0.21944561, 0.22120122, 0.00414104, 0.00608051, 0.00555058, 0.00306677, 0.00378373]
#self.stds = [0.24774854, 0.29837374, 0.3176923, 0.29580569, 0.00475051, 0.00396885, 0.00412216, 0.00274612, 0.00241172]
        # define the previously calculated global training mean and std...
self.means = [0.4071655 , 0.2441012 , 0.23429523, 0.23402453, 0.00432794, 0.00615292, 0.00566292, 0.00306609, 0.00367624]
self.stds = [0.24994541, 0.30625425, 0.32668449, 0.30204761, 0.00490984, 0.00411067, 0.00426914, 0.0027143 , 0.00221963]
# Define some mapping to sort out the labels for fewer cases
# All Labels
self.labellist = np.array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
43, 44, 45, 50, 51, 52, 53, 54, 58, 59, 60, 61, 62,
63, 64, 65, 66, 67, 68, 69, 71, 74, 75, 76, 77, 78,
81, 84, 85, 88, 91, 93, 95, 108, 109, 110, 113, 114, 120,
121, 123])
self.labellistglob = np.array([ 39, 49, 26, 48, 13, 48, 20, 8, 41, 51, 32, 13, 36, 20, 20, 38, 2,
30, 30, 40, 50, 34, 42, 18, 15, 10, 29, 19, 31, 43, 33, 13, 18, 45,
45, 7, 5, 33, 3, 4, 9, 9, 22, 4, 24, 50, 42, 21, 21, 21, 21,
21, 27, 27, 27, 21, 21, 27, 17, 21, 46, 1, 28, 37, 3, 16, 44, 44,
46, 6, 46, 46, 14, 14, 14, 11, 23, 4, 12, 27])
# 13 Labels
self.labellist13 = np.array([ 2, 4, 6, 7, 10, 13, 14, 15, 16, 18, 19, 21, 23,
34, 35, 53, 54, 58, 59, 60, 61, 62, 63, 64, 65, 66,
67, 68, 71, 74, 88, 93, 95, 123])
self.labellistglob13 = np.array([ 49, 48, 48, 20, 51, 36, 20, 20, 38, 30, 30, 50, 42, 45, 45, 50, 42,
21, 21, 21, 21, 21, 27, 27, 27, 21, 21, 27, 21, 46, 46, 46, 46, 27])
# 23 Labels
self.labellist23 = np.array([ 2, 3, 4, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16,
18, 19, 21, 22, 23, 26, 27, 34, 35, 44, 45, 53, 54,
58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 71, 74,
84, 85, 88, 93, 95, 108, 109, 110, 123])
self.labellistglob23 = np.array([49, 26, 48, 48, 20, 8, 41, 51, 32, 36, 20, 20, 38, 30, 30, 50, 34,
42, 10, 29, 45, 45, 9, 9, 50, 42, 21, 21, 21, 21, 21, 27, 27, 27,
21, 21, 27, 21, 46, 44, 44, 46, 46, 46, 14, 14, 14, 27])
if mode=="train" or self.mode=="train_from_train" or self.mode=="validation_from_train":
data_file = self.train_file
elif mode=="test":
data_file = self.test_file
if not os.path.exists(data_file):
print("haven't found " + data_file + " . Starting to preprocess the whole dataset...")
#self.process_data()
self.hdf5dataloader = h5py.File(data_file, "r", rdcc_nbytes=1024**2*4000,rdcc_nslots=1e7)
self.nsamples = self.hdf5dataloader["data"].shape[0]
self.nfeatures = self.hdf5dataloader["data"].shape[2]
# get timestamps
if not os.path.exists( os.path.join(self.processed_folder, self.time_file)):
self.read_date_file()
self.timestamps = h5py.File(os.path.join(self.processed_folder, self.time_file), "r")["tt"][:]
assert(self.timestamps.size==self.hdf5dataloader["data"].shape[1])
self.features = self.hdf5dataloader["data"].shape[2]
#selective features and timestamps
self.step = step # skippage of timesteps
self.trunc = trunc # feature truncation
self.feature_trunc = trunc*self.nb**2
#self.mask = np.kron(np.hstack([np.ones(self.trunc),np.zeros( self.nfeatures//self.nb**2 - self.trunc)]), np.ones(9))
def process_data(self):
os.makedirs(self.processed_folder, exist_ok=True)
train_dataset = Dataset("data/SwissCrops/raw/train_set_24x24_debug.hdf5", mode='train', eval_mode=False)
raw_train_samples = len(train_dataset)
test_dataset = Dataset("data/SwissCrops/raw/train_set_24x24_debug.hdf5", mode='test', eval_mode=False)
raw_test_samples = len(test_dataset)
num_invalid_obs = 0
        # Calculate the number of training samples
raw_batch = (24 - int(self.nb/2)*2)**2
ntrainsamples = raw_batch * raw_train_samples
ntestsamples = raw_batch * raw_test_samples
        # Implement index aliasing to perform shuffling
trainindices = np.arange(ntrainsamples)
testindices = np.arange(ntestsamples)
## TRAIN DATASET ##
        shuffle_chunks = 20  # 30: limit of 64GB RAM, 60: limit of 32GB RAM
        splits = np.array_split(trainindices, shuffle_chunks)
# get measures
X, target, target_local_1, target_local_2, cloud_cover = train_dataset[0]
raw_features = X.shape[1]
nfeatures = raw_features* self.nb**2
seq_length = X.shape[0]
ntargetclasses = train_dataset.n_classes
ntargetclasses_l1 = train_dataset.n_classes_local_1
ntargetclasses_l2 = train_dataset.n_classes_local_2
# Open a hdf5 files and create arrays
hdf5_file_train = h5py.File(self.train_file , mode='w', rdcc_nbytes=1024**2*16000, rdcc_nslots=1e7, libver='latest')
hdf5_file_train.create_dataset("data", (ntrainsamples, seq_length, nfeatures), np.float16, chunks=(1500, seq_length, nfeatures) )
hdf5_file_train.create_dataset("mask", (ntrainsamples, seq_length, nfeatures), np.bool, chunks=(1500, seq_length, nfeatures) )
hdf5_file_train.create_dataset("labels", (ntrainsamples, ntargetclasses), np.int8, chunks=(1500, ntargetclasses ))
hdf5_file_train.create_dataset("labels_local1", (ntrainsamples, ntargetclasses_l1), np.int8, chunks=(1500, ntargetclasses_l1) )
hdf5_file_train.create_dataset("labels_local2", (ntrainsamples, ntargetclasses_l2), np.int8, chunks=(1500, ntargetclasses_l1) )
#prepare first splitblock
X_merge = np.zeros( (len(splits[0]), seq_length, nfeatures) , dtype=np.float16)
mask_merge = np.ones( (len(splits[0]), seq_length, nfeatures) , dtype=bool)
target_merge = np.ones( (len(splits[0]), ntargetclasses) , dtype=np.int8)
target_l1_merge = np.ones( (len(splits[0]), ntargetclasses_l1) , dtype=np.int8)
target_l2_merge = np.ones( (len(splits[0]), ntargetclasses_l2) , dtype=np.int8)
missing = 0
observed = 0
first_batch = True # will be changed after the first batch
accum_counter = 0
split_counter = 0
n_valid = 0
summation = np.zeros( (raw_features) )
sq_summation = np.zeros( (raw_features) )
#for idx in tqdm(range(raw_train_samples)):
for idx in tqdm(range(raw_train_samples)):
X, target, target_local_1, target_local_2, cloud_cover = train_dataset[idx]
# check if data can be cropped
cloud_mask = cloud_cover>self.cloud_thresh
invalid_obs = np.sum(cloud_mask,axis=0)==0
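            # The strided views built below expose every nb x nb neighbourhood of the
            # mask (and later of the data and targets) as a sliding window, without
            # copying the underlying arrays.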
sub_shape = (self.nb, self.nb)
view_shape = tuple(np.subtract(invalid_obs.shape, sub_shape) + 1) + sub_shape
strides = invalid_obs.strides + invalid_obs.strides
sub_invalid = np.lib.stride_tricks.as_strided(invalid_obs,view_shape,strides)
# store the number of invalid observations
num_invalid_obs += np.sum ( (np.sum(sub_invalid, axis=(2,3))!=0) )
assert(num_invalid_obs==0)
# Prepare for running mean and std calculation
valid_ind = np.nonzero( (~cloud_mask)[:,np.newaxis] )
valid_data = X[valid_ind[0],:,valid_ind[2],valid_ind[3]]
summation += valid_data.sum(0)
sq_summation += (valid_data**2).sum(0)
n_valid += (valid_data**2).shape[0]
if self.normalize:
norm_data = (valid_data-self.means)/self.stds
X[valid_ind[0],:,valid_ind[2],valid_ind[3]] = norm_data
#prepare mask for later
sub_shape = (seq_length, self.nb, self.nb)
view_shape = tuple(np.subtract(cloud_mask.shape, sub_shape) + 1) + sub_shape
strides = cloud_mask.strides + cloud_mask.strides
sub_cloud = np.lib.stride_tricks.as_strided(cloud_mask,view_shape,strides)
ravel_mask = sub_cloud.reshape(raw_batch, seq_length, self.nb**2)
cloud_mask = np.tile(ravel_mask, (1,1, raw_features))
mask = ~cloud_mask
# Subtile the features
sub_shape = (seq_length, raw_features, self.nb, self.nb)
view_shape = tuple(np.subtract(X.shape, sub_shape) + 1) + sub_shape
strides = X.strides + X.strides
sub_X = np.lib.stride_tricks.as_strided(X,view_shape,strides)
ravel_X = sub_X.reshape(raw_batch, sub_X.shape[4], nfeatures )
# subconvolove Targets
sub_shape = (self.nb, self.nb)
view_shape = tuple(np.subtract(target.shape, sub_shape) + 1) + sub_shape
strides = target.strides + target.strides
sub_target = np.lib.stride_tricks.as_strided(target,view_shape,strides)
sub_target_local_1 = np.lib.stride_tricks.as_strided(target_local_1,view_shape,strides)
sub_target_local_2 = np.lib.stride_tricks.as_strided(target_local_2,view_shape,strides)
ravel_mask = sub_invalid.reshape(raw_batch, 1, self.nb**2)
ravel_target = sub_target[:,:,self.nb//2, self.nb//2].reshape(-1)
ravel_target_local_1 = sub_target_local_1[:,:,self.nb//2, self.nb//2].reshape(-1)
ravel_target_local_2 = sub_target_local_2[:,:,self.nb//2, self.nb//2].reshape(-1)[:]
# bring to one-hot format
OH_target = np.zeros((ravel_target.size, ntargetclasses))
OH_target[np.arange(ravel_target.size),ravel_target] = 1
OH_target_local_1 = np.zeros((ravel_target_local_1.size, ntargetclasses_l1))
OH_target_local_1[np.arange(ravel_target_local_1.size),ravel_target_local_1] = 1
OH_target_local_2 = np.zeros((ravel_target_local_2.size, ntargetclasses_l2))
OH_target_local_2[np.arange(ravel_target_local_2.size),ravel_target_local_2] = 1
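# Note: the fancy-indexing construction above is the usual dense one-hot trick; an
# equivalent, arguably clearer form is OH_target = np.eye(ntargetclasses)[ravel_target]
# (and analogously for the two local label sets).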
# If even one pixel in a neighbourhood is corrupted, we do not use it => set the complete mask of this (sample, timestep) as unobserved
mask = np.tile( (mask.sum(2)==nfeatures)[:,:,np.newaxis] , (1,1,nfeatures))
# "destroy" data, that is corrputed by bad weather. We will never use it!
ravel_X[~mask] = 0
#for statistics
missing += np.sum(mask == 0.)
observed += np.sum(mask == 1.)
# Accumulate data before writing it to file
# fill in HDF5 file
if first_batch:
start_ix = 0
stop_ix = raw_batch
first_batch = False
else:
start_ix = stop_ix
stop_ix += raw_batch
if stop_ix<len(splits[split_counter]):
#write to memory file
X_merge[start_ix:stop_ix] = ravel_X
mask_merge[start_ix:stop_ix] = mask
target_merge[start_ix:stop_ix] = OH_target
target_l1_merge[start_ix:stop_ix] = OH_target_local_1
target_l2_merge[start_ix:stop_ix] = OH_target_local_2
else:
#Write to file, if merge is big enough
# determine the amount of overflow ("overdose")
overdose = stop_ix - len(splits[split_counter])
validdose = raw_batch - overdose
# add to memory only how much fits in it
X_merge[start_ix:] = ravel_X[:validdose]
mask_merge[start_ix:] = mask[:validdose]
target_merge[start_ix:] = OH_target[:validdose]
target_l1_merge[start_ix:] = OH_target_local_1[:validdose]
target_l2_merge[start_ix:] = OH_target_local_2[:validdose]
#shuffle the blocks
self.shuffle = True
if self.shuffle:
merge_ind = np.arange(len(splits[split_counter]))
np.random.shuffle(merge_ind)
X_merge_write = X_merge[merge_ind]
mask_merge_write = mask_merge[merge_ind]
target_merge_write = target_merge[merge_ind]
target_l1_merge_write = target_l1_merge[merge_ind]
target_l2_merge_write = target_l2_merge[merge_ind]
else:
X_merge_write = X_merge
mask_merge_write = mask_merge
target_merge_write = target_merge
target_l1_merge_write = target_l1_merge
target_l2_merge_write = target_l2_merge
#fill in data to hdf5 file
sorted_indices = splits[split_counter]
hdf5_file_train["data"][sorted_indices[0]:sorted_indices[-1]+1, ...] = X_merge_write
hdf5_file_train["mask"][sorted_indices[0]:sorted_indices[-1]+1, ...] = mask_merge_write
hdf5_file_train["labels"][sorted_indices[0]:sorted_indices[-1]+1, ...] = target_merge_write
hdf5_file_train["labels_local1"][sorted_indices[0]:sorted_indices[-1]+1, ...] = target_l1_merge_write
hdf5_file_train["labels_local2"][sorted_indices[0]:sorted_indices[-1]+1, ...] = target_l2_merge_write
accum_counter = 0
split_counter += 1
#prepare next merge variable
if split_counter<len(splits):
X_merge = np.zeros( (len(splits[split_counter]), seq_length, nfeatures) , dtype=np.float16)
mask_merge = np.ones( (len(splits[split_counter]), seq_length, nfeatures) , dtype=bool)
target_merge = np.ones( (len(splits[split_counter]), ntargetclasses) , dtype=np.int8)
target_l1_merge = np.ones( (len(splits[split_counter]), ntargetclasses_l1) , dtype=np.int8)
target_l2_merge = np.ones( (len(splits[split_counter]), ntargetclasses_l2) , dtype=np.int8)
# fill in the overflow ("overdose") from the current split/chunk
start_ix = 0
stop_ix = overdose
X_merge[start_ix:stop_ix] = ravel_X[validdose:]
mask_merge[start_ix:stop_ix] = mask[validdose:]
target_merge[start_ix:stop_ix] = OH_target[validdose:]
target_l1_merge[start_ix:stop_ix] = OH_target_local_1[validdose:]
target_l2_merge[start_ix:stop_ix] = OH_target_local_2[validdose:]
accum_counter += 1
print("found ", num_invalid_obs, " invalid Neighbourhood-Observations in training data")
assert(num_invalid_obs==0)
## TEST DATASET ##
shuffle_chucks = 25 #15 # 30: limit of 64GB RAM, 60: limit of 32GB RAM
splits = np.array_split(testindices, shuffle_chucks)
hdf5_file_test = h5py.File(self.test_file, mode='w', rdcc_nbytes =1024**2*24000, rdcc_nslots=1e7, libver='latest')
hdf5_file_test.create_dataset("data", (ntestsamples, seq_length, nfeatures), np.float16, chunks=(10000, seq_length, nfeatures) )
hdf5_file_test.create_dataset("mask", (ntestsamples, seq_length, nfeatures), np.bool, chunks=(10000, seq_length, nfeatures) )
hdf5_file_test.create_dataset("labels", (ntestsamples, ntargetclasses), np.int8, chunks=(10000, ntargetclasses) )
hdf5_file_test.create_dataset("labels_local1", (ntestsamples, ntargetclasses_l1), np.int8, chunks=(10000, ntargetclasses_l1) )
hdf5_file_test.create_dataset("labels_local2", (ntestsamples, ntargetclasses_l2), np.int8, chunks=(10000, ntargetclasses_l2) )
#prepare first splitblock
X_merge = np.zeros( (len(splits[0]), seq_length, nfeatures) , dtype=np.float16)
mask_merge = np.ones( (len(splits[0]), seq_length, nfeatures) , dtype=bool)
target_merge = np.ones( (len(splits[0]), ntargetclasses) , dtype=np.int8)
target_l1_merge = np.ones( (len(splits[0]), ntargetclasses_l1) , dtype=np.int8)
target_l2_merge = np.ones( (len(splits[0]), ntargetclasses_l2) , dtype=np.int8)
missing = 0
observed = 0
first_batch = True # will be changed after the first batch
accum_counter = 0
split_counter = 0
#for idx in tqdm(range(raw_test_samples)):
for idx in tqdm(range(raw_test_samples)):
X, target, target_local_1, target_local_2, cloud_cover = test_dataset[idx]
# check if data can be cropped
cloud_mask = cloud_cover>self.cloud_thresh
invalid_obs = np.sum(cloud_mask,axis=0)==0
sub_shape = (self.nb, self.nb)
view_shape = tuple(np.subtract(invalid_obs.shape, sub_shape) + 1) + sub_shape
strides = invalid_obs.strides + invalid_obs.strides
sub_invalid = np.lib.stride_tricks.as_strided(invalid_obs,view_shape,strides)
# store the number of invalid observations
num_invalid_obs += np.sum ( (np.sum(sub_invalid, axis=(2,3))!=0) )
assert(num_invalid_obs==0)
# Prepare for running mean and std calculation
valid_ind = np.nonzero( (~cloud_mask)[:,np.newaxis] )
valid_data = X[valid_ind[0],:,valid_ind[2],valid_ind[3]]
if self.normalize:
norm_data = (valid_data-self.means)/self.stds
X[valid_ind[0],:,valid_ind[2],valid_ind[3]] = norm_data
#prepare mask for later
sub_shape = (seq_length, self.nb, self.nb)
view_shape = tuple(np.subtract(cloud_mask.shape, sub_shape) + 1) + sub_shape
strides = cloud_mask.strides + cloud_mask.strides
sub_cloud = np.lib.stride_tricks.as_strided(cloud_mask,view_shape,strides)
ravel_mask = sub_cloud.reshape(raw_batch, seq_length, self.nb**2)
cloud_mask = np.tile(ravel_mask, (1,1, raw_features))
mask = ~cloud_mask
# Subtile the features
sub_shape = (seq_length, raw_features, self.nb, self.nb)
view_shape = tuple(np.subtract(X.shape, sub_shape) + 1) + sub_shape
strides = X.strides + X.strides
sub_X = np.lib.stride_tricks.as_strided(X,view_shape,strides)
ravel_X = sub_X.reshape(raw_batch, sub_X.shape[4], nfeatures )
# Subtile the targets
sub_shape = (self.nb, self.nb)
view_shape = tuple(np.subtract(target.shape, sub_shape) + 1) + sub_shape
strides = target.strides + target.strides
sub_target = np.lib.stride_tricks.as_strided(target,view_shape,strides)
sub_target_local_1 = np.lib.stride_tricks.as_strided(target_local_1,view_shape,strides)
sub_target_local_2 = np.lib.stride_tricks.as_strided(target_local_2,view_shape,strides)
ravel_mask = sub_invalid.reshape(raw_batch, 1, self.nb**2)
ravel_target = sub_target[:,:,self.nb//2, self.nb//2].reshape(-1)
ravel_target_local_1 = sub_target_local_1[:,:,self.nb//2, self.nb//2].reshape(-1)
ravel_target_local_2 = sub_target_local_2[:,:,self.nb//2, self.nb//2].reshape(-1)[:]
# bring to one-hot format
OH_target =
|
np.zeros((ravel_target.size, ntargetclasses))
|
numpy.zeros
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, <NAME>; Luczywo, Nadia
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# DOCS
# =============================================================================
"""Methods based on an aggregating function representing
“closeness to the ideal”.
"""
__all__ = ['TOPSIS']
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from .. import rank
from ..validate import MIN, MAX
from ..utils.doc_inherit import doc_inherit
from ._dmaker import DecisionMaker
# =============================================================================
# Function
# =============================================================================
def topsis(nmtx, ncriteria, nweights):
# apply weights
wmtx =
|
np.multiply(nmtx, nweights)
|
numpy.multiply
|
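# The truncated topsis() above stops at the weighting step. Below is a minimal,
# self-contained sketch of how the full TOPSIS relative-closeness calculation usually
# proceeds (illustration only, not this library's actual implementation; it assumes the
# criteria senses are encoded as +1 for maximisation and -1 for minimisation, and that
# nmtx is already normalised):
import numpy as np

def topsis_closeness(nmtx, ncriteria, nweights):
    # 1) weight the normalised decision matrix
    wmtx = np.multiply(nmtx, nweights)
    # 2) ideal and anti-ideal points per criterion, respecting the optimisation sense
    ideal = np.where(ncriteria == 1, wmtx.max(axis=0), wmtx.min(axis=0))
    anti_ideal = np.where(ncriteria == 1, wmtx.min(axis=0), wmtx.max(axis=0))
    # 3) Euclidean distance of every alternative to both reference points
    d_ideal = np.sqrt(((wmtx - ideal) ** 2).sum(axis=1))
    d_anti = np.sqrt(((wmtx - anti_ideal) ** 2).sum(axis=1))
    # 4) relative closeness to the ideal: higher is better
    return d_anti / (d_ideal + d_anti)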
#!/usr/bin/env python3
#
from __future__ import division, print_function
import os
import sys
import pints
import numpy as np
import myokit
import argparse
if __name__=="__main__":
import platform
parallel = True
if platform.system() == 'Darwin':
import multiprocessing
multiprocessing.set_start_method('fork')
elif platform.system() == 'Windows':
parallel = False
# Check input arguments
parser = argparse.ArgumentParser(
description='Fit all the hERG models to sine wave data')
parser.add_argument('--cell', type=int, default=2, metavar='N',
help='repeat number : 1, 2, 3, 4, 5, 6')
parser.add_argument('--model', type=str, default='wang', metavar='N',
help='which model to use')
parser.add_argument('--repeats', type=int, default=25, metavar='N',
help='number of CMA-ES runs from different initial guesses')
parser.add_argument('--protocol', type=int, default=1, metavar='N',
help='which protocol is used to fit the data: 1 for staircase #1, \
2 for sine wave, 3 for complex AP')
parser.add_argument("--big_pop_size", action='store_true', default=False,
help="whether to use big population size of 100 rather than default")
args = parser.parse_args()
cell = args.cell
# Load project modules
sys.path.append(os.path.abspath(os.path.join('python')))
import priors
import cells
import transformation
import data
import model
# Get model string and params
if args.model == 'mazhari':
model_str = 'Mazhari'
x_found = np.loadtxt('cmaesfits/parameter-sets/mazhari-params.txt', unpack=True)
elif args.model == 'mazhari-reduced':
model_str = 'Maz-red'
x_found = np.loadtxt('cmaesfits/parameter-sets/mazhari-reduced-params.txt', unpack=True)
elif args.model == 'wang':
model_str = 'Wang'
x_found = np.loadtxt('cmaesfits/parameter-sets/wang-params.txt', unpack=True)
elif args.model == 'wang-r1':
model_str = 'Wang-r1'
x_found = np.loadtxt('cmaesfits/parameter-sets/wang-r1-params.txt', unpack=True)
for i in {0, 2, 4, 5, 6, 8, 13}:
x_found[i] = np.exp(x_found[i])
elif args.model == 'wang-r2':
model_str = 'Wang-r2'
x_found =
|
np.loadtxt('cmaesfits/parameter-sets/wang-r2-params.txt', unpack=True)
|
numpy.loadtxt
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 15:12:49 2016
@author: uzivatel
"""
import numpy as np
import timeit
from multiprocessing import Pool, cpu_count
from functools import partial
from sys import platform
import scipy
from copy import deepcopy
from ..qch_functions import overlap_STO, dipole_STO, quadrupole_STO, norm_GTO
from ..positioningTools import fill_basis_transf_matrix
from .general import Coordinate
# do not compute self.exct_spec[exct_i]['coeff_mat']=M_mat unless it is necessary - it slows things down
def _ao_over_line(Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,indx):
'''
This Function calculates row in overlap matrix
'''
# print('Start of parallel calculation with process id:',os.getpid())
NAO=len(Coor_lin)
AO_over_row=np.zeros(NAO)
for ii in range(indx,NAO):
AO_over_row[ii]=overlap_STO(Coor_lin[indx],Coor_lin[ii],np.array(Coeffs_lin[indx]),np.array(Coeffs_lin[ii]),np.array(Exp_lin[indx]),np.array(Exp_lin[ii]),np.array(Orient_lin[indx]),np.array(Orient_lin[ii]))
# print(os.getpid())
return AO_over_row
def _dip_over_line(Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,indx):
'''
This Function calculates row in dipole matrix
'''
NAO=len(Coor_lin)
Dip_over_row=np.zeros((NAO,3))
for ii in range(indx,NAO):
Dip_over_row[ii,:]=dipole_STO(Coor_lin[indx],Coor_lin[ii],np.array(Coeffs_lin[indx]),np.array(Coeffs_lin[ii]),np.array(Exp_lin[indx]),np.array(Exp_lin[ii]),np.array(Orient_lin[indx]),np.array(Orient_lin[ii]))
return Dip_over_row
def _quad_over_line(Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,only_triangle,indx):
'''
This Function calculates row in quadrupole matrix
'''
NAO=len(Coor_lin)
Quad_over_row=np.zeros((NAO,6))
if only_triangle:
start=indx
else:
start=0
for ii in range(start,NAO):
Rik=Coor_lin[ii]-Coor_lin[indx]
R0=np.zeros(3)
Quad_over_row[ii,:]=quadrupole_STO(R0,Rik,np.array(Coeffs_lin[indx]),np.array(Coeffs_lin[ii]),np.array(Exp_lin[indx]),np.array(Exp_lin[ii]),np.array(Orient_lin[indx]),np.array(Orient_lin[ii]))
return Quad_over_row
class Atom:
''' Class containing atom type and index(position in structure)
type : string
Atom type e.g. 'C' or 'H'...
indx : integer
Position of atom in structure class. !Starting from 0!
'''
def __init__(self,typ,indx):
self.type=typ
self.indx=int(indx)
class AO:
''' Class containing all information about atomic orbitals
name : string
Name of the atomic basis e.g. 6-31G*,..
coeff : list of numpy.arrays of real
For every atomic orbital contains expansion coefficients of STO orbital
into GTO (more explained in Notes)
exp : list of numpy.arrays of real
For every atomic orbital contains exponents for GTO orbitals in the expansion
of STO (more explained in Notes)
coor : Coordinate class - position units managed
Information about center of every atomic orbital. Units are coor.units.
Dimension of coor.value is (dimension Norbitals x 3)
type : list of string and integer (dimension Norbitals x 2)
Orbital types for every orbital (for example ``'s', 'p', 'd', '5d', 'f', '7f'``)
atom : list of Atom class (dimension Norbitals)
For every orbital there is a list with atom information. (atom[i].indx
index of the atom in structure class, atom[i].type string with atom type)
nao : integer
Number of orbitals
nao_orient : integer
number of atomic orbitals with orientation (for 's' type orbital there
is only one orientation, for 'p' orbital there are 3 orientations - x, y
and z, and so on for other orbitals) = total number of atomic orbital
basis functions
orient : list (dimension Norbitals)
For every atomic orbital type there is a list with possible atomic orbital
spatial orientations (e.g. one possible orientation could be for f orbital
[2,0,1] which corresponds to X^2Z spatial orientation, or for d orbital [0,1,1]
which corresponds to YZ spatial orientation)
indx_orient : list (dimension Norbitals_orient)
For every spatially oriented orbital there is a list with the number of the atomic
orbital to which this orientation corresponds at the first position, and the
orientation of the orbital at the second position (e.g. [2, [0, 1, 0]],
which means orientation in the y direction of the third orbital (numbering from 0),
which is a p-type orbital (the sum of all numbers in the orientation is 1))
overlap : numpy.array of real (dimension N_AO_orient x N_AO_orient)
Overlap matrix between AO: overlap[i,j]=<AO_i|AO_j>
dipole : dictionary
* **dipole['Dip_X']** = numpy.array of real (dimension N_AO_orient x N_AO_orient)
with dipole x coordinate in AO basis: dipole['Dip_X'][i,j]=<AO_i|x|AO_j>
* **dipole['Dip_Y']** = numpy.array of real (dimension N_AO_orient x N_AO_orient)
with dipole y coordinate in AO basis: dipole['Dip_Y'][i,j]=<AO_i|y|AO_j>
* **dipole['Dip_Z']** = numpy.array of real (dimension N_AO_orient x N_AO_orient)
with dipole z coordinate in AO basis: dipole['Dip_Z'][i,j]=<AO_i|z|AO_j>
grid : list of numpy arrays of float (dimension Nao x Grid_Nx x Grid_Ny x Grid_Nz)
Slater orbital basis evaluated on given grid
quadrupole : numpy.array of real (dimension 6 x N_AO_orient x N_AO_orient)
quadrupole components in AO basis:
* quadrupole[0,:,:] = quadrupole xx matrix <AO_i|x^2|AO_j>
* quadrupole[1,:,:] = quadrupole xy matrix <AO_i|xy|AO_j>
* quadrupole[2,:,:] = quadrupole xz matrix <AO_i|xz|AO_j>
* quadrupole[3,:,:] = quadrupole yy matrix <AO_i|yy|AO_j>
* quadrupole[4,:,:] = quadrupole yz matrix <AO_i|yz|AO_j>
* quadrupole[5,:,:] = quadrupole zz matrix <AO_i|zz|AO_j>
Functions
-----------
add_orbital :
Add atomic orbital including expansion coefficients into gaussian orbitals
coordinates, type, atom on which is centered.
rotate :
Rotate the atomic orbitals and all additional quantities by specified
angles in radians in positive direction.
rotate_1 :
Inverse rotation to rotate.
move :
Moves the atomic orbitals and all additional quantities along a specified
vector
copy :
Create 1 to 1 copy of the atomic orbitals with all classes and types.
get_overlap :
Calculate overlap matrix between atomic orbitals
import_multiwfn_overlap :
Imports the overlap matrix from multiwfn output
get_dipole_matrix :
Calculate dipoles between each pair of atomic orbitals and outputs it
as matrix
get_quadrupole :
Calculate quadrupole moments between each pair of atomic orbitals and
outputs it as matrix
get_slater_ao_grid :
Evaluate slater atomic orbital on given grid
get_all_slater_grid :
Evaluate all slater orbitals on given grid (create slater orbital basis
on given grid)
Notes
----------
Expansion of STO (slater orbital) into GTO (gaussian orbital) basis
is defined as: STO=Sum(coef*GTO(r,exp)*NormGTO(r,exp)) where r is center of
the orbital (position of the corresponding atom)
'''
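# Illustrative summary of the Notes above: for an orbital with Cartesian powers (l, m, n)
# centred at R, a value at grid point r is assembled roughly as
#   STO(r) ~ (x-Rx)^l * (y-Ry)^m * (z-Rz)^n * sum_k coeff[k] * norm_GTO(R, exp[k], (l,m,n)) * exp(-exp[k] * |r-R|^2)
# which is what get_slater_ao_grid() evaluates on a grid further below.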
def __init__(self):
self.name='AO-basis'
self.coeff=[]
self.exp=[]
self.coor=None
self.type=[]
self.atom=[]
self.nao=0
self.nao_orient=0
self.orient=[]
self.indx_orient=[]
self.init=False
self.overlap=None
self.dipole=None
self.quadrupole=None
self.grid=None
def add_orbital(self,coeffs,exps,coor,orbit_type,atom_indx,atom_type):
""" Adds atomic orbital including all needed informations
Parameters
----------
coeffs : numpy array or list of floats
Expansion coefficients of the slater atomic orbital into gaussian
atomic orbitals
exps : numpy array or list of floats
Exponents of gaussian orbitals in expansion of the slater atomic
orbital.
coor : Coordinate class
Centers of atomic orbitals (position units managed)
orbit_type : string
Type of the orbital e.g. 's','d','5d'...
atom_indx : integer
index of atom on which orbital is centered
atom_type : string
Atom type on which orbital is centered e.g. 'C','H',...
"""
if type(atom_type)==list or type(atom_type)==np.ndarray:
if not self.init:
if type(coeffs)==np.ndarray:
self.coeff=list(coeffs) # it should be list of numpy arrays
elif type(coeffs)==list:
self.coeff=coeffs.copy()
else:
raise IOError('Input expansion coefficients of AO should be a list of numpy arrays or a numpy array')
if type(exps)==np.ndarray:
self.exp=list(exps) # it should be list of numpy arrays
elif type(exps)==list:
self.exp=exps.copy()
else:
raise IOError('Input exponents of AO should be a list of numpy arrays or a numpy array')
self.coor=Coordinate(coor) # assuming that all orbital coordinates will be input in Bohrs
self.type=orbit_type
if type(atom_indx)==list:
for ii in range(len(atom_indx)):
self.atom.append(Atom(atom_type[ii],atom_indx[ii]))
else:
self.atom.append(Atom(atom_type,atom_indx))
self.nao=len(orbit_type)
for ii in range(len(self.type)):
orient=l_orient(self.type[ii])
self.orient.append(orient)
self.nao_orient+=len(orient)
for jj in range(len(orient)):
self.indx_orient.append([ii,orient[jj]])
self.init=True
else:
self.coor.add_coor(coor)
for ii in range(len(orbit_type)):
self.coeff.append(np.array(coeffs[ii],dtype='f8'))
self.exp.append(np.array(exps[ii],dtype='f8'))
self.type.append(orbit_type[ii])
self.atom.append(Atom(atom_type[ii],int(atom_indx[ii])))
self.nao+=1
orient=l_orient(orbit_type[ii])
self.orient.append(orient)
self.nao_orient+=len(orient)
for jj in range(len(orient)):
self.indx_orient.append([self.nao-1,orient[jj]])
else:
if not self.init:
self.coor=Coordinate(coor)
else:
self.coor.add_coor(coor)
self.coeff.append(np.array(coeffs,dtype='f8'))
self.exp.append(np.array(exps,dtype='f8'))
self.type.append(orbit_type)
self.atom.append(Atom(atom_type,atom_indx))
self.nao+=1
orient=l_orient(orbit_type[0])
self.orient.append(orient)
self.nao_orient+=len(orient)
for jj in range(len(orient)):
self.indx_orient.append([self.nao-1,orient[jj]])
self.init=True
def get_overlap(self,nt=0,verbose=False):
""" Calculate overlap matrix between atomic orbitals
Parameters
----------
nt : integer (optional init = 0)
Specifies how many cores should be used for the calculation.
Special cases:
``nt=0`` all available cores are used for the calculation.
``nt=1`` serial calculation is performed.
verbose : logical (optional init = False)
If ``True`` information about time needed for overlap calculation
will be printed
Notes
---------
Overlap matrix is stored in:\n
**self.overlap** \n
as numpy array of float dimension (Nao_orient x Nao_orient)
"""
# This should definitely be made more efficient using np (vectorization)
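# The platform check below picks the execution path: a multiprocessing Pool on
# Linux/Cygwin (typ='paralell'), a serial row-by-row loop on Windows or when nt=1
# (typ='seriall'), and a legacy explicit double loop otherwise (typ='seriall_old').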
if (platform=='cygwin' or platform=="linux" or platform == "linux2") and nt!=1 and nt>=0:
typ='paralell'
elif platform=='win32' or nt==1:
typ='seriall'
else:
typ='seriall_old'
if typ=='seriall' or typ=='paralell':
SS=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
start_time = timeit.default_timer()
''' Convert all input parameters into matrices with dimension Nao_orient '''
# prepare input
Coor_lin=np.zeros((self.nao_orient,3))
Coeffs_lin=[]
Exp_lin=[]
Orient_lin=[]
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
Coor_lin[counter1]=self.coor._value[ii]
Coeffs_lin.append(self.coeff[ii])
Exp_lin.append(self.exp[ii])
Orient_lin.append(self.orient[ii][jj])
counter1+=1
ao_over_line_partial = partial(_ao_over_line, Coor_lin,Coeffs_lin,Exp_lin,Orient_lin)
# The only parameter of this function is the index of the row which is calculated
elif typ=='seriall_old':
SS=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
counter1=0
start_time = timeit.default_timer()
percent=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
counter2=0
for kk in range(self.nao):
for ll in range(len(self.orient[kk])):
if counter1>=counter2:
SS[counter1,counter2] = overlap_STO(self.coor._value[ii],self.coor._value[kk],self.coeff[ii],self.coeff[kk],self.exp[ii],self.exp[kk],self.orient[ii][jj],self.orient[kk][ll])
counter2 += 1
counter1 += 1
elapsed = timeit.default_timer() - start_time
if elapsed>60.0:
if percent!=(counter1*10//self.nao_orient)*10:
if percent==0:
print('Overlap matrix calculation progres:')
percent=(counter1*10//self.nao_orient)*10
print(percent,'% ',sep="",end="")
if verbose:
print('Elapsed time for serial overlap matrix allocation:',elapsed)
for ii in range(self.nao_orient):
for jj in range(ii+1,self.nao_orient):
SS[ii,jj]=SS[jj,ii]
if verbose:
print(' ')
self.overlap=np.copy(SS)
if typ=='paralell':
''' Parallel part '''
# print('Prepairing parallel calculation')
if nt>0:
pool = Pool(processes=nt)
else:
pool = Pool(processes=cpu_count())
index_list=range(self.nao_orient)
SS= np.array(pool.map(ao_over_line_partial,index_list))
pool.close() # ATTENTION HERE
pool.join()
elif typ=='seriall':
index_list=range(self.nao_orient)
SS=np.zeros((self.nao_orient,self.nao_orient))
''' Seriall part '''
for ii in range(self.nao_orient):
SS[ii,:]=ao_over_line_partial(index_list[ii])
''' Fill the lower triangle of overlap matrix'''
for ii in range(self.nao_orient):
for jj in range(ii):
SS[ii,jj]=SS[jj,ii]
elapsed = timeit.default_timer() - start_time
if verbose:
if typ=='paralell':
print('Elapsed time for parallel overlap matrix allocation:',elapsed)
elif typ=='seriall':
print('Elapsed time for serial overlap matrix allocation:',elapsed)
self.overlap=np.copy(SS)
# TODO: include multiwfn script for generation of overlap matrix
def import_multiwfn_overlap(self,filename):
""" Import overlap matrix from multiwfn output
Parameters
----------
filename : string
Name of the input file including the path if needed. (output file
from multiwfn calculation)
Notes
---------
Overlap matrix is stored in:\n
**self.overlap** \n
as numpy array of float dimension (Nao_orient x Nao_orient)
"""
fid = open(filename,'r') # Open the file
flines = fid.readlines() # Read the WHOLE file into RAM
fid.close()
counter=0
Norb=self.nao_orient
SS_inp=np.zeros((Norb,Norb))
for jj in range(Norb//5+1):
for ii in range(5*jj,Norb+1):
if ii!=5*jj:
line = flines[counter]
thisline = line.split()
for kk in range(5):
if kk+5*jj+1<=ii:
if 'D' in thisline[kk+1]:
SS_inp[ii-1,kk+5*jj]=float(thisline[kk+1].replace('D', 'e'))
else:
SS_inp[ii-1,kk+5*jj]=float(thisline[kk+1][:-4]+'e'+thisline[kk+1][-4:])
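# Multiwfn appears to print matrix elements with Fortran-style exponents: a 'D' marker is
# mapped to 'e' above, and values whose three-digit exponent dropped the marker entirely
# (e.g. '0.123456-101') get the 'e' re-inserted before the last four characters.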
#print(thisline[kk+1],SS_inp[ii-1,kk+5*jj])
#print(5*jj,ii-1,flines[counter])
counter+=1
for ii in range(Norb):
for jj in range(ii+1,Norb):
SS_inp[ii,jj]=SS_inp[jj,ii]
self.overlap=SS_inp
def get_dipole_matrix(self,nt=0,verbose=False):
""" Calculate dipole matrix between atomic orbitals
Parameters
----------
nt : integer (optional init = 0)
Specifies how many cores should be used for the calculation.
Special cases:
``nt=0`` all available cores are used for the calculation.
``nt=1`` serial calculation is performed.
verbose : logical (optional init = False)
If ``True`` information about time needed for overlap calculation
will be printed
Notes
---------
Dipole matrix is stored in:\n
**self.dipole** \n
as dictionary of numpy arrays of float dimension (Nao_orient x Nao_orient).
Dictionary has 3 keys: 'Dip_X', 'Dip_Y', 'Dip_Z'. More information can
be found in class documentation
"""
# select platform
if (platform=='cygwin' or platform=="linux" or platform == "linux2") and nt!=1 and nt>=0:
typ='paralell'
elif platform=='win32' or nt==1:
typ='seriall'
else:
typ='seriall_old'
start_time = timeit.default_timer()
SS_dipX=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
SS_dipY=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
SS_dipZ=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
if typ=='seriall' or typ=='paralell':
''' Convert all input parameters into matrices with dimension Nao_orient '''
# prepare input
Coor_lin=np.zeros((self.nao_orient,3))
Coeffs_lin=[]
Exp_lin=[]
Orient_lin=[]
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
Coor_lin[counter1]=self.coor._value[ii]
Coeffs_lin.append(self.coeff[ii])
Exp_lin.append(self.exp[ii])
Orient_lin.append(self.orient[ii][jj])
counter1+=1
dip_over_line_partial = partial(_dip_over_line, Coor_lin,Coeffs_lin,Exp_lin,Orient_lin)
# The only parameter of this function is the index of the row which is calculated
else:
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
counter2=0
for kk in range(self.nao):
for ll in range(len(self.orient[kk])):
if counter1>=counter2:
dipole=dipole_STO(self.coor._value[ii],self.coor._value[kk],self.coeff[ii],self.coeff[kk],self.exp[ii],self.exp[kk],self.orient[ii][jj],self.orient[kk][ll])
SS_dipX[counter1,counter2] = dipole[0]
SS_dipY[counter1,counter2] = dipole[1]
SS_dipZ[counter1,counter2] = dipole[2]
counter2 += 1
counter1 += 1
for ii in range(self.nao_orient):
for jj in range(ii+1,self.nao_orient):
SS_dipX[ii,jj]=SS_dipX[jj,ii]
SS_dipY[ii,jj]=SS_dipY[jj,ii]
SS_dipZ[ii,jj]=SS_dipZ[jj,ii]
elapsed = timeit.default_timer() - start_time
if verbose:
print('Elapsed time for slater dipole matrix allocation:', elapsed)
self.dipole={}
self.dipole['Dip_X']=np.copy(SS_dipX)
self.dipole['Dip_Y']=np.copy(SS_dipY)
self.dipole['Dip_Z']=np.copy(SS_dipZ)
if typ=='paralell':
''' Parallel part '''
# print('Prepairing parallel calculation')
if nt>0:
pool = Pool(processes=nt)
else:
pool = Pool(processes=cpu_count())
index_list=range(self.nao_orient)
DipMat= np.array(pool.map(dip_over_line_partial,index_list))
pool.close() # ATTENTION HERE
pool.join()
elif typ=='seriall':
index_list=range(self.nao_orient)
DipMat=np.zeros((self.nao_orient,self.nao_orient,3))
''' Seriall part '''
for ii in range(self.nao_orient):
DipMat[ii,:]=dip_over_line_partial(index_list[ii])
if typ=='seriall' or typ=='paralell':
''' Fill the lower triangle of the dipole matrix'''
for ii in range(self.nao_orient):
for jj in range(ii):
DipMat[ii,jj,:]=DipMat[jj,ii,:]
elapsed = timeit.default_timer() - start_time
if verbose:
if typ=='paralell':
print('Elapsed time for parallel slater dipole matrix allocation:',elapsed)
elif typ=='seriall':
print('Elapsed time for serial slater dipole matrix allocation:',elapsed)
if typ=='seriall' or typ=='paralell':
self.dipole={}
self.dipole['Dip_X']=np.zeros((self.nao_orient,self.nao_orient))
self.dipole['Dip_Y']=np.zeros((self.nao_orient,self.nao_orient))
self.dipole['Dip_Z']=np.zeros((self.nao_orient,self.nao_orient))
self.dipole['Dip_X'][:,:]=np.copy(DipMat[:,:,0])
self.dipole['Dip_Y'][:,:]=np.copy(DipMat[:,:,1])
self.dipole['Dip_Z'][:,:]=np.copy(DipMat[:,:,2])
def get_quadrupole(self,nt=0,verbose=False):
""" Calculate quadrupole matrix between atomic orbitals
Parameters
----------
nt : integer (optional init = 0)
Specifies how many cores should be used for the calculation.
Special cases:
``nt=0`` all available cores are used for the calculation.
``nt=1`` serial calculation is performed.
verbose : logical (optional init = False)
If ``True`` information about time needed for overlap calculation
will be printed
Notes
---------
Quadrupole matrix is stored in:\n
**self.quadrupole** \n
as numpy array of float dimension (6 x N_AO_orient x N_AO_orient) and
ordering of quadrupole moments is: xx, xy, xz, yy, yz, zz \n \n
quadrupoles are defined as ``Qij(mu,nu) = \int{AO_mu(r+R_mu)*ri*rj*AO_nu(r+R_mu)}``\n
The AO_mu is shifted to zero, making the quadrupoles independent of coordinate shifts
"""
QuadMat=np.zeros((6,self.nao_orient,self.nao_orient),dtype='f8')
start_time = timeit.default_timer()
do_faster=False
if (self.dipole is not None) and (self.overlap is not None):
do_faster=True
# choose platform for calculation
if (platform=='cygwin' or platform=="linux" or platform == "linux2") and nt!=1 and nt>=0:
typ='paralell'
elif platform=='win32' or nt==1:
typ='seriall'
else:
typ='seriall_old'
if typ=='seriall' or typ=='paralell':
''' Convert all input parameters into matrices with dimension Nao_orient '''
# prepare input
Coor_lin=np.zeros((self.nao_orient,3))
Coeffs_lin=[]
Exp_lin=[]
Orient_lin=[]
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
Coor_lin[counter1]=self.coor._value[ii]
Coeffs_lin.append(self.coeff[ii])
Exp_lin.append(self.exp[ii])
Orient_lin.append(self.orient[ii][jj])
counter1+=1
quad_over_line_partial = partial(_quad_over_line, Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,do_faster)
# The only parameter of this function is the index of the row which is calculated
else:
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
counter2=0
for kk in range(self.nao):
for ll in range(len(self.orient[kk])):
Rik=self.coor._value[kk]-self.coor._value[ii]
R0=np.zeros(3)
QuadMat[:,counter1,counter2]=quadrupole_STO(R0,Rik,self.coeff[ii],self.coeff[kk],self.exp[ii],self.exp[kk],self.orient[ii][jj],self.orient[kk][ll])
counter2 += 1
counter1 += 1
elapsed = timeit.default_timer() - start_time
print('Elapsed time for slater quadrupole matrix allocation:',elapsed)
if typ=='paralell':
''' Parallel part '''
# print('Prepairing parallel calculation')
if nt>0:
pool = Pool(processes=nt)
else:
pool = Pool(processes=cpu_count())
index_list=range(self.nao_orient)
QuadMat_tmp= np.array(pool.map(quad_over_line_partial,index_list))
pool.close() # ATTENTION HERE
pool.join()
elif typ=='seriall':
index_list=range(self.nao_orient)
''' Seriall part '''
for ii in range(self.nao_orient):
QuadMat[:,ii,:]=np.swapaxes(quad_over_line_partial(index_list[ii]),0,1)
''' Fill the lower triangle of the quadrupole matrix'''
if typ=='seriall' and do_faster:
for ii in range(self.nao_orient):
for jj in range(ii):
counter=0
Rji=self.coor._value[self.indx_orient[ii][0]]-self.coor._value[self.indx_orient[jj][0]]
Rj=self.coor._value[self.indx_orient[jj][0]]
Dji=np.array([self.dipole['Dip_X'][jj,ii],self.dipole['Dip_Y'][jj,ii],self.dipole['Dip_Z'][jj,ii]])
SSji=self.overlap[jj,ii]
for kk in range(3):
for ll in range(kk,3):
QuadMat[counter,ii,jj]=QuadMat[counter,jj,ii]-Rji[kk]*Dji[ll]-Rji[ll]*Dji[kk]+(Rj[kk]*Rji[ll]+Rj[ll]*Rji[kk]+Rji[kk]*Rji[ll])*SSji
counter+=1
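# Interpretation of the relation above: the quadrupole integrals are taken relative to the
# bra-orbital centre, so recovering Q_kl(i,j) from Q_kl(j,i) amounts to shifting that origin
# by Rji; second moments transform under such a shift as
#   <(v_k - Rji_k)(v_l - Rji_l)> = <v_k v_l> - Rji_k <v_l> - Rji_l <v_k> + Rji_k Rji_l <1>,
# and the code expands the first moments in terms of the absolute dipole matrix (Dji)
# and the overlap (SSji).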
if typ=='paralell' and do_faster:
for ii in range(self.nao_orient):
for jj in range(ii):
counter=0
Rji=self.coor._value[self.indx_orient[ii][0]]-self.coor._value[self.indx_orient[jj][0]]
Rj=self.coor._value[self.indx_orient[jj][0]]
Dji=np.array([self.dipole['Dip_X'][jj,ii],self.dipole['Dip_Y'][jj,ii],self.dipole['Dip_Z'][jj,ii]])
SSji=self.overlap[jj,ii]
for kk in range(3):
for ll in range(kk,3):
QuadMat_tmp[ii,jj,counter]=QuadMat_tmp[jj,ii,counter]-Rji[kk]*Dji[ll]-Rji[ll]*Dji[kk]+(Rj[kk]*Rji[ll]+Rj[ll]*Rji[kk]+Rji[kk]*Rji[ll])*SSji
counter+=1
QuadMat=np.copy(np.swapaxes(QuadMat_tmp,0,2))
QuadMat=np.copy(np.swapaxes(QuadMat,1,2))
elapsed = timeit.default_timer() - start_time
if verbose:
if typ=='paralell':
print('Elapsed time for parallel slater quadrupole matrix allocation:',elapsed)
elif typ=='seriall':
print('Elapsed time for serial slater quadrupole matrix allocation:',elapsed)
self.quadrupole=np.copy(QuadMat)
# def get_quadrupole_old(self,nt=0,verbose=False):
# """ Calculate quadrupole matrix between atomic orbitals
#
# Parameters
# ----------
# nt : integer (optional init = 0)
# Specifies how many cores should be used for the calculation.
# Secial cases:
# ``nt=0`` all available cores are used for the calculation.
# ``nt=1`` serial calculation is performed.
# verbose : logical (optional init = False)
# If ``True`` information about time needed for overlap calculation
# will be printed
#
# Notes
# ---------
# Dipole matrix is stored in:\n
# **self.quadrupole** \n
# as numpy array of float dimension (6 x Nao_orient x Nao_orient) and
# ordering of quadrupole moments is: xx, xy, xz, yy, yz, zz
#
# """
#
# QuadMat=np.zeros((6,self.nao_orient,self.nao_orient),dtype='f8')
# start_time = timeit.default_timer()
#
# do_faster=False
# if (self.dipole is not None) and (self.overlap is not None):
# do_faster=True
#
# # choose platform for calculation
# if (platform=='cygwin' or platform=="linux" or platform == "linux2") and nt!=1 and nt>=0:
# typ='paralell'
# elif platform=='win32' or nt==1:
# typ='seriall'
# else:
# typ='seriall_old'
#
# if typ=='seriall' or typ=='paralell':
# ''' Convert all imput parameters into matrix which has dimension Nao_orient '''
# # prepare imput
# Coor_lin=np.zeros((self.nao_orient,3))
# Coeffs_lin=[]
# Exp_lin=[]
# Orient_lin=[]
# counter1=0
# for ii in range(self.nao):
# for jj in range(len(self.orient[ii])):
# Coor_lin[counter1]=self.coor._value[ii]
# Coeffs_lin.append(self.coeff[ii])
# Exp_lin.append(self.exp[ii])
# Orient_lin.append(self.orient[ii][jj])
# counter1+=1
#
# quad_over_line_partial = partial(_quad_over_line, Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,do_faster)
# # Only parameter of this function is number of row whih is calculated
# else:
# counter1=0
# for ii in range(self.nao):
# for jj in range(len(self.orient[ii])):
# counter2=0
# for kk in range(self.nao):
# for ll in range(len(self.orient[kk])):
# Rik=self.coor._value[kk]-self.coor._value[ii]
# R0=np.zeros(3)
# QuadMat[:,counter1,counter2]=quadrupole_STO(R0,Rik,self.coeff[ii],self.coeff[kk],self.exp[ii],self.exp[kk],self.orient[ii][jj],self.orient[kk][ll])
# counter2 += 1
# counter1 += 1
#
# elapsed = timeit.default_timer() - start_time
# print('Elapsed time for slater quadrupole matrix allocation:',elapsed)
#
# if typ=='paralell':
# ''' Parallel part '''
## print('Prepairing parallel calculation')
# if nt>0:
# pool = Pool(processes=nt)
# else:
# pool = Pool(processes=cpu_count())
# index_list=range(self.nao_orient)
# QuadMat_tmp= np.array(pool.map(quad_over_line_partial,index_list))
# pool.close() # ATTENTION HERE
# pool.join()
# elif typ=='seriall':
# index_list=range(self.nao_orient)
# ''' Seriall part '''
# for ii in range(self.nao_orient):
# QuadMat[:,ii,:]=np.swapaxes(quad_over_line_partial(index_list[ii]),0,1)
#
# ''' Fill the lower triangle of overlap matrix'''
# if typ=='seriall' and do_faster:
# for ii in range(self.nao_orient):
# for jj in range(ii):
# counter=0
# Rji=self.coor._value[self.indx_orient[ii][0]]-self.coor._value[self.indx_orient[jj][0]]
# Rj=self.coor._value[self.indx_orient[jj][0]]
# Dji=np.array([self.dipole['Dip_X'][jj,ii],self.dipole['Dip_Y'][jj,ii],self.dipole['Dip_Z'][jj,ii]])
# SSji=self.overlap[jj,ii]
# for kk in range(3):
# for ll in range(kk,3):
# QuadMat[counter,ii,jj]=QuadMat[counter,jj,ii]-Rji[kk]*Dji[ll]-Rji[ll]*Dji[kk]+(Rj[kk]*Rji[ll]+Rj[ll]*Rji[kk]+Rji[kk]*Rji[ll])*SSji
# counter+=1
# if typ=='paralell' and do_faster:
# for ii in range(self.nao_orient):
# for jj in range(ii):
# counter=0
# Rji=self.coor._value[self.indx_orient[ii][0]]-self.coor._value[self.indx_orient[jj][0]]
# Rj=self.coor._value[self.indx_orient[jj][0]]
# Dji=np.array([self.dipole['Dip_X'][jj,ii],self.dipole['Dip_Y'][jj,ii],self.dipole['Dip_Z'][jj,ii]])
# SSji=self.overlap[jj,ii]
# for kk in range(3):
# for ll in range(kk,3):
# QuadMat_tmp[ii,jj,counter]=QuadMat_tmp[jj,ii,counter]-Rji[kk]*Dji[ll]-Rji[ll]*Dji[kk]+(Rj[kk]*Rji[ll]+Rj[ll]*Rji[kk]+Rji[kk]*Rji[ll])*SSji
# counter+=1
# QuadMat=np.copy(np.swapaxes(QuadMat_tmp,0,2))
# QuadMat=np.copy(np.swapaxes(QuadMat,1,2))
#
# elapsed = timeit.default_timer() - start_time
#
# if verbose:
# if typ=='paralell':
# print('Elapsed time for parallel slater quadrupole matrix allocation:',elapsed)
# elif typ=='seriall':
# print('Elapsed time for seriall slater quadrupoele matrix allocation:',elapsed)
#
# self.quadrupole=np.copy(QuadMat)
def get_slater_ao_grid(self,grid,indx,keep_grid=False,new_grid=True): # The only version that is correct, with the proper normalization
""" Evaluate single slater orbital on given grid
Parameters
----------
grid : Grid class
Information about grid on which slater atomic orbital is evaluated.
indx :
Index of atomic orbital which is evaluated (position in indx_orient)
keep_grid : logical (optional init = False)
If ``True`` local grid (dependent on orbital center) is kept as
global internal variable in order to avoid recalculation of the
grid for calculation of more orientations of the same orbital.
new_grid : logical (optional init = True)
If ``True`` local grid (dependent on orbital center) is recalculated
and the old one is overwritten. It is needed if a local grid for an orbital
with different center was previously saved.
Returns
---------
slater_ao_tmp : numpy array of float (dimension Grid_Nx x Grid_Ny x Grid_Nz)
Values of slater orbital on grid points defined by grid.
"""
slater_ao_tmp=np.zeros(np.shape(grid.X))
ii=self.indx_orient[indx][0]
# print(indx,'/',mol.ao_spec['Nao_orient'])
if new_grid:
global X_grid_loc,Y_grid_loc,Z_grid_loc,RR_grid_loc
X_grid_loc=np.add(grid.X,-self.coor._value[ii][0]) # shift the grid so that the orbital center ends up at the origin
Y_grid_loc=np.add(grid.Y,-self.coor._value[ii][1])
Z_grid_loc=np.add(grid.Z,-self.coor._value[ii][2])
RR_grid_loc=np.square(X_grid_loc)+np.square(Y_grid_loc)+np.square(Z_grid_loc)
# Coordinate mesh created and all coordinates laid out on the grid
# For every orientation a different Slater atomic orbital is used because of the different normalization of the Gaussian orbitals
# Calculation of the Slater orbital on the grid for AO=ii and orientation ao_comb[jj+index]
if self.type[ii][0] in ['s','p','d','f','5d']:
slater_ao=np.zeros(np.shape(grid.X))
for kk in range(len(self.coeff[ii])):
coef=self.coeff[ii][kk]
exp=self.exp[ii][kk]
r_ao= self.coor._value[ii]
norm=norm_GTO(r_ao,exp,self.indx_orient[indx][1])
c=coef*norm
slater_ao += np.multiply(c,np.exp(np.multiply(-exp,RR_grid_loc)))
else:
raise IOError('Supported orbitals are so far only s,p,d,5d,f orbitals')
# Slater orbital constructed
if self.type[ii][0] in ['s','p','d','f']:
m=self.indx_orient[indx][1][0]
n=self.indx_orient[indx][1][1]
o=self.indx_orient[indx][1][2]
slater_ao_tmp=np.copy(slater_ao)
if m!=0:
slater_ao_tmp=np.multiply(np.power(X_grid_loc,m),slater_ao_tmp)
if n!=0:
slater_ao_tmp=np.multiply(np.power(Y_grid_loc,n),slater_ao_tmp)
if o!=0:
slater_ao_tmp=np.multiply(np.power(Z_grid_loc,o),slater_ao_tmp)
elif self.type[ii][0]=='5d':
orient=self.indx_orient[indx][1]
if orient[0]==(-2):
## 3Z^2-R^2
slater_ao_tmp=np.copy(slater_ao)
slater_ao_tmp=np.multiply(3,np.multiply(np.power(Z_grid_loc,2),slater_ao_tmp))
slater_ao_tmp=slater_ao_tmp-np.multiply(RR_grid_loc,slater_ao)
elif orient[0]==(-1) and orient[2]==(-1):
## XZ
slater_ao_tmp=np.copy(slater_ao)
slater_ao_tmp=np.multiply(Z_grid_loc,slater_ao_tmp)
slater_ao_tmp=np.multiply(X_grid_loc,slater_ao_tmp)
elif orient[1]==(-1) and orient[2]==(-1):
## YZ
slater_ao_tmp=np.copy(slater_ao)
slater_ao_tmp=np.multiply(Z_grid_loc,slater_ao_tmp)
slater_ao_tmp=np.multiply(Y_grid_loc,slater_ao_tmp)
elif orient[1]==(-2):
#X^2-Y^2
slater_ao_tmp=np.copy(slater_ao)
slater_ao_tmp=np.multiply(np.power(X_grid_loc,2),slater_ao_tmp)
slater_ao_tmp=slater_ao_tmp-np.multiply(np.power(Y_grid_loc,2),slater_ao)
elif orient[0]==(-1) and orient[1]==(-1):
#XY
slater_ao_tmp=np.copy(slater_ao)
slater_ao_tmp=np.multiply(Y_grid_loc,slater_ao_tmp)
slater_ao_tmp=np.multiply(X_grid_loc,slater_ao_tmp)
else:
raise IOError('5d orbital has only 5 orientations')
if not keep_grid:
del X_grid_loc
del Y_grid_loc
del Z_grid_loc
del RR_grid_loc
return np.array(slater_ao_tmp)
def get_all_slater_grid(self,grid,nt=0): # The only version that is correct, with the proper normalization
""" Evaluate all slater orbitals on given grid. This way basis for
calculation of transition densities or other electron densities.
Parameters
----------
grid : Grid class
Information about grid on which slater atomic orbital is evaluated.
nt : integer (optional init = 0)
Specifies how many cores should be used for the calculation.
Special cases:
``nt=0`` all available cores are used for the calculation.
``nt=1`` serial calculation is performed.
Notes
---------
All oriented slater atomic orbitals evaluated on given grid are stored at:\n
**self.grid** \n
as a list (size nao_orient) of numpy arrays of float
(dimension Grid_Nx x Grid_Ny x Grid_Nz)
"""
# TODO: add a check of the OS and decide whether to calculate serially or in parallel
All_slater_grid=[]
do_parallel=False
if nt==1: # serial execution
counter=0
keep_grid=True
for ii in range(self.nao):
new_grid=True
print(ii)
if ii==0:
new_grid=True
elif (ii>0) and (self.atom[ii].indx==self.atom[ii-1].indx):
new_grid=False
for jj in range(len(self.orient[ii])):
if counter==self.nao_orient:
keep_grid=False
All_slater_grid.append(self.get_slater_ao_grid(grid,counter,keep_grid=keep_grid,new_grid=new_grid))
counter+=1
elif nt>1:
from multiprocessing import Pool, cpu_count
pool = Pool(processes=nt)
do_parallel=True
else:
from multiprocessing import Pool, cpu_count
pool = Pool(processes=cpu_count())
do_parallel=True
if do_parallel:
index_list=range(self.nao_orient)
allocate_single_slater_grid_mol_partial = partial(self.get_slater_ao_grid, grid)
All_slater_grid_tmp= pool.map(allocate_single_slater_grid_mol_partial,index_list)
pool.close() # ATTENTION HERE
pool.join()
All_slater_grid=np.copy(All_slater_grid_tmp)
self.grid=All_slater_grid
def _get_ao_rot_mat(self,rotxy,rotxz,rotyz):
RotA={}
RotB={}
RotC={}
orbit=['s','p','d','5d','f']
for ii in orbit:
RotA[ii],RotB[ii],RotC[ii]=fill_basis_transf_matrix(ii,rotxy,rotxz,rotyz)
#TransfMat=np.dot(RotC[self.type[0][0]],np.dot(RotB[self.type[0][0]],RotA[self.type[0][0]]))
TransfMat=np.dot(RotC[self.type[0]],np.dot(RotB[self.type[0]],RotA[self.type[0]]))
for ii in range(1,self.nao):
#Rot_tmp=np.dot(RotC[self.type[ii][0]],np.dot(RotB[self.type[ii][0]],RotA[self.type[ii][0]]))
Rot_tmp=np.dot(RotC[self.type[ii]],np.dot(RotB[self.type[ii]],RotA[self.type[ii]]))
TransfMat=scipy.linalg.block_diag(TransfMat,Rot_tmp)
TransfMat=np.array(TransfMat)
return TransfMat
def _get_ao_rot_mat_1(self,rotxy,rotxz,rotyz):
RotA={}
RotB={}
RotC={}
orbit=['s','p','d','5d','f']
for ii in orbit:
RotA[ii],RotB[ii],RotC[ii]=fill_basis_transf_matrix(ii,-rotxy,-rotxz,-rotyz)
TransfMat=np.dot(RotA[self.type[0][0]],np.dot(RotB[self.type[0][0]],RotC[self.type[0][0]]))
for ii in range(1,self.nao):
Rot_tmp=np.dot(RotA[self.type[ii][0]],np.dot(RotB[self.type[ii][0]],RotC[self.type[ii][0]]))
TransfMat=scipy.linalg.block_diag(TransfMat,Rot_tmp)
TransfMat=np.array(TransfMat)
return TransfMat
def _rotate_dipole(self,rotxy,rotxz,rotyz):
RotA,RotB,RotC=fill_basis_transf_matrix('p',rotxy,rotxz,rotyz)
Rot=np.dot(RotC,np.dot(RotB,RotA))
TransfMat=self._get_ao_rot_mat(rotxy,rotxz,rotyz)
# Transformation of atomic orbitals
self.dipole['Dip_X']=np.dot(TransfMat,np.dot(self.dipole['Dip_X'],TransfMat.T))
self.dipole['Dip_Y']=np.dot(TransfMat,np.dot(self.dipole['Dip_Y'],TransfMat.T))
self.dipole['Dip_Z']=np.dot(TransfMat,np.dot(self.dipole['Dip_Z'],TransfMat.T))
# Transformation of dipole coordinate (x,z,y)
Dip_X_tmp=Rot[0,0]*self.dipole['Dip_X']+Rot[0,1]*self.dipole['Dip_Y']+Rot[0,2]*self.dipole['Dip_Z']
Dip_Y_tmp=Rot[1,0]*self.dipole['Dip_X']+Rot[1,1]*self.dipole['Dip_Y']+Rot[1,2]*self.dipole['Dip_Z']
Dip_Z_tmp=Rot[2,0]*self.dipole['Dip_X']+Rot[2,1]*self.dipole['Dip_Y']+Rot[2,2]*self.dipole['Dip_Z']
self.dipole['Dip_X']=np.copy(Dip_X_tmp)
self.dipole['Dip_Y']=np.copy(Dip_Y_tmp)
self.dipole['Dip_Z']=np.copy(Dip_Z_tmp)
# TODO: Test this
def _rotate_dipole_1(self,rotxy,rotxz,rotyz):
RotA,RotB,RotC=fill_basis_transf_matrix('p',-rotxy,-rotxz,-rotyz)
Rot=np.dot(RotA,np.dot(RotB,RotC))
TransfMat=self._get_ao_rot_mat_1(rotxy,rotxz,rotyz)
# Transformation of atomic orbitals
self.dipole['Dip_X']=np.dot(TransfMat,np.dot(self.dipole['Dip_X'],TransfMat.T))
self.dipole['Dip_Y']=np.dot(TransfMat,np.dot(self.dipole['Dip_Y'],TransfMat.T))
self.dipole['Dip_Z']=np.dot(TransfMat,np.dot(self.dipole['Dip_Z'],TransfMat.T))
# Transformation of dipole coordinate (x,z,y)
Dip_X_tmp=Rot[0,0]*self.dipole['Dip_X']+Rot[0,1]*self.dipole['Dip_Y']+Rot[0,2]*self.dipole['Dip_Z']
Dip_Y_tmp=Rot[1,0]*self.dipole['Dip_X']+Rot[1,1]*self.dipole['Dip_Y']+Rot[1,2]*self.dipole['Dip_Z']
Dip_Z_tmp=Rot[2,0]*self.dipole['Dip_X']+Rot[2,1]*self.dipole['Dip_Y']+Rot[2,2]*self.dipole['Dip_Z']
self.dipole['Dip_X']=np.copy(Dip_X_tmp)
self.dipole['Dip_Y']=
|
np.copy(Dip_Y_tmp)
|
numpy.copy
|
"""
Core framework data structures.
Objects from this module can also be imported from the top-level
module directly, e.g.
from backtesting import Backtest, Strategy
"""
import multiprocessing as mp
import os
import sys
import warnings
from abc import abstractmethod, ABCMeta
from concurrent.futures import ProcessPoolExecutor, as_completed
from copy import copy
from functools import lru_cache, partial
from itertools import repeat, product, chain, compress
from math import copysign
from numbers import Number
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
import pandas as pd
try:
from tqdm.auto import tqdm as _tqdm
_tqdm = partial(_tqdm, leave=False)
except ImportError:
def _tqdm(seq, **_):
return seq
from ._plotting import plot
from ._util import _as_str, _Indicator, _Data, _data_period, try_
__pdoc__ = {
'Strategy.__init__': False,
'Order.__init__': False,
'Position.__init__': False,
'Trade.__init__': False,
}
class Strategy(metaclass=ABCMeta):
"""
A trading strategy base class. Extend this class and
override methods
`backtesting.backtesting.Strategy.init` and
`backtesting.backtesting.Strategy.next` to define
your own strategy.
"""
def __init__(self, broker, data, params):
self._indicators = []
self._broker: _Broker = broker
self._data: _Data = data
self._params = self._check_params(params)
def __repr__(self):
return '<Strategy ' + str(self) + '>'
def __str__(self):
params = ','.join(f'{i[0]}={i[1]}' for i in zip(self._params.keys(),
map(_as_str, self._params.values())))
if params:
params = '(' + params + ')'
return f'{self.__class__.__name__}{params}'
def _check_params(self, params):
for k, v in params.items():
if not hasattr(self, k):
raise AttributeError(
f"Strategy '{self.__class__.__name__}' is missing parameter '{k}'."
"Strategy class should define parameters as class variables before they "
"can be optimized or run with.")
setattr(self, k, v)
return params
def I(self, # noqa: E741, E743
func: Callable, *args,
name=None, plot=True, overlay=None, color=None, scatter=False,
**kwargs) -> np.ndarray:
"""
Declare indicator. An indicator is just an array of values,
but one that is revealed gradually in
`backtesting.backtesting.Strategy.next` much like
`backtesting.backtesting.Strategy.data` is.
Returns `np.ndarray` of indicator values.
`func` is a function that returns the indicator array(s) of
same length as `backtesting.backtesting.Strategy.data`.
In the plot legend, the indicator is labeled with
function name, unless `name` overrides it.
If `plot` is `True`, the indicator is plotted on the resulting
`backtesting.backtesting.Backtest.plot`.
If `overlay` is `True`, the indicator is plotted overlaying the
price candlestick chart (suitable e.g. for moving averages).
If `False`, the indicator is plotted standalone below the
candlestick chart. By default, a heuristic is used which decides
correctly most of the time.
`color` can be string hex RGB triplet or X11 color name.
By default, the next available color is assigned.
If `scatter` is `True`, the plotted indicator marker will be a
circle instead of a connected line segment (default).
Additional `*args` and `**kwargs` are passed to `func` and can
be used for parameters.
For example, using simple moving average function from TA-Lib:
def init():
self.sma = self.I(ta.SMA, self.data.Close, self.n_sma)
"""
if name is None:
params = ','.join(filter(None, map(_as_str, chain(args, kwargs.values()))))
func_name = _as_str(func)
name = (f'{func_name}({params})' if params else f'{func_name}')
else:
name = name.format(*map(_as_str, args),
**dict(zip(kwargs.keys(), map(_as_str, kwargs.values()))))
try:
value = func(*args, **kwargs)
except Exception as e:
raise RuntimeError(f'Indicator "{name}" errored with exception: {e}')
if isinstance(value, pd.DataFrame):
value = value.values.T
if value is not None:
value = try_(lambda: np.asarray(value, order='C'), None)
is_arraylike = value is not None
# Optionally flip the array if the user returned e.g. `df.values`
if is_arraylike and np.argmax(value.shape) == 0:
value = value.T
if not is_arraylike or not 1 <= value.ndim <= 2 or value.shape[-1] != len(self._data.Close):
raise ValueError(
'Indicators must return (optionally a tuple of) numpy.arrays of same '
f'length as `data` (data shape: {self._data.Close.shape}; indicator "{name}"'
f'shape: {getattr(value, "shape" , "")}, returned value: {value})')
if plot and overlay is None and np.issubdtype(value.dtype, np.number):
x = value / self._data.Close
# By default, overlay if strong majority of indicator values
# is within 30% of Close
with np.errstate(invalid='ignore'):
overlay = ((x < 1.4) & (x > .6)).mean() > .6
value = _Indicator(value, name=name, plot=plot, overlay=overlay,
color=color, scatter=scatter,
# _Indicator.s Series accessor uses this:
index=self.data.index)
self._indicators.append(value)
return value
@abstractmethod
def init(self):
"""
Initialize the strategy.
Override this method.
Declare indicators (with `backtesting.backtesting.Strategy.I`).
Precompute what needs to be precomputed or can be precomputed
in a vectorized fashion before the strategy starts.
If you extend composable strategies from `backtesting.lib`,
make sure to call:
super().init()
"""
@abstractmethod
def next(self):
"""
Main strategy runtime method, called as each new
`backtesting.backtesting.Strategy.data`
instance (row; full candlestick bar) becomes available.
This is the main method where strategy decisions
upon data precomputed in `backtesting.backtesting.Strategy.init`
take place.
If you extend composable strategies from `backtesting.lib`,
make sure to call:
super().next()
"""
class __FULL_EQUITY(float):
def __repr__(self): return '.9999'
_FULL_EQUITY = __FULL_EQUITY(1 - sys.float_info.epsilon)
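# _FULL_EQUITY is a sentinel just below 1.0: used as the default `size`, it means
# "commit (nearly) all available equity" while still passing the `0 < size < 1`
# fraction check in buy()/sell() below.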
def buy(self, *,
size: float = _FULL_EQUITY,
limit: float = None,
stop: float = None,
sl: float = None,
tp: float = None):
"""
Place a new long order. For explanation of parameters, see `Order` and its properties.
See also `Strategy.sell()`.
"""
assert 0 < size < 1 or round(size) == size, \
"size must be a positive fraction of equity, or a positive whole number of units"
return self._broker.new_order(size, limit, stop, sl, tp)
def sell(self, *,
size: float = _FULL_EQUITY,
limit: float = None,
stop: float = None,
sl: float = None,
tp: float = None):
"""
Place a new short order. For explanation of parameters, see `Order` and its properties.
See also `Strategy.buy()`.
"""
assert 0 < size < 1 or round(size) == size, \
"size must be a positive fraction of equity, or a positive whole number of units"
return self._broker.new_order(-size, limit, stop, sl, tp)
@property
def equity(self) -> float:
"""Current account equity (cash plus assets)."""
return self._broker.equity
@property
def data(self) -> _Data:
"""
Price data, roughly as passed into
`backtesting.backtesting.Backtest.__init__`,
but with two significant exceptions:
* `data` is _not_ a DataFrame, but a custom structure
that serves customized numpy arrays for reasons of performance
and convenience. Besides OHLCV columns, `.index` and length,
it offers `.pip` property, the smallest price unit of change.
* Within `backtesting.backtesting.Strategy.init`, `data` arrays
are available in full length, as passed into
`backtesting.backtesting.Backtest.__init__`
(for precomputing indicators and such). However, within
`backtesting.backtesting.Strategy.next`, `data` arrays are
only as long as the current iteration, simulating gradual
price point revelation. In each call of
`backtesting.backtesting.Strategy.next` (iteratively called by
`backtesting.backtesting.Backtest` internally),
the last array value (e.g. `data.Close[-1]`)
is always the _most recent_ value.
* If you need data arrays (e.g. `data.Close`) to be indexed
**Pandas series**, you can call their `.s` accessor
(e.g. `data.Close.s`). If you need the whole of data
as a **DataFrame**, use `.df` accessor (i.e. `data.df`).
"""
return self._data
@property
def position(self) -> 'Position':
"""Instance of `backtesting.backtesting.Position`."""
return self._broker.position
@property
def orders(self) -> 'Tuple[Order, ...]':
"""List of orders (see `Order`) waiting for execution."""
return _Orders(self._broker.orders)
@property
def trades(self) -> 'Tuple[Trade, ...]':
"""List of active trades (see `Trade`)."""
return tuple(self._broker.trades)
@property
def closed_trades(self) -> 'Tuple[Trade, ...]':
"""List of settled trades (see `Trade`)."""
return tuple(self._broker.closed_trades)
class _Orders(tuple):
"""
TODO: remove this class. Only for deprecation.
"""
def cancel(self):
"""Cancel all non-contingent (i.e. SL/TP) orders."""
for order in self:
if not order.is_contingent:
order.cancel()
def __getattr__(self, item):
# TODO: Warn on deprecations from the previous version. Remove in the next.
removed_attrs = ('entry', 'set_entry', 'is_long', 'is_short',
'sl', 'tp', 'set_sl', 'set_tp')
if item in removed_attrs:
raise AttributeError(f'Strategy.orders.{"/.".join(removed_attrs)} were removed in '
'Backtesting 0.2.0. '
'Use `Order` API instead. See docs.')
raise AttributeError(f"'tuple' object has no attribute {item!r}")
class Position:
"""
Currently held asset position, available as
`backtesting.backtesting.Strategy.position` within
`backtesting.backtesting.Strategy.next`.
Can be used in boolean contexts, e.g.
if self.position:
... # we have a position, either long or short
"""
def __init__(self, broker: '_Broker'):
self.__broker = broker
def __bool__(self):
return self.size != 0
@property
def size(self) -> float:
"""Position size in units of asset. Negative if position is short."""
return sum(trade.size for trade in self.__broker.trades)
@property
def pl(self) -> float:
"""Profit (positive) or loss (negative) of the current position in cash units."""
return sum(trade.pl for trade in self.__broker.trades)
@property
def pl_pct(self) -> float:
"""Profit (positive) or loss (negative) of the current position in percent."""
weights = np.abs([trade.size for trade in self.__broker.trades])
weights = weights / weights.sum()
pl_pcts = np.array([trade.pl_pct for trade in self.__broker.trades])
return (pl_pcts * weights).sum()
@property
def is_long(self) -> bool:
"""True if the position is long (position size is positive)."""
return self.size > 0
@property
def is_short(self) -> bool:
"""True if the position is short (position size is negative)."""
return self.size < 0
def close(self, portion: float = 1.):
"""
Close a portion of the position by closing `portion` of each active trade. See `Trade.close`.
"""
for trade in self.__broker.trades:
trade.close(portion)
def __repr__(self):
return f'<Position: {self.size} ({len(self.__broker.trades)} trades)>'
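# Position-handling sketch (illustrative only): typical checks made inside a
# strategy's `next()` method. `self.sma_slow` below is a hypothetical indicator
# precomputed in `Strategy.init()`.
#
#     def next(self):
#         if self.position and self.position.pl_pct < -0.05:
#             self.position.close()           # flatten everything after a 5% loss
#         elif self.position.is_long and self.data.Close[-1] < self.sma_slow[-1]:
#             self.position.close(0.5)        # scale out of half of each open trade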
class _OutOfMoneyError(Exception):
pass
class Order:
"""
Place new orders through `Strategy.buy()` and `Strategy.sell()`.
Query existing orders through `Strategy.orders`.
When an order is executed or [filled], it results in a `Trade`.
If you wish to modify aspects of a placed but not yet filled order,
cancel it and place a new one instead.
All placed orders are [Good 'Til Canceled].
[filled]: https://www.investopedia.com/terms/f/fill.asp
[Good 'Til Canceled]: https://www.investopedia.com/terms/g/gtc.asp
"""
def __init__(self, broker: '_Broker',
size: float,
limit_price: float = None,
stop_price: float = None,
sl_price: float = None,
tp_price: float = None,
parent_trade: 'Trade' = None):
self.__broker = broker
assert size != 0
self.__size = size
self.__limit_price = limit_price
self.__stop_price = stop_price
self.__sl_price = sl_price
self.__tp_price = tp_price
self.__parent_trade = parent_trade
def _replace(self, **kwargs):
for k, v in kwargs.items():
setattr(self, f'_{self.__class__.__qualname__}__{k}', v)
return self
def __repr__(self):
return '<Order {}>'.format(', '.join(f'{param}={round(value, 5)}'
for param, value in (
('size', self.__size),
('limit', self.__limit_price),
('stop', self.__stop_price),
('sl', self.__sl_price),
('tp', self.__tp_price),
('contingent', self.is_contingent),
) if value is not None))
def cancel(self):
"""Cancel the order."""
self.__broker.orders.remove(self)
trade = self.__parent_trade
if trade:
if self is trade._sl_order:
trade._replace(sl_order=None)
elif self is trade._tp_order:
trade._replace(tp_order=None)
else:
assert False
# Fields getters
@property
def size(self) -> float:
"""
Order size (negative for short orders).
If size is a value between 0 and 1, it is interpreted as a fraction of current
available liquidity (cash plus `Position.pl` minus used margin).
A value greater than or equal to 1 indicates an absolute number of units.
"""
return self.__size
@property
def limit(self) -> Optional[float]:
"""
Order limit price for [limit orders], or None for [market orders],
which are filled at next available price.
[limit orders]: https://www.investopedia.com/terms/l/limitorder.asp
[market orders]: https://www.investopedia.com/terms/m/marketorder.asp
"""
return self.__limit_price
@property
def stop(self) -> Optional[float]:
"""
Order stop price for [stop-limit/stop-market][_] order,
otherwise None if no stop was set, or the stop price has already been hit.
[_]: https://www.investopedia.com/terms/s/stoporder.asp
"""
return self.__stop_price
@property
def sl(self) -> Optional[float]:
"""
A stop-loss price at which, if set, a new contingent stop-market order
will be placed upon the `Trade` following this order's execution.
See also `Trade.sl`.
"""
return self.__sl_price
@property
def tp(self) -> Optional[float]:
"""
A take-profit price at which, if set, a new contingent limit order
will be placed upon the `Trade` following this order's execution.
See also `Trade.tp`.
"""
return self.__tp_price
@property
def parent_trade(self):
return self.__parent_trade
__pdoc__['Order.parent_trade'] = False
# Extra properties
@property
def is_long(self):
"""True if the order is long (order size is positive)."""
return self.__size > 0
@property
def is_short(self):
"""True if the order is short (order size is negative)."""
return self.__size < 0
@property
def is_contingent(self):
"""
True for [contingent] orders, i.e. [OCO] stop-loss and take-profit bracket orders
placed upon an active trade. Remaining contingent orders are canceled when
their parent `Trade` is closed.
You can modify contingent orders through `Trade.sl` and `Trade.tp`.
[contingent]: https://www.investopedia.com/terms/c/contingentorder.asp
[OCO]: https://www.investopedia.com/terms/o/oco.asp
"""
return bool(self.__parent_trade)
class Trade:
"""
When an `Order` is filled, it results in an active `Trade`.
Find active trades in `Strategy.trades` and closed, settled trades in `Strategy.closed_trades`.
"""
def __init__(self, broker: '_Broker', size: int, entry_price: float, entry_bar):
self.__broker = broker
self.__size = size
self.__entry_price = entry_price
self.__exit_price: Optional[float] = None
self.__entry_bar: int = entry_bar
self.__exit_bar: Optional[int] = None
self.__sl_order: Optional[Order] = None
self.__tp_order: Optional[Order] = None
def __repr__(self):
return f'<Trade size={self.__size} time={self.__entry_bar}-{self.__exit_bar or ""} ' \
f'price={self.__entry_price}-{self.__exit_price or ""} pl={self.pl:.0f}>'
def _replace(self, **kwargs):
for k, v in kwargs.items():
setattr(self, f'_{self.__class__.__qualname__}__{k}', v)
return self
def _copy(self, **kwargs):
return copy(self)._replace(**kwargs)
def close(self, portion: float = 1.):
"""Place new `Order` to close `portion` of the trade at next market price."""
assert 0 < portion <= 1, "portion must be a fraction between 0 and 1"
size = copysign(max(1, round(abs(self.__size) * portion)), -self.__size)
order = Order(self.__broker, size, parent_trade=self)
self.__broker.orders.insert(0, order)
# Fields getters
@property
def size(self):
"""Trade size (volume; negative for short trades)."""
return self.__size
@property
def entry_price(self) -> float:
"""Trade entry price."""
return self.__entry_price
@property
def exit_price(self) -> Optional[float]:
"""Trade exit price (or None if the trade is still active)."""
return self.__exit_price
@property
def entry_bar(self) -> int:
"""Candlestick bar index of when the trade was entered."""
return self.__entry_bar
@property
def exit_bar(self) -> Optional[int]:
"""
Candlestick bar index of when the trade was exited
(or None if the trade is still active).
"""
return self.__exit_bar
@property
def _sl_order(self):
return self.__sl_order
@property
def _tp_order(self):
return self.__tp_order
# Extra properties
@property
def entry_time(self) -> Union[pd.Timestamp, int]:
"""Datetime of when the trade was entered."""
return self.__broker._data.index[self.__entry_bar]
@property
def exit_time(self) -> Optional[Union[pd.Timestamp, int]]:
"""Datetime of when the trade was exited."""
if self.__exit_bar is None:
return None
return self.__broker._data.index[self.__exit_bar]
@property
def is_long(self):
"""True if the trade is long (trade size is positive)."""
return self.__size > 0
@property
def is_short(self):
"""True if the trade is short (trade size is negative)."""
return not self.is_long
@property
def pl(self):
"""Trade profit (positive) or loss (negative) in cash units."""
price = self.__exit_price or self.__broker.last_price
return self.__size * (price - self.__entry_price)
@property
def pl_pct(self):
"""Trade profit (positive) or loss (negative) in percent."""
price = self.__exit_price or self.__broker.last_price
return copysign(1, self.__size) * (price / self.__entry_price - 1)
@property
def value(self):
"""Trade total value in cash (volume × price)."""
price = self.__exit_price or self.__broker.last_price
return abs(self.__size) * price
# SL/TP management API
@property
def sl(self):
"""
Stop-loss price at which to close the trade.
This variable is writable. By assigning it a new price value,
you create or modify the existing SL order.
By assigning it `None`, you cancel it.
"""
return self.__sl_order and self.__sl_order.stop
@sl.setter
def sl(self, price: float):
self.__set_contingent('sl', price)
@property
def tp(self):
"""
Take-profit price at which to close the trade.
This property is writable. By assigning it a new price value,
you create or modify the existing TP order.
By assigning it `None`, you cancel it.
"""
return self.__tp_order and self.__tp_order.limit
@tp.setter
def tp(self, price: float):
self.__set_contingent('tp', price)
def __set_contingent(self, type, price):
assert type in ('sl', 'tp')
assert price is None or 0 < price < np.inf
attr = f'_{self.__class__.__qualname__}__{type}_order'
order: Order = getattr(self, attr)
if order:
order.cancel()
if price:
kwargs = dict(stop=price) if type == 'sl' else dict(limit=price)
order = self.__broker.new_order(-self.size, trade=self, **kwargs)
setattr(self, attr, order)
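# Trade-management sketch (illustrative only): `Trade.sl` and `Trade.tp` are
# writable, so bracket orders can be created, moved or cancelled on open trades
# from within `Strategy.next()`.
#
#     def next(self):
#         for trade in self.trades:
#             if trade.is_long:
#                 # trail the stop to 5% below the latest close
#                 trade.sl = max(trade.sl or -np.inf, self.data.Close[-1] * 0.95)
#             elif trade.tp is None:
#                 trade.tp = trade.entry_price * 0.90    # add a take-profit to an open short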
class _Broker:
def __init__(self, *, data, cash, commission, margin,
trade_on_close, hedging, exclusive_orders, index):
assert 0 < cash, f"cash should be >0, is {cash}"
assert -.1 <= commission < .1, \
("commission should be between -10% "
f"(e.g. market-maker's rebates) and 10% (fees), is {commission}")
assert 0 < margin <= 1, f"margin should be between 0 and 1, is {margin}"
self._data: _Data = data
self._cash = cash
self._commission = commission
self._leverage = 1 / margin
self._trade_on_close = trade_on_close
self._hedging = hedging
self._exclusive_orders = exclusive_orders
self._equity = np.tile(np.nan, len(index))
self.orders: List[Order] = []
self.trades: List[Trade] = []
self.position = Position(self)
self.closed_trades: List[Trade] = []
def __repr__(self):
return f'<Broker: {self._cash:.0f}{self.position.pl:+.1f} ({len(self.trades)} trades)>'
def new_order(self,
size: float,
limit: float = None,
stop: float = None,
sl: float = None,
tp: float = None,
*,
trade: Trade = None):
"""
Argument size indicates whether the order is long or short
"""
size = float(size)
stop = stop and float(stop)
limit = limit and float(limit)
sl = sl and float(sl)
tp = tp and float(tp)
is_long = size > 0
adjusted_price = self._adjusted_price(size)
if is_long:
if not (sl or -np.inf) < (limit or stop or adjusted_price) < (tp or np.inf):
raise ValueError(
"Long orders require: "
f"SL ({sl}) < LIMIT ({limit or stop or adjusted_price}) < TP ({tp})")
else:
if not (tp or -np.inf) < (limit or stop or adjusted_price) < (sl or np.inf):
raise ValueError(
"Short orders require: "
f"TP ({tp}) < LIMIT ({limit or stop or adjusted_price}) < SL ({sl})")
order = Order(self, size, limit, stop, sl, tp, trade)
# Put the new order in the order queue,
# inserting SL/TP/trade-closing orders in-front
if trade:
self.orders.insert(0, order)
else:
# If exclusive orders (each new order auto-closes previous orders/position),
# cancel all non-contingent orders and close all open trades beforehand
if self._exclusive_orders:
for o in self.orders:
if not o.is_contingent:
o.cancel()
for t in self.trades:
t.close()
self.orders.append(order)
return order
@property
def last_price(self) -> float:
""" Price at the last (current) close. """
return self._data.Close[-1]
def _adjusted_price(self, size=None, price=None) -> float:
"""
Long/short `price`, adjusted for commissions.
In long positions, the adjusted price is a fraction higher, and vice versa.
"""
return (price or self.last_price) * (1 + copysign(self._commission, size))
@property
def equity(self) -> float:
return self._cash + sum(trade.pl for trade in self.trades)
@property
def margin_available(self) -> float:
# From https://github.com/QuantConnect/Lean/pull/3768
margin_used = sum(trade.value / self._leverage for trade in self.trades)
return max(0, self.equity - margin_used)
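# Worked example (illustrative only): with equity of 10_000, margin of 0.5
# (i.e. 2:1 leverage) and one open trade whose value is 6_000, the margin used
# is 6_000 / 2 = 3_000, so margin_available = max(0, 10_000 - 3_000) = 7_000.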
def next(self):
i = self._i = len(self._data) - 1
self._process_orders()
# Log account equity for the equity curve
equity = self.equity
self._equity[i] = equity
# If equity is negative, set all to 0 and stop the simulation
if equity <= 0:
assert self.margin_available <= 0
for trade in self.trades:
self._close_trade(trade, self._data.Close[-1], i)
self._cash = 0
self._equity[i:] = 0
raise _OutOfMoneyError
def _process_orders(self):
data = self._data
open, high, low = data.Open[-1], data.High[-1], data.Low[-1]
prev_close = data.Close[-2]
reprocess_orders = False
# Process orders
for order in list(self.orders): # type: Order
# Related SL/TP order was already removed
if order not in self.orders:
continue
# Check if stop condition was hit
stop_price = order.stop
if stop_price:
is_stop_hit = ((high > stop_price) if order.is_long else (low < stop_price))
if not is_stop_hit:
continue
# > When the stop price is reached, a stop order becomes a market/limit order.
# https://www.sec.gov/fast-answers/answersstopordhtm.html
order._replace(stop_price=None)
# Determine purchase price.
# Check if limit order can be filled.
if order.limit:
is_limit_hit = low < order.limit if order.is_long else high > order.limit
# When stop and limit are hit within the same bar, we pessimistically
# assume limit was hit before the stop (i.e. "before it counts")
is_limit_hit_before_stop = (is_limit_hit and
(order.limit < (stop_price or -np.inf)
if order.is_long
else order.limit > (stop_price or np.inf)))
if not is_limit_hit or is_limit_hit_before_stop:
continue
# stop_price, if set, was hit within this bar
price = (min(stop_price or open, order.limit)
if order.is_long else
max(stop_price or open, order.limit))
else:
# Market-if-touched / market order
price = prev_close if self._trade_on_close else open
price = (max(price, stop_price or -np.inf)
if order.is_long else
min(price, stop_price or np.inf))
# Determine entry/exit bar index
is_market_order = not order.limit and not stop_price
time_index = (self._i - 1) if is_market_order and self._trade_on_close else self._i
# If order is a SL/TP order, it should close an existing trade it was contingent upon
if order.parent_trade:
trade = order.parent_trade
_prev_size = trade.size
# If order.size is "greater" than trade.size, this order is a trade.close()
# order and part of the trade was already closed beforehand
size = copysign(min(abs(_prev_size), abs(order.size)), order.size)
# If this trade isn't already closed (e.g. on multiple `trade.close(.5)` calls)
if trade in self.trades:
self._reduce_trade(trade, price, size, time_index)
assert order.size != -_prev_size or trade not in self.trades
if order in (trade._sl_order,
trade._tp_order):
assert order.size == -trade.size
assert order not in self.orders # Removed when trade was closed
else:
# It's a trade.close() order, now done
assert abs(_prev_size) >= abs(size) >= 1
self.orders.remove(order)
continue
# Else this is a stand-alone trade
# Adjust price to include commission (or bid-ask spread).
# In long positions, the adjusted price is a fraction higher, and vice versa.
adjusted_price = self._adjusted_price(order.size, price)
# If order size was specified proportionally,
# precompute true size in units, accounting for margin and spread/commissions
size = order.size
if -1 < size < 1:
size = copysign(int((self.margin_available * self._leverage * abs(size))
// adjusted_price), size)
# Not enough cash/margin even for a single unit
if not size:
self.orders.remove(order)
continue
assert size == round(size)
need_size = int(size)
if not self._hedging:
# Fill position by FIFO closing/reducing existing opposite-facing trades.
# Existing trades are closed at unadjusted price, because the adjustment
# was already made when buying.
for trade in list(self.trades):
if trade.is_long == order.is_long:
continue
assert trade.size * order.size < 0
# Order size greater than this opposite-directed existing trade,
# so it will be closed completely
if abs(need_size) >= abs(trade.size):
self._close_trade(trade, price, time_index)
need_size += trade.size
else:
# The existing trade is larger than the new order,
# so it will only be closed partially
self._reduce_trade(trade, price, need_size, time_index)
need_size = 0
if not need_size:
break
# If we don't have enough liquidity to cover for the order, cancel it
if abs(need_size) * adjusted_price > self.margin_available * self._leverage:
self.orders.remove(order)
continue
# Open a new trade
if need_size:
self._open_trade(adjusted_price, need_size, order.sl, order.tp, time_index)
# We need to reprocess the SL/TP orders newly added to the queue.
# This allows e.g. SL hitting in the same bar the order was open.
# See https://github.com/kernc/backtesting.py/issues/119
if order.sl or order.tp:
if is_market_order:
reprocess_orders = True
elif (low <= (order.sl or -np.inf) <= high or
low <= (order.tp or -np.inf) <= high):
warnings.warn(
f"({data.index[-1]}) A contingent SL/TP order would execute in the "
"same bar its parent stop/limit order was turned into a trade. "
"Since we can't assert the precise intra-candle "
"price movement, the affected SL/TP order will instead be executed on "
"the next (matching) price/bar, making the result (of this trade) "
"somewhat dubious. "
"See https://github.com/kernc/backtesting.py/issues/119",
UserWarning)
# Order processed
self.orders.remove(order)
if reprocess_orders:
self._process_orders()
def _reduce_trade(self, trade: Trade, price: float, size: float, time_index: int):
assert trade.size * size < 0
assert abs(trade.size) >= abs(size)
size_left = trade.size + size
assert size_left * trade.size >= 0
if not size_left:
close_trade = trade
else:
# Reduce existing trade ...
trade._replace(size=size_left)
if trade._sl_order:
trade._sl_order._replace(size=-trade.size)
if trade._tp_order:
trade._tp_order._replace(size=-trade.size)
# ... by closing a reduced copy of it
close_trade = trade._copy(size=-size, sl_order=None, tp_order=None)
self.trades.append(close_trade)
self._close_trade(close_trade, price, time_index)
def _close_trade(self, trade: Trade, price: float, time_index: int):
self.trades.remove(trade)
if trade._sl_order:
self.orders.remove(trade._sl_order)
if trade._tp_order:
self.orders.remove(trade._tp_order)
self.closed_trades.append(trade._replace(exit_price=price, exit_bar=time_index))
self._cash += trade.pl
def _open_trade(self, price: float, size: int, sl: float, tp: float, time_index: int):
trade = Trade(self, size, price, time_index)
self.trades.append(trade)
# Create SL/TP (bracket) orders.
# Make sure SL order is created first so it gets adversarially processed before TP order
# in case of an ambiguous tie (both hit within a single bar).
# Note, sl/tp orders are inserted at the front of the list, thus order reversed.
if tp:
trade.tp = tp
if sl:
trade.sl = sl
class Backtest:
"""
Backtest a particular (parameterized) strategy
on particular data.
Upon initialization, call method
`backtesting.backtesting.Backtest.run` to run a backtest
instance, or `backtesting.backtesting.Backtest.optimize` to
optimize it.
"""
def __init__(self,
data: pd.DataFrame,
strategy: Type[Strategy],
*,
cash: float = 10_000,
commission: float = .0,
margin: float = 1.,
trade_on_close=False,
hedging=False,
exclusive_orders=False
):
"""
Initialize a backtest. Requires data and a strategy to test.
`data` is a `pd.DataFrame` with columns:
`Open`, `High`, `Low`, `Close`, and (optionally) `Volume`.
If any columns are missing, set them to what you have available,
e.g.
df['Open'] = df['High'] = df['Low'] = df['Close']
The passed data frame can contain additional columns that
can be used by the strategy (e.g. sentiment info).
DataFrame index can be either a datetime index (timestamps)
or a monotonic range index (i.e. a sequence of periods).
`strategy` is a `backtesting.backtesting.Strategy`
_subclass_ (not an instance).
`cash` is the initial cash to start with.
`commission` is the commission ratio. E.g. if your broker's commission
is 1% of trade value, set commission to `0.01`. Note, if you wish to
account for bid-ask spread, you can approximate doing so by increasing
the commission, e.g. set it to `0.0002` for commission-less forex
trading where the average spread is roughly 0.2‰ of asking price.
`margin` is the required margin (ratio) of a leveraged account.
No difference is made between initial and maintenance margins.
To run the backtest using e.g. 50:1 leverage that your broker allows,
set margin to `0.02` (1 / leverage).
If `trade_on_close` is `True`, market orders will be filled
with respect to the current bar's closing price instead of the
next bar's open.
If `hedging` is `True`, allow trades in both directions simultaneously.
If `False`, the opposite-facing orders first close existing trades in
a [FIFO] manner.
If `exclusive_orders` is `True`, each new order auto-closes the previous
trade/position, making at most a single trade (long or short) in effect
at each time.
[FIFO]: https://www.investopedia.com/terms/n/nfa-compliance-rule-2-43b.asp
"""
if not (isinstance(strategy, type) and issubclass(strategy, Strategy)):
raise TypeError('`strategy` must be a Strategy sub-type')
if not isinstance(data, pd.DataFrame):
raise TypeError("`data` must be a pandas.DataFrame with columns")
if not isinstance(commission, Number):
raise TypeError('`commission` must be a float value, percent of '
'entry order price')
data = data.copy(deep=False)
# Convert index to datetime index
if (not isinstance(data.index, pd.DatetimeIndex) and
not isinstance(data.index, pd.RangeIndex) and
# Numeric index consisting mostly of large numbers
(data.index.is_numeric() and
(data.index > pd.Timestamp('1975').timestamp()).mean() > .8)):
try:
data.index = pd.to_datetime(data.index, infer_datetime_format=True)
except ValueError:
pass
if 'Volume' not in data:
data['Volume'] = np.nan
if len(data) == 0:
raise ValueError('OHLC `data` is empty')
if len(data.columns.intersection({'Open', 'High', 'Low', 'Close', 'Volume'})) != 5:
raise ValueError("`data` must be a pandas.DataFrame with columns "
"'Open', 'High', 'Low', 'Close', and (optionally) 'Volume'")
if data[['Open', 'High', 'Low', 'Close']].isnull().values.any():
raise ValueError('Some OHLC values are missing (NaN). '
'Please strip those lines with `df.dropna()` or '
'fill them in with `df.interpolate()` or whatever.')
if np.any(data['Close'] > cash):
warnings.warn('Some prices are larger than initial cash value. Note that fractional '
'trading is not supported. If you want to trade Bitcoin, '
'increase initial cash, or trade μBTC or satoshis instead (GH-134).',
stacklevel=2)
if not data.index.is_monotonic_increasing:
warnings.warn('Data index is not sorted in ascending order. Sorting.',
stacklevel=2)
data = data.sort_index()
if not isinstance(data.index, pd.DatetimeIndex):
warnings.warn('Data index is not datetime. Assuming simple periods, '
'but `pd.DatetimeIndex` is advised.',
stacklevel=2)
self._data: pd.DataFrame = data
self._broker = partial(
_Broker, cash=cash, commission=commission, margin=margin,
trade_on_close=trade_on_close, hedging=hedging,
exclusive_orders=exclusive_orders, index=data.index,
)
self._strategy = strategy
self._results = None
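# Construction sketch (illustrative only): `GOOG` and `SmaCross` below stand in
# for any OHLCV DataFrame and Strategy subclass.
#
#     bt = Backtest(GOOG, SmaCross,
#                   cash=10_000,
#                   commission=0.002,        # 0.2% per order fill
#                   margin=0.05,             # 20:1 leverage (margin = 1 / leverage)
#                   trade_on_close=True,     # fill market orders at the current bar's close
#                   exclusive_orders=True)   # each new order closes the previous position
#     stats = bt.run()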
def run(self, **kwargs) -> pd.Series:
"""
Run the backtest. Returns `pd.Series` with results and statistics.
Keyword arguments are interpreted as strategy parameters.
>>> Backtest(GOOG, SmaCross).run()
Start 2004-08-19 00:00:00
End 2013-03-01 00:00:00
Duration 3116 days 00:00:00
Exposure Time [%] 93.9944
Equity Final [$] 51959.9
Equity Peak [$] 75787.4
Return [%] 419.599
Buy & Hold Return [%] 703.458
Return (Ann.) [%] 21.328
Volatility (Ann.) [%] 36.5383
Sharpe Ratio 0.583718
Sortino Ratio 1.09239
Calmar Ratio 0.444518
Max. Drawdown [%] -47.9801
Avg. Drawdown [%] -5.92585
Max. Drawdown Duration 584 days 00:00:00
Avg. Drawdown Duration 41 days 00:00:00
# Trades 65
Win Rate [%] 46.1538
Best Trade [%] 53.596
Worst Trade [%] -18.3989
Avg. Trade [%] 2.35371
Max. Trade Duration 183 days 00:00:00
Avg. Trade Duration 46 days 00:00:00
Profit Factor 2.08802
Expectancy [%] 8.79171
SQN 0.916893
_strategy SmaCross
_equity_curve Eq...
_trades Size EntryB...
dtype: object
"""
data = _Data(self._data.copy(deep=False))
broker: _Broker = self._broker(data=data)
strategy: Strategy = self._strategy(broker, data, kwargs)
strategy.init()
data._update() # Strategy.init might have changed/added to data.df
# Indicators used in Strategy.next()
indicator_attrs = {attr: indicator
for attr, indicator in strategy.__dict__.items()
if isinstance(indicator, _Indicator)}.items()
# Skip first few candles where indicators are still "warming up"
# +1 to have at least two entries available
start = 1 + max((np.isnan(indicator.astype(float)).argmin(axis=-1).max()
for _, indicator in indicator_attrs), default=0)
# Disable "invalid value encountered in ..." warnings. Comparison
# np.nan >= 3 is not invalid; it's False.
with np.errstate(invalid='ignore'):
for i in range(start, len(self._data)):
# Prepare data and indicators for `next` call
data._set_length(i + 1)
for attr, indicator in indicator_attrs:
# Slice indicator on the last dimension (case of 2d indicator)
setattr(strategy, attr, indicator[..., :i + 1])
# Handle orders processing and broker stuff
try:
broker.next()
except _OutOfMoneyError:
break
# Next tick, a moment before bar close
strategy.next()
else:
# Close any remaining open trades so they produce some stats
for trade in broker.trades:
trade.close()
# Re-run broker one last time to handle orders placed in the last strategy
# iteration. Use the same OHLC values as in the last broker iteration.
if start < len(self._data):
try_(broker.next, exception=_OutOfMoneyError)
# Set data back to full length
# for future `indicator._opts['data'].index` calls to work
data._set_length(len(self._data))
self._results = self._compute_stats(broker, strategy)
return self._results
def optimize(self, *,
maximize: Union[str, Callable[[pd.Series], float]] = 'SQN',
method: str = 'grid',
max_tries: Union[int, float] = None,
constraint: Callable[[dict], bool] = None,
return_heatmap: bool = False,
return_optimization: bool = False,
random_state: int = None,
**kwargs) -> Union[pd.Series,
Tuple[pd.Series, pd.Series],
Tuple[pd.Series, pd.Series, dict]]:
"""
Optimize strategy parameters to an optimal combination.
Returns result `pd.Series` of the best run.
`maximize` is a string key from the
`backtesting.backtesting.Backtest.run`-returned results series,
or a function that accepts this series object and returns a number;
the higher the better. By default, the method maximizes
Van Tharp's [System Quality Number](https://google.com/search?q=System+Quality+Number).
`method` is the optimization method. Currently two methods are supported:
* `"grid"` which does an exhaustive (or randomized) search over the
cartesian product of parameter combinations, and
* `"skopt"` which finds close-to-optimal strategy parameters using
[model-based optimization], making at most `max_tries` evaluations.
[model-based optimization]: \
https://scikit-optimize.github.io/stable/auto_examples/bayesian-optimization.html
`max_tries` is the maximal number of strategy runs to perform.
If `method="grid"`, this results in randomized grid search.
If `max_tries` is a floating value between (0, 1], this sets the
number of runs to approximately that fraction of full grid space.
Alternatively, if integer, it denotes the absolute maximum number
of evaluations. If unspecified (default), grid search is exhaustive,
whereas for `method="skopt"`, `max_tries` is set to 200.
`constraint` is a function that accepts a dict-like object of
parameters (with values) and returns `True` when the combination
is admissible to test with. By default, any parameters combination
is considered admissible.
If `return_heatmap` is `True`, besides returning the result
series, an additional `pd.Series` is returned with a multiindex
of all admissible parameter combinations, which can be further
inspected or projected onto 2D to plot a heatmap
(see `backtesting.lib.plot_heatmaps()`).
If `return_optimization` is True and `method = 'skopt'`,
in addition to result series (and maybe heatmap), return raw
[`scipy.optimize.OptimizeResult`][OptimizeResult] for further
inspection, e.g. with [scikit-optimize]\
[plotting tools].
[OptimizeResult]: \
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
[scikit-optimize]: https://scikit-optimize.github.io
[plotting tools]: https://scikit-optimize.github.io/stable/modules/plots.html
If you want reproducible optimization results, set `random_state`
to a fixed integer or a `numpy.random.RandomState` object.
Additional keyword arguments represent strategy arguments with
list-like collections of possible values. For example, the following
code finds and returns the "best" of the 7 admissible (of the
9 possible) parameter combinations:
backtest.optimize(sma1=[5, 10, 15], sma2=[10, 20, 40],
constraint=lambda p: p.sma1 < p.sma2)
.. TODO::
Improve multiprocessing/parallel execution on Windows with start method 'spawn'.
"""
if not kwargs:
raise ValueError('Need some strategy parameters to optimize')
maximize_key = None
if isinstance(maximize, str):
maximize_key = str(maximize)
stats = self._results if self._results is not None else self.run()
if maximize not in stats:
raise ValueError('`maximize`, if str, must match a key in pd.Series '
'result of backtest.run()')
def maximize(stats: pd.Series, _key=maximize):
return stats[_key]
elif not callable(maximize):
raise TypeError('`maximize` must be str (a field of backtest.run() result '
'Series) or a function that accepts result Series '
'and returns a number; the higher the better')
have_constraint = bool(constraint)
if constraint is None:
def constraint(_):
return True
elif not callable(constraint):
raise TypeError("`constraint` must be a function that accepts a dict "
"of strategy parameters and returns a bool whether "
"the combination of parameters is admissible or not")
if return_optimization and method != 'skopt':
raise ValueError("return_optimization=True only valid if method='skopt'")
def _tuple(x):
return x if isinstance(x, Sequence) and not isinstance(x, str) else (x,)
for k, v in kwargs.items():
if len(_tuple(v)) == 0:
raise ValueError(f"Optimization variable '{k}' is passed no "
f"optimization values: {k}={v}")
class AttrDict(dict):
def __getattr__(self, item):
return self[item]
def _grid_size():
size = np.prod([len(_tuple(v)) for v in kwargs.values()])
if size < 10_000 and have_constraint:
size = sum(1 for p in product(*(zip(repeat(k), _tuple(v))
for k, v in kwargs.items()))
if constraint(AttrDict(p)))
return size
def _optimize_grid() -> Union[pd.Series, Tuple[pd.Series, pd.Series]]:
rand = np.random.RandomState(random_state).random
grid_frac = (1 if max_tries is None else
max_tries if 0 < max_tries <= 1 else
max_tries / _grid_size())
param_combos = [dict(params) # back to dict so it pickles
for params in (AttrDict(params)
for params in product(*(zip(repeat(k), _tuple(v))
for k, v in kwargs.items())))
if constraint(params) # type: ignore
and rand() <= grid_frac]
if not param_combos:
raise ValueError('No admissible parameter combinations to test')
if len(param_combos) > 300:
warnings.warn(f'Searching for best of {len(param_combos)} configurations.',
stacklevel=2)
heatmap = pd.Series(np.nan,
name=maximize_key,
index=pd.MultiIndex.from_tuples(
[p.values() for p in param_combos],
names=next(iter(param_combos)).keys()))
def _batch(seq):
n = np.clip(int(len(seq) // (os.cpu_count() or 1)), 1, 300)
for i in range(0, len(seq), n):
yield seq[i:i + n]
# Save necessary objects into "global" state; pass into concurrent executor
# (and thus pickle) nothing but two numbers; receive nothing but numbers.
# With start method "fork", children processes will inherit parent address space
# in a copy-on-write manner, achieving better performance/RAM benefit.
backtest_uuid = np.random.random()
param_batches = list(_batch(param_combos))
Backtest._mp_backtests[backtest_uuid] = (self, param_batches, maximize) # type: ignore
try:
# If multiprocessing start method is 'fork' (i.e. on POSIX), use
# a pool of processes to compute results in parallel.
# Otherwise (i.e. on Windows), sequential computation will be "faster".
if mp.get_start_method(allow_none=False) == 'fork':
with ProcessPoolExecutor() as executor:
futures = [executor.submit(Backtest._mp_task, backtest_uuid, i)
for i in range(len(param_batches))]
for future in _tqdm(as_completed(futures), total=len(futures),
desc='Backtest.optimize'):
batch_index, values = future.result()
for value, params in zip(values, param_batches[batch_index]):
heatmap[tuple(params.values())] = value
else:
if os.name == 'posix':
warnings.warn("For multiprocessing support in `Backtest.optimize()` "
"set multiprocessing start method to 'fork'.")
for batch_index in _tqdm(range(len(param_batches))):
_, values = Backtest._mp_task(backtest_uuid, batch_index)
for value, params in zip(values, param_batches[batch_index]):
heatmap[tuple(params.values())] = value
finally:
del Backtest._mp_backtests[backtest_uuid]
best_params = heatmap.idxmax()
if pd.isnull(best_params):
# No trade was made in any of the runs. Just make a random
# run so we get some, if empty, results
stats = self.run(**param_combos[0])
else:
stats = self.run(**dict(zip(heatmap.index.names, best_params)))
if return_heatmap:
return stats, heatmap
return stats
def _optimize_skopt() -> Union[pd.Series,
Tuple[pd.Series, pd.Series],
Tuple[pd.Series, pd.Series, dict]]:
try:
from skopt import forest_minimize
from skopt.space import Integer, Real, Categorical
from skopt.utils import use_named_args
from skopt.callbacks import DeltaXStopper
from skopt.learning import ExtraTreesRegressor
except ImportError:
raise ImportError("Need package 'scikit-optimize' for method='skopt'. "
"pip install scikit-optimize")
nonlocal max_tries
max_tries = (200 if max_tries is None else
max(1, int(max_tries * _grid_size())) if 0 < max_tries <= 1 else
max_tries)
dimensions = []
for key, values in kwargs.items():
values = np.asarray(values)
if values.dtype.kind in 'mM': # timedelta, datetime64
# these dtypes are unsupported in skopt, so convert to raw int
# TODO: save dtype and convert back later
values = values.astype(int)
if values.dtype.kind in 'iumM':
dimensions.append(Integer(low=values.min(), high=values.max(), name=key))
elif values.dtype.kind == 'f':
dimensions.append(Real(low=values.min(), high=values.max(), name=key))
else:
dimensions.append(Categorical(values.tolist(), name=key, transform='onehot'))
# Avoid recomputing re-evaluations:
# "The objective has been evaluated at this point before."
# https://github.com/scikit-optimize/scikit-optimize/issues/302
memoized_run = lru_cache()(lambda tup: self.run(**dict(tup)))
# np.inf/np.nan breaks sklearn, np.finfo(float).max breaks skopt.plots.plot_objective
INVALID = 1e300
progress = iter(_tqdm(repeat(None), total=max_tries, desc='Backtest.optimize'))
@use_named_args(dimensions=dimensions)
def objective_function(**params):
next(progress)
# Check constraints
# TODO: Adjust after https://github.com/scikit-optimize/scikit-optimize/pull/971
if not constraint(AttrDict(params)):
return INVALID
res = memoized_run(tuple(params.items()))
value = -maximize(res)
if np.isnan(value):
return INVALID
return value
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', 'The objective has been evaluated at this point before.')
res = forest_minimize(
func=objective_function,
dimensions=dimensions,
n_calls=max_tries,
base_estimator=ExtraTreesRegressor(n_estimators=20, min_samples_leaf=2),
acq_func='LCB',
kappa=3,
n_initial_points=min(max_tries, 20 + 3 * len(kwargs)),
initial_point_generator='lhs', # 'sobol' requires n_initial_points ~ 2**N
callback=DeltaXStopper(9e-7),
random_state=random_state)
stats = self.run(**dict(zip(kwargs.keys(), res.x)))
output = [stats]
if return_heatmap:
heatmap = pd.Series(dict(zip(map(tuple, res.x_iters), -res.func_vals)),
name=maximize_key)
heatmap.index.names = kwargs.keys()
heatmap = heatmap[heatmap != -INVALID]
heatmap.sort_index(inplace=True)
output.append(heatmap)
if return_optimization:
valid = res.func_vals != INVALID
res.x_iters = list(compress(res.x_iters, valid))
res.func_vals = res.func_vals[valid]
output.append(res)
return stats if len(output) == 1 else tuple(output)
if method == 'grid':
output = _optimize_grid()
elif method == 'skopt':
output = _optimize_skopt()
else:
raise ValueError(f"Method should be 'grid' or 'skopt', not {method!r}")
return output
@staticmethod
def _mp_task(backtest_uuid, batch_index):
bt, param_batches, maximize_func = Backtest._mp_backtests[backtest_uuid]
return batch_index, [maximize_func(stats) if stats['# Trades'] else np.nan
for stats in (bt.run(**params)
for params in param_batches[batch_index])]
_mp_backtests: Dict[float, Tuple['Backtest', List, Callable]] = {}
@staticmethod
def _compute_drawdown_duration_peaks(dd: pd.Series):
iloc = np.unique(np.r_[(dd == 0).values.nonzero()[0], len(dd) - 1])
iloc = pd.Series(iloc, index=dd.index[iloc])
df = iloc.to_frame('iloc').assign(prev=iloc.shift())
df = df[df['iloc'] > df['prev'] + 1].astype(int)
# If no drawdown since no trade, avoid below for pandas sake and return nan series
if not len(df):
return (dd.replace(0, np.nan),) * 2
df['duration'] = df['iloc'].map(dd.index.__getitem__) - df['prev'].map(dd.index.__getitem__)
df['peak_dd'] = df.apply(lambda row: dd.iloc[row['prev']:row['iloc'] + 1].max(), axis=1)
df = df.reindex(dd.index)
return df['duration'], df['peak_dd']
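# Drawdown sketch (illustrative only): the duration/peak computation above runs
# on a relative drawdown series derived from the equity curve, e.g.
#
#     equity = np.array([100., 110., 99., 105., 120.])
#     dd = 1 - equity / np.maximum.accumulate(equity)
#     # -> [0., 0., 0.1, 0.0454..., 0.]   (a 10% peak drawdown after bar 1)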
def _compute_stats(self, broker: _Broker, strategy: Strategy) -> pd.Series:
data = self._data
index = data.index
equity = pd.Series(broker._equity).bfill().fillna(broker._cash).values
dd = 1 - equity / np.maximum.accumulate(equity)
dd_dur, dd_peaks = self._compute_drawdown_duration_peaks(pd.Series(dd, index=index))
equity_df = pd.DataFrame({
'Equity': equity,
'DrawdownPct': dd,
'DrawdownDuration': dd_dur},
index=index)
trades = broker.closed_trades
trades_df = pd.DataFrame({
'Size': [t.size for t in trades],
'EntryBar': [t.entry_bar for t in trades],
'ExitBar': [t.exit_bar for t in trades],
'EntryPrice': [t.entry_price for t in trades],
'ExitPrice': [t.exit_price for t in trades],
'PnL': [t.pl for t in trades],
'ReturnPct': [t.pl_pct for t in trades],
'EntryTime': [t.entry_time for t in trades],
'ExitTime': [t.exit_time for t in trades],
})
trades_df['Duration'] = trades_df['ExitTime'] - trades_df['EntryTime']
pl = trades_df['PnL']
returns = trades_df['ReturnPct']
durations = trades_df['Duration']
def _round_timedelta(value, _period=_data_period(index)):
if not isinstance(value, pd.Timedelta):
return value
resolution = getattr(_period, 'resolution_string', None) or _period.resolution
return value.ceil(resolution)
s = pd.Series(dtype=object)
s.loc['Start'] = index[0]
s.loc['End'] = index[-1]
s.loc['Duration'] = s.End - s.Start
have_position = np.repeat(0, len(index))
for t in trades:
have_position[t.entry_bar:t.exit_bar + 1] = 1 # type: ignore
s.loc['Exposure Time [%]'] = have_position.mean() * 100 # In "n bars" time, not index time
s.loc['Equity Final [$]'] = equity[-1]
s.loc['Equity Peak [$]'] = equity.max()
s.loc['Return [%]'] = (equity[-1] - equity[0]) / equity[0] * 100
c = data.Close.values
s.loc['Buy & Hold Return [%]'] = (c[-1] - c[0]) / c[0] * 100 # long-only return
def geometric_mean(returns):
returns = returns.fillna(0) + 1
return (0 if np.any(returns <= 0) else
np.exp(np.log(returns).sum() / (len(returns) or np.nan)) - 1)
day_returns = gmean_day_return = np.array(np.nan)
annual_trading_days = np.nan
if isinstance(index, pd.DatetimeIndex):
day_returns = equity_df['Equity'].resample('D').last().dropna().pct_change()
gmean_day_return = geometric_mean(day_returns)
annual_trading_days = float(
365 if index.dayofweek.to_series().between(5, 6).mean() > 2/7 * .6 else
252)
# Annualized return and risk metrics are computed based on the (mostly correct)
# assumption that the returns are compounded. See: https://dx.doi.org/10.2139/ssrn.3054517
# Our annualized return matches `empyrical.annual_return(day_returns)` whereas
# our risk doesn't; they use the simpler approach below.
annualized_return = (1 + gmean_day_return)**annual_trading_days - 1
s.loc['Return (Ann.) [%]'] = annualized_return * 100
s.loc['Volatility (Ann.) [%]'] = np.sqrt((day_returns.var(ddof=int(bool(day_returns.shape))) + (1 + gmean_day_return)**2)**annual_trading_days - (1 + gmean_day_return)**(2*annual_trading_days)) * 100 # noqa: E501
# s.loc['Return (Ann.) [%]'] = gmean_day_return * annual_trading_days * 100
# s.loc['Risk (Ann.) [%]'] = day_returns.std(ddof=1) * np.sqrt(annual_trading_days) * 100
# Our Sharpe mismatches `empyrical.sharpe_ratio()` because they use arithmetic mean return
# and simple standard deviation
s.loc['Sharpe Ratio'] = np.clip(s.loc['Return (Ann.) [%]'] / (s.loc['Volatility (Ann.) [%]'] or np.nan), 0, np.inf) # noqa: E501
# Our Sortino mismatches `empyrical.sortino_ratio()` because they use arithmetic mean return
s.loc['Sortino Ratio'] = np.clip(annualized_return / (np.sqrt(np.mean(day_returns.clip(-np.inf, 0)**2)) * np.sqrt(annual_trading_days)), 0, np.inf)  # noqa: E501
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from functools import lru_cache
import os
import vtk
import numpy as np
import vedo
from vedo import settings
import vedo.utils as utils
from vedo.colors import printc, getColor, colorMap, cmaps_names
from vedo.mesh import Mesh, merge
from vedo.pointcloud import Points
from vedo.picture import Picture
from vedo.settings import font_parameters
from deprecated import deprecated
__doc__ = ("""Submodule to generate basic geometric shapes.""" + vedo.docs._defs)
__all__ = [
"Marker",
"Line",
"DashedLine",
"RoundedLine",
"Tube",
"Lines",
"Spline",
"KSpline",
"CSpline",
"Bezier",
"Brace",
"NormalLines",
"Ribbon",
"Arrow",
"Arrows",
"Arrow2D",
"Arrows2D",
"FlatArrow",
"Polygon",
"Rectangle",
"Disc",
"Circle",
"Arc",
"Star",
"Star3D",
"Cross3D",
"Sphere",
"Spheres",
"Earth",
"Ellipsoid",
"Grid",
"TessellatedBox",
"Plane",
"Box",
"Cube",
"Spring",
"Cylinder",
"Cone",
"Pyramid",
"Torus",
"Paraboloid",
"Hyperboloid",
"TextBase",
"Text",
"Text3D",
"Text2D",
"CornerAnnotation",
"Latex",
"Glyph",
"Tensors",
"ParametricShape",
"ConvexHull",
"VedoLogo",
]
##############################################
_reps = [
("\nabla", "∇"),
("\infty", "∞"),
("\rightarrow", "→"),
("\lefttarrow", "←"),
("\partial", "∂"),
("\sqrt", "√"),
("\approx", "≈"),
("\neq", "≠"),
("\leq", "≤"),
("\geq", "≥"),
("\foreach", "∀"),
("\permille", "‰"),
("\euro", "€"),
("\dot", "·"),
("\varnothing", "∅"),
("\int", "∫"),
("\pm", "±"),
("\times","×"),
("\Gamma", "Γ"),
("\Delta", "Δ"),
("\Theta", "Θ"),
("\Lambda", "Λ"),
("\Pi", "Π"),
("\Sigma", "Σ"),
("\Phi", "Φ"),
("\Chi", "X"),
("\Xi", "Ξ"),
("\Psi", "Ψ"),
("\Omega", "Ω"),
("\alpha", "α"),
("\beta", "β"),
("\gamma", "γ"),
("\delta", "δ"),
("\epsilon", "ε"),
("\zeta", "ζ"),
("\eta", "η"),
("\theta", "θ"),
("\kappa", "κ"),
("\lambda", "λ"),
("\mu", "μ"),
("\lowerxi", "ξ"),
("\nu", "ν"),
("\pi", "π"),
("\rho", "ρ"),
("\sigma", "σ"),
("\tau", "τ"),
("\varphi", "φ"),
("\phi", "φ"),
("\chi", "χ"),
("\psi", "ψ"),
("\omega", "ω"),
("\circ", "°"),
("\onehalf", "½"),
("\onefourth", "¼"),
("\threefourths", "¾"),
("\^1", "¹"),
("\^2", "²"),
("\^3", "³"),
("\,", "~"),
]
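# Substitution sketch (illustrative only): the `_reps` table maps LaTeX-like
# escape sequences to Unicode glyphs; the text shapes below apply it when
# rendering strings, e.g.
#
#     txt = "\alpha + \beta \approx \gamma"
#     for esc, uni in _reps:
#         txt = txt.replace(esc, uni)
#     # txt is now "α + β ≈ γ"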
########################################################################
def Marker(symbol, pos=(0, 0, 0), c='lb', alpha=1, s=0.1, filled=True):
"""
Generate a marker shape.
Can be used in association with ``Glyph``.
"""
if isinstance(symbol, int):
symbs = ['.', 'p','*','h','D','d','o','v','^','>','<','s', 'x', 'a']
symbol = symbol % 14
symbol = symbs[symbol]
if symbol == '.':
mesh = Polygon(nsides=24, r=s*0.75)
elif symbol == 'p':
mesh = Polygon(nsides=5, r=s)
elif symbol == '*':
mesh = Star(r1=0.65*s*1.1, r2=s*1.1, line=not filled)
elif symbol == 'h':
mesh = Polygon(nsides=6, r=s)
elif symbol == 'D':
mesh = Polygon(nsides=4, r=s)
elif symbol == 'd':
mesh = Polygon(nsides=4, r=s*1.1).scale([0.5,1,1])
elif symbol == 'o':
mesh = Polygon(nsides=24, r=s*0.75)
elif symbol == 'v':
mesh = Polygon(nsides=3, r=s).rotateZ(180)
elif symbol == '^':
mesh = Polygon(nsides=3, r=s)
elif symbol == '>':
mesh = Polygon(nsides=3, r=s).rotateZ(-90)
elif symbol == '<':
mesh = Polygon(nsides=3, r=s).rotateZ(90)
elif symbol == 's':
mesh = Polygon(nsides=4, r=s).rotateZ(45)
elif symbol == 'x':
mesh = Text3D('+', pos=(0,0,0), s=s*2.6, justify='center', depth=0)
mesh.rotateZ(45)
elif symbol == 'a':
mesh = Text3D('*', pos=(0,0,0), s=s*3, justify='center', depth=0)
else:
mesh = Text3D(symbol, pos=(0,0,0), s=s*2, justify='center', depth=0)
mesh.flat().lighting('off').wireframe(not filled).c(c).alpha(alpha)
if len(pos) == 2:
pos = (pos[0], pos[1], 0)
mesh.SetPosition(pos)
mesh.name = "Marker"
return mesh
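# Usage sketch (illustrative only): a marker used on its own or as the glyph
# source for a small point cloud.
#
#     from vedo import show
#     m = Marker('*', c='tomato', s=0.2)
#     pts = [(0, 0, 0), (1, 0.5, 0), (2, 1.2, 0.3)]
#     show(Glyph(pts, m), axes=1)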
class Star3D(Mesh):
"""
Build a 3D star shape of 5 cusps, mainly useful as a 3D marker.
"""
def __init__(self, pos=(0,0,0), r=1.0, thickness=0.1, c="blue4", alpha=1):
if len(pos) == 2:
pos = (pos[0], pos[1], 0)
pts = ((1.34, 0., -0.37), (5.75e-3, -0.588, thickness/10), (0.377, 0.,-0.38),
(0.0116, 0., -1.35), (-0.366, 0., -0.384), (-1.33, 0., -0.385),
(-0.600, 0., 0.321), (-0.829, 0., 1.19), (-1.17e-3, 0., 0.761),
(0.824, 0., 1.20), (0.602, 0., 0.328), (6.07e-3, 0.588, thickness/10))
fcs = [[0, 1, 2], [0, 11,10], [2, 1, 3], [2, 11, 0], [3, 1, 4], [3, 11, 2],
[4, 1, 5], [4, 11, 3], [5, 1, 6], [5, 11, 4], [6, 1, 7], [6, 11, 5],
[7, 1, 8], [7, 11, 6], [8, 1, 9], [8, 11, 7], [9, 1,10], [9, 11, 8],
[10,1, 0],[10,11, 9]]
Mesh.__init__(self, [pts, fcs], c, alpha)
self.rotateX(90).scale(r).lighting('shiny')
self.SetPosition(pos)
self.name = "Star3D"
def Cross3D(pos=(0,0,0), s=1.0, thickness=0.3, c="b", alpha=1):
"""
Build a 3D cross shape, mainly useful as a 3D marker.
"""
c1 = Cylinder(r=thickness*s, height=2*s)
c2 = Cylinder(r=thickness*s, height=2*s).rotateX(90)
c3 = Cylinder(r=thickness*s, height=2*s).rotateY(90)
cr = merge(c1,c2,c3).color(c).alpha(alpha)
cr.SetPosition(pos)
cr.name = "Cross3D"
return cr
class Glyph(Mesh):
"""
At each vertex of a mesh, another mesh - a `'glyph'` - is shown with
various orientation options and coloring.
The input ``mesh`` can also be a simple list of 2D or 3D coordinates.
Color can be specified as a colormap which maps the size of the orientation
vectors in `orientationArray`.
:param orientationArray: list of vectors, ``vtkAbstractArray``
or the name of an already existing points array.
:type orientationArray: list, str, vtkAbstractArray
:param bool scaleByScalar: glyph mesh is scaled by the active scalars.
:param bool scaleByVectorSize: glyph mesh is scaled by the size of the vectors.
:param bool scaleByVectorComponents: glyph mesh is scaled by the 3 vectors components.
:param bool colorByScalar: glyph mesh is colored based on the scalar value.
:param bool colorByVectorSize: glyph mesh is colored based on the vector size.
:param float tol: set a minimum separation between two close glyphs
(not compatible with `orientationArray` being a list).
|glyphs.py|_ |glyphs_arrows.py|_
|glyphs| |glyphs_arrows|
"""
def __init__(self,
mesh,
glyphObj,
orientationArray=None,
scaleByScalar=False,
scaleByVectorSize=False,
scaleByVectorComponents=False,
colorByScalar=False,
colorByVectorSize=False,
tol=0,
c='k8',
alpha=1,
):
if utils.isSequence(mesh):
# create a cloud of points
poly = Points(mesh).polydata()
elif isinstance(mesh, vtk.vtkPolyData):
poly = mesh
else:
poly = mesh.polydata()
if tol:
cleanPolyData = vtk.vtkCleanPolyData()
cleanPolyData.SetInputData(poly)
cleanPolyData.SetTolerance(tol)
cleanPolyData.Update()
poly = cleanPolyData.GetOutput()
if isinstance(glyphObj, Points):
glyphObj = glyphObj.polydata()
cmap=''
if c in cmaps_names:
cmap = c
c = None
elif utils.isSequence(c): # user passing an array of point colors
ucols = vtk.vtkUnsignedCharArray()
ucols.SetNumberOfComponents(3)
ucols.SetName("glyph_RGB")
for col in c:
cl = getColor(col)
ucols.InsertNextTuple3(cl[0]*255, cl[1]*255, cl[2]*255)
poly.GetPointData().AddArray(ucols)
poly.GetPointData().SetActiveScalars("glyph_RGB")
c = None
gly = vtk.vtkGlyph3D()
gly.SetInputData(poly)
gly.SetSourceData(glyphObj)
if scaleByScalar:
gly.SetScaleModeToScaleByScalar()
elif scaleByVectorSize:
gly.SetScaleModeToScaleByVector()
elif scaleByVectorComponents:
gly.SetScaleModeToScaleByVectorComponents()
else:
gly.SetScaleModeToDataScalingOff()
if colorByVectorSize:
gly.SetVectorModeToUseVector()
gly.SetColorModeToColorByVector()
elif colorByScalar:
gly.SetColorModeToColorByScalar()
else:
gly.SetColorModeToColorByScale()
if orientationArray is not None:
gly.OrientOn()
if isinstance(orientationArray, str):
if orientationArray.lower() == "normals":
gly.SetVectorModeToUseNormal()
else: # passing a name
poly.GetPointData().SetActiveVectors(orientationArray)
gly.SetInputArrayToProcess(0, 0, 0, 0, orientationArray)
gly.SetVectorModeToUseVector()
elif utils.isSequence(orientationArray) and not tol: # passing a list
varr = vtk.vtkFloatArray()
varr.SetNumberOfComponents(3)
varr.SetName("glyph_vectors")
for v in orientationArray:
varr.InsertNextTuple(v)
poly.GetPointData().AddArray(varr)
poly.GetPointData().SetActiveVectors("glyph_vectors")
gly.SetInputArrayToProcess(0, 0, 0, 0, "glyph_vectors")
gly.SetVectorModeToUseVector()
gly.Update()
Mesh.__init__(self, gly.GetOutput(), c, alpha)
self.flat()
if cmap:
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(512)
lut.Build()
for i in range(512):
r, g, b = colorMap(i, cmap, 0, 512)
lut.SetTableValue(i, r, g, b, 1)
self.mapper().SetLookupTable(lut)
self.mapper().ScalarVisibilityOn()
self.mapper().SetScalarModeToUsePointData()
if gly.GetOutput().GetPointData().GetScalars():
rng = gly.GetOutput().GetPointData().GetScalars().GetRange()
self.mapper().SetScalarRange(rng[0], rng[1])
self.name = "Glyph"
class Tensors(Mesh):
"""Geometric representation of tensors defined on a domain or set of points.
Tensors can be scaled and/or rotated according to the source at each input point.
Scaling and rotation are controlled by the eigenvalues/eigenvectors of the symmetrical part
of the tensor as follows:
For each tensor, the eigenvalues (and associated eigenvectors) are sorted
to determine the major, medium, and minor eigenvalues/eigenvectors.
The eigenvalue decomposition only makes sense for symmetric tensors,
hence the need to only consider the symmetric part of the tensor,
which is 1/2*(T+T.transposed()).
:param str source: preset type of source shape
['ellipsoid', 'cylinder', 'cube' or any specified ``Mesh``]
:param bool useEigenValues: color source glyph using the eigenvalues or by scalars.
:param bool threeAxes: if `False`, scale the source in the x-direction according to
the major eigenvalue, the medium in the y-direction, and the minor in the z-direction.
Then, the source is rotated so that the glyph's local x-axis lies
along the major eigenvector, y-axis along the medium eigenvector, and z-axis along the minor.
If `True` three sources are produced, each of them oriented along an eigenvector
and scaled according to the corresponding eigenvalue.
:param bool isSymmetric: If `True` each source glyph is mirrored (2 or 6 glyphs will be produced).
The x-axis of the source glyph will correspond to the eigenvector on output.
:param float length: distance from the origin to the tip of the source glyph along the x-axis
:param float scale: scaling factor of the source glyph.
:param float maxScale: clamp scaling at this factor.
|tensors| |tensors.py|_ |tensor_grid.py|_
"""
def __init__(self, domain, source='ellipsoid', useEigenValues=True, isSymmetric=True,
threeAxes=False, scale=1, maxScale=None, length=None,
c=None, alpha=1):
if isinstance(source, Points):
src = source.normalize().polydata(False)
else:
if 'ellip' in source:
src = vtk.vtkSphereSource()
src.SetPhiResolution(24)
src.SetThetaResolution(12)
elif 'cyl' in source:
src = vtk.vtkCylinderSource()
src.SetResolution(48)
src.CappingOn()
elif source == 'cube':
src = vtk.vtkCubeSource()
src.Update()
tg = vtk.vtkTensorGlyph()
if isinstance(domain, vtk.vtkPolyData):
tg.SetInputData(domain)
else:
tg.SetInputData(domain.GetMapper().GetInput())
tg.SetSourceData(src.GetOutput())
if c is None:
tg.ColorGlyphsOn()
else:
tg.ColorGlyphsOff()
tg.SetSymmetric(int(isSymmetric))
if length is not None:
tg.SetLength(length)
if useEigenValues:
tg.ExtractEigenvaluesOn()
tg.SetColorModeToEigenvalues()
else:
tg.SetColorModeToScalars()
tg.SetThreeGlyphs(threeAxes)
tg.ScalingOn()
tg.SetScaleFactor(scale)
if maxScale is None:
tg.ClampScalingOn()
maxScale = scale*10
tg.SetMaxScaleFactor(maxScale)
tg.Update()
tgn = vtk.vtkPolyDataNormals()
tgn.SetInputData(tg.GetOutput())
tgn.Update()
Mesh.__init__(self, tgn.GetOutput(), c, alpha)
self.name = "Tensors"
class Line(Mesh):
"""
Build the line segment between points `p0` and `p1`.
If `p0` is a list of points, returns the line connecting them.
A 2D set of coords can also be passed as p0=[x..], p1=[y..].
:param bool closed: join last to first point
:param c: color name, number, or list of [R,G,B] colors.
:type c: int, str, list
:param float alpha: transparency in range [0,1].
:param lw: line width.
:param int res: resolution, number of points along the line
(only relevant if only 2 points are specified)
"""
def __init__(self, p0, p1=None, closed=False, c="k4", alpha=1, lw=1, res=2):
if isinstance(p1, vtk.vtkActor):
p1 = p1.GetPosition()
if isinstance(p0, vtk.vtkActor):
p0 = p0.GetPosition()
if isinstance(p0, Points):
p0 = p0.points()
self.slope = [] # filled by analysis.fitLine
self.center = []
self.variances = []
self.coefficients = [] # filled by pyplot.fit()
self.covarianceMatrix = []
self.coefficientErrors = []
self.MonteCarloCoefficients = []
self.reducedChi2 = -1
self.ndof = 0
self.dataSigma = 0
self.errorLines = []
self.errorBand = None
self.res=res
# detect if user is passing a 2D list of points as p0=xlist, p1=ylist:
if len(p0) > 3:
if not utils.isSequence(p0[0]) and not utils.isSequence(p1[0]) and len(p0)==len(p1):
# assume input is 2D xlist, ylist
p0 = np.stack((p0, p1), axis=1)
p1 = None
if len(p0[0]) == 2: # make it 3d
p0 = np.c_[np.array(p0), np.zeros(len(p0))]
# detect if user is passing a list of points:
if utils.isSequence(p0[0]):
if len(p0[0]) == 2: # make it 3d
p0 = np.c_[np.array(p0), np.zeros(len(p0))]
ppoints = vtk.vtkPoints() # Generate the polyline
ppoints.SetData(utils.numpy2vtk(p0, dtype=float))
lines = vtk.vtkCellArray()
npt = len(p0)
if closed:
lines.InsertNextCell(npt+1)
else:
lines.InsertNextCell(npt)
for i in range(npt):
lines.InsertCellPoint(i)
if closed:
lines.InsertCellPoint(0)
poly = vtk.vtkPolyData()
poly.SetPoints(ppoints)
poly.SetLines(lines)
top = p0[-1]
base = p0[0]
self.res = 2
else: # or just 2 points to link
lineSource = vtk.vtkLineSource()
if len(p0) == 2: # make it 3d
p0 = [p0[0],p0[1],0]
if len(p1) == 2:
p1 = [p1[0],p1[1],0]
lineSource.SetPoint1(p0)
lineSource.SetPoint2(p1)
lineSource.SetResolution(res-1)
lineSource.Update()
poly = lineSource.GetOutput()
top = np.array(p1)
base = np.array(p0)
Mesh.__init__(self, poly, c, alpha)
self.lw(lw).lighting('off')
self.PickableOff()
self.DragableOff()
self.base = base
self.top = top
self.name = "Line"
def eval(self, x):
"""
Calculate the position of an intermediate point
as a fraction of the length of the line,
where x=0 corresponds to the first point and x=1 to the last point.
This corresponds to an imaginary point that travels along the line
at constant speed.
Can be used in conjunction with `linInterpolate()`
to map any range to the [0,1] range.
"""
distance1 = 0.
length = self.length()
pts = self.points()
for i in range(1, len(pts)):
p0 = pts[i-1]
p1 = pts[i]
seg = p1-p0
distance0 = distance1
distance1 += np.linalg.norm(seg)
w1 = distance1/length
if w1 >= x:
break
w0 = distance0/length
v = p0 + seg*(x-w0)/(w1-w0)
return v
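# eval() sketch (illustrative only):
#
#     ln = Line([(0, 0, 0), (1, 0, 0), (1, 1, 0)])
#     ln.eval(0.5)    # point half-way along the polyline, here (1., 0., 0.)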
def pattern(self, stipple, repeats=10):
"""
Define a stipple pattern for dashing the line.
Pass the stipple pattern as a string like '- - -'.
Repeats controls the number of times the pattern repeats in a single segment.
Examples are: '- -', '-- - --', etc.
The resolution of the line (nr of points) can affect how pattern will show up.
:Example:
.. code-block:: python
from vedo import Line
pts = [[1, 0, 0], [5, 2, 0], [3, 3, 1]]
ln = Line(pts, c='r', lw=5).pattern('- -', repeats=10)
ln.show(axes=1)
"""
stipple = str(stipple) * int(2*repeats)
dimension = len(stipple)
image = vtk.vtkImageData()
image.SetDimensions(dimension, 1, 1)
image.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 4)
image.SetExtent(0, dimension-1, 0, 0, 0, 0)
i_dim = 0
while i_dim < dimension:
for i in range(dimension):
image.SetScalarComponentFromFloat(i_dim, 0, 0, 0, 255)
image.SetScalarComponentFromFloat(i_dim, 0, 0, 1, 255)
image.SetScalarComponentFromFloat(i_dim, 0, 0, 2, 255)
if stipple[i] == ' ':
image.SetScalarComponentFromFloat(i_dim, 0, 0, 3, 0)
else:
image.SetScalarComponentFromFloat(i_dim, 0, 0, 3, 255)
i_dim += 1
polyData = self.polydata(False)
# Create texture coordinates
tcoords = vtk.vtkDoubleArray()
tcoords.SetName("TCoordsStippledLine")
tcoords.SetNumberOfComponents(1)
tcoords.SetNumberOfTuples(polyData.GetNumberOfPoints())
for i in range(polyData.GetNumberOfPoints()):
tcoords.SetTypedTuple(i, [i/2])
polyData.GetPointData().SetTCoords(tcoords)
polyData.GetPointData().Modified()
texture = vtk.vtkTexture()
texture.SetInputData(image)
texture.InterpolateOff()
texture.RepeatOn()
self.SetTexture(texture)
return self
def length(self):
"""Calculate length of the line."""
distance = 0.
pts = self.points()
for i in range(1, len(pts)):
distance += np.linalg.norm(pts[i]-pts[i-1])
return distance
def sweep(self, direction=(1,0,0), res=1):
"""
Sweep the Line along the specified vector direction.
Returns a Mesh surface.
Line position is updated to allow for additional sweepings.
:Example:
.. code-block:: python
from vedo import Line, show
aline = Line([(0,0,0),(1,3,0),(2,4,0)])
surf1 = aline.sweep((1,0.2,0), res=3)
surf2 = aline.sweep((0.2,0,1))
aline.color('r').lineWidth(4)
show(surf1, surf2, aline, axes=1)
"""
line = self.polydata()
rows = line.GetNumberOfPoints()
spacing = 1 / res
surface = vtk.vtkPolyData()
res += 1
numberOfPoints = rows * res
numberOfPolys = (rows - 1) * (res - 1)
points = vtk.vtkPoints()
points.Allocate(numberOfPoints)
cnt = 0
x = [0.,0.,0.]
for row in range(rows):
for col in range(res):
p = [0.,0.,0.]
line.GetPoint(row, p)
x[0] = p[0] + direction[0] * col * spacing
x[1] = p[1] + direction[1] * col * spacing
x[2] = p[2] + direction[2] * col * spacing
points.InsertPoint(cnt, x)
cnt += 1
# Generate the quads
polys = vtk.vtkCellArray()
polys.Allocate(numberOfPolys*4)
pts = [0,0,0,0]
for row in range(rows-1):
for col in range(res-1):
pts[0] = col + row * res
pts[1] = pts[0] + 1
pts[2] = pts[0] + res + 1
pts[3] = pts[0] + res
polys.InsertNextCell(4, pts)
surface.SetPoints(points)
surface.SetPolys(polys)
asurface = vedo.Mesh(surface)
prop = vtk.vtkProperty()
prop.DeepCopy(self.GetProperty())
asurface.SetProperty(prop)
asurface.property = prop
asurface.lighting('default')
self.points(self.points()+direction)
return asurface
class DashedLine(Line):
"""
Consider using `Line.pattern()` instead.
Build a dashed line segment between points `p0` and `p1`.
If `p0` is a list of points returns the line connecting them.
A 2D set of coords can also be passed as p0=[x..], p1=[y..].
:param bool closed: join last to first point
:param float spacing: relative size of the dash.
:param c: color name, number, or list of [R,G,B] colors.
:type c: int, str, list
:param float alpha: transparency in range [0,1].
:param lw: line width.
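:Example:
A minimal usage sketch (the points below are purely illustrative):
.. code-block:: python
from vedo import DashedLine, show
dln = DashedLine([(0,0,0), (2,1,0), (3,3,1)], spacing=0.2, lw=3)
show(dln, axes=1)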
"""
def __init__(self, p0, p1=None, spacing=0.1, closed=False, c="k5", alpha=1, lw=2):
if isinstance(p1, vtk.vtkActor):
p1 = p1.GetPosition()
if isinstance(p0, vtk.vtkActor):
p0 = p0.GetPosition()
if isinstance(p0, Points):
p0 = p0.points()
# detect if user is passing a 2D list of points as p0=xlist, p1=ylist:
if len(p0) > 3:
if not utils.isSequence(p0[0]) and not utils.isSequence(p1[0]) and len(p0)==len(p1):
# assume input is 2D xlist, ylist
p0 = np.stack((p0, p1), axis=1)
p1 = None
if len(p0[0]) == 2: # make it 3d
p0 = np.c_[np.array(p0), np.zeros(len(p0))]
if closed:
p0 = np.append(p0, [p0[0]], axis=0)
if p1 is not None: # assume passing p0=[x,y]
if len(p0) == 2 and not utils.isSequence(p0[0]):
p0 = (p0[0], p0[1], 0)
if len(p1) == 2 and not utils.isSequence(p1[0]):
p1 = (p1[0], p1[1], 0)
# detect if user is passing a list of points:
if utils.isSequence(p0[0]):
listp = p0
else: # or just 2 points to link
listp = [p0, p1]
listp = np.array(listp)
if listp.shape[1]==2:
listp = np.c_[listp, np.zeros(listp.shape[0])]
xmn = np.min(listp, axis=0)
xmx = np.max(listp, axis=0)
dlen = np.linalg.norm(xmx-xmn)*np.clip(spacing, 0.01,1.0)/10
if not dlen:
Mesh.__init__(self, vtk.vtkPolyData(), c, alpha)
self.name = "DashedLine (void)"
return
qs = []
for ipt in range(len(listp)-1):
p0 = listp[ipt]
p1 = listp[ipt+1]
v = p1-p0
vdist = np.linalg.norm(v)
n1 = int(vdist/dlen)
if not n1: continue
res = 0
for i in range(n1+2):
ist = (i-0.5)/n1
if ist<0: ist=0
qi = p0 + v * (ist - res/vdist)
if ist>1:
qi = p1
res = np.linalg.norm(qi-p1)
qs.append(qi)
break
qs.append(qi)
polylns = vtk.vtkAppendPolyData()
for i,q1 in enumerate(qs):
if not i%2: continue
q0 = qs[i-1]
lineSource = vtk.vtkLineSource()
lineSource.SetPoint1(q0)
lineSource.SetPoint2(q1)
lineSource.Update()
polylns.AddInputData(lineSource.GetOutput())
polylns.Update()
Mesh.__init__(self, polylns.GetOutput(), c, alpha)
self.lw(lw).lighting('off')
self.base = listp[0]
if closed:
self.top = listp[-2]
else:
self.top = listp[-1]
self.name = "DashedLine"
def RoundedLine(pts, lw, c='gray4', alpha=1, res=10):
"""
Create a 2D line of specified thickness (in absolute units) passing through
a list of input points. Borders of the line are rounded.
Parameters
----------
pts : list
a list of points in 2D or 3D (z will be ignored).
lw : float
thickness of the line.
res : int, optional
resolution of the rounded regions. The default is 10.
Example
-------
.. code-block:: python
from vedo import *
pts = [(-4,-3),(1,1),(2,4),(4,1),(3,-1),(2,-5),(9,-3)]
ln = Line(pts, c='r', lw=2).z(0.01)
rl = RoundedLine(pts, 0.6)
show(Points(pts), ln, rl, axes=1)
"""
pts = np.asarray(pts)
if len(pts[0]) == 2: # make it 3d
pts = np.c_[pts, np.zeros(len(pts))]
def _getpts(pts, revd=False):
if revd:
pts = list(reversed(pts))
if len(pts)==2:
p0, p1 = pts
v = p1-p0
dv = np.linalg.norm(v)
nv = np.cross(v, (0,0,-1))
nv = nv/np.linalg.norm(nv)*lw
return [p0+nv, p1+nv]
ptsnew = []
for k in range(len(pts)-2):
p0 = pts[k]
p1 = pts[k+1]
p2 = pts[k+2]
v = p1-p0
u = p2-p1
du = np.linalg.norm(u)
dv = np.linalg.norm(v)
nv = np.cross(v, (0,0,-1))
nv = nv/np.linalg.norm(nv)*lw
nu = np.cross(u, (0,0,-1))
nu = nu/np.linalg.norm(nu)*lw
uv = np.cross(u,v)
if k==0:
ptsnew.append(p0+nv)
if uv[2]<=0:
alpha = np.arccos(np.dot(u,v)/du/dv)
db = lw*np.tan(alpha/2)
p1new = p1+nv -v/dv * db
ptsnew.append(p1new)
else:
p1a = p1+nv
p1b = p1+nu
for i in range(0,res+1):
pab = p1a*(res-i)/res + p1b*i/res
vpab = pab-p1
vpab = vpab/np.linalg.norm(vpab)*lw
ptsnew.append(p1+vpab)
if k == len(pts)-3:
ptsnew.append(p2+nu)
if revd:
ptsnew.append(p2-nu)
return ptsnew
ptsnew = _getpts(pts) + _getpts(pts, revd=True)
lk = Line(ptsnew).triangulate().lw(0).lighting('off')
lk.name = "RoundedLine"
return lk
class Lines(Line):
"""
Build the line segments between two lists of points `startPoints` and `endPoints`.
`startPoints` can also be passed in the form ``[[point1, point2], ...]``.
:param float scale: apply a rescaling factor to the lengths.
|lines|
.. hint:: |fitspheres2.py|_
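:Example:
A minimal usage sketch (the random points below are purely illustrative):
.. code-block:: python
import numpy as np
from vedo import Lines, show
pts1 = np.random.rand(10, 3)
pts2 = pts1 + (1, 0, 0)
show(Lines(pts1, pts2, c='k4', lw=2), axes=1)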
"""
def __init__(self, startPoints, endPoints=None,
c='k4', alpha=1, lw=1, dotted=False, scale=1, res=1):
if isinstance(startPoints, Points):
startPoints = startPoints.points()
if isinstance(endPoints, Points):
endPoints = endPoints.points()
if endPoints is not None:
startPoints = np.stack((startPoints, endPoints), axis=1)
polylns = vtk.vtkAppendPolyData()
for twopts in startPoints:
lineSource = vtk.vtkLineSource()
lineSource.SetResolution(res)
if len(twopts[0])==2:
lineSource.SetPoint1(twopts[0][0], twopts[0][1], 0.0)
else:
lineSource.SetPoint1(twopts[0])
if scale == 1:
pt2 = twopts[1]
else:
vers = (np.array(twopts[1]) - twopts[0]) * scale
pt2 = np.array(twopts[0]) + vers
if len(pt2)==2:
lineSource.SetPoint2(pt2[0], pt2[1], 0.0)
else:
lineSource.SetPoint2(pt2)
polylns.AddInputConnection(lineSource.GetOutputPort())
polylns.Update()
Mesh.__init__(self, polylns.GetOutput(), c, alpha)
self.lw(lw).lighting('off')
if dotted:
self.GetProperty().SetLineStipplePattern(0xF0F0)
self.GetProperty().SetLineStippleRepeatFactor(1)
self.name = "Lines"
class Spline(Line):
"""
Find the B-Spline curve through a set of points. This curve does not necessarily
pass exactly through all the input points. Needs to import `scipy`.
Return an ``Mesh`` object.
:param float smooth: smoothing factor.
- 0 = interpolate points exactly [default].
- 1 = average point positions.
:param int degree: degree of the spline (1<degree<5)
:param str easing: control the density of points along the spline.
Available options are
[InSine, OutSine, Sine, InQuad, OutQuad, InCubic, OutCubic,
InQuart, OutQuart, InCirc, OutCirc].
Can be used to create animations (move objects at varying speed).
See e.g.: https://easings.net
:param int res: number of points on the spline
See also: ``CSpline`` and ``KSpline``.
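:Example:
A minimal usage sketch (random points; the easing name is taken from the list above):
.. code-block:: python
import numpy as np
from vedo import Spline, Points, show
pts = np.random.rand(8, 3)
sp = Spline(pts, smooth=0.1, res=200, easing="OutQuad")
show(Points(pts), sp, axes=1)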
"""
def __init__(self, points,
smooth=0,
degree=2,
closed=False,
s=2,
res=None,
easing="",
):
from scipy.interpolate import splprep, splev
if isinstance(points, Points):
points = points.points()
if len(points[0]) == 2: # make it 3d
points = np.c_[np.array(points), np.zeros(len(points))]
per = 0
if closed:
points = np.append(points, [points[0]], axis=0)
per = 1
if res is None:
res = len(points)*10
points = np.array(points)
minx, miny, minz = np.min(points, axis=0)
maxx, maxy, maxz = np.max(points, axis=0)
maxb = max(maxx - minx, maxy - miny, maxz - minz)
smooth *= maxb / 2 # must be in absolute units
x = np.linspace(0, 1, res)
if easing:
if easing=="InSine":
x = 1 - np.cos((x * np.pi) / 2)
elif easing=="OutSine":
x = np.sin((x * np.pi) / 2)
elif easing=="Sine":
x = -(np.cos(np.pi * x) - 1) / 2
elif easing=="InQuad":
x = x*x
elif easing=="OutQuad":
x = 1 - (1 - x) * (1 - x)
elif easing=="InCubic":
x = x*x*x
elif easing=="OutCubic":
x = 1 - np.power(1 - x, 3)
elif easing=="InQuart":
x = x * x * x * x
elif easing=="OutQuart":
x = 1 - np.power(1 - x, 4)
elif easing=="InCirc":
x = 1 - np.sqrt(1 - np.power(x, 2))
elif easing=="OutCirc":
x = np.sqrt(1 - np.power(x - 1, 2))
else:
printc("Unkown ease mode", easing, c='r')
# find the knots
tckp, _ = splprep(points.T, task=0, s=smooth, k=degree, per=per)
# evaluate spLine, including interpolated points:
xnew, ynew, znew = splev(x, tckp)
Line.__init__(self, np.c_[xnew, ynew, znew], lw=2)
self.lighting('off')
self.name = "Spline"
class KSpline(Line):
"""
Return a Kochanek spline which runs exactly through all the input points.
See: https://en.wikipedia.org/wiki/Kochanek%E2%80%93Bartels_spline
:param float continuity: changes the sharpness in change between tangents
:param float tension: changes the length of the tangent vector
:param float bias: changes the direction of the tangent vector
:param bool closed: join last to first point to produce a closed curve
:param int res: approximate resolution of the output line.
Default is 20 times the number of input points.
See also: ``Spline`` and ``CSpline``.
|kspline|
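:Example:
A minimal usage sketch (continuity/tension values below are purely illustrative):
.. code-block:: python
import numpy as np
from vedo import KSpline, Points, show
pts = np.random.rand(8, 3)
ks = KSpline(pts, continuity=0.5, tension=-0.3)
show(Points(pts), ks, axes=1)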
"""
def __init__(self, points,
continuity=0, tension=0, bias=0,
closed=False, res=None):
if isinstance(points, Points):
points = points.points()
if not res: res = len(points)*20
if len(points[0]) == 2: # make it 3d
points = np.c_[np.array(points), np.zeros(len(points))]
xspline = vtk.vtkKochanekSpline()
yspline = vtk.vtkKochanekSpline()
zspline = vtk.vtkKochanekSpline()
for s in [xspline, yspline, zspline]:
if bias: s.SetDefaultBias(bias)
if tension: s.SetDefaultTension(tension)
if continuity: s.SetDefaultContinuity(continuity)
s.SetClosed(closed)
for i,p in enumerate(points):
xspline.AddPoint(i, p[0])
yspline.AddPoint(i, p[1])
if len(p)>2:
zspline.AddPoint(i, p[2])
ln = []
for pos in np.linspace(0, len(points), res):
x = xspline.Evaluate(pos)
y = yspline.Evaluate(pos)
z = 0
if len(p)>2:
z = zspline.Evaluate(pos)
ln.append((x,y,z))
Line.__init__(self, ln, lw=2)
self.clean()
self.lighting('off')
self.name = "KSpline"
self.base = np.array(points[0])
self.top = np.array(points[-1])
class CSpline(Line):
"""
Return a Cardinal spline which runs exactly through all the input points.
:param bool closed: join last to first point to produce a closed curve
:param int res: approximate resolution of the output line.
Default is 20 times the number of input points.
See also: ``Spline`` and ``KSpline``.
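:Example:
A minimal usage sketch (the random points below are purely illustrative):
.. code-block:: python
import numpy as np
from vedo import CSpline, Points, show
pts = np.random.rand(8, 3)
show(Points(pts), CSpline(pts, closed=True), axes=1)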
"""
def __init__(self, points, closed=False, res=None):
if isinstance(points, Points):
points = points.points()
if not res: res = len(points)*20
if len(points[0]) == 2: # make it 3d
points = np.c_[np.array(points), np.zeros(len(points))]
xspline = vtk.vtkCardinalSpline()
yspline = vtk.vtkCardinalSpline()
zspline = vtk.vtkCardinalSpline()
for s in [xspline, yspline, zspline]:
s.SetClosed(closed)
for i,p in enumerate(points):
xspline.AddPoint(i, p[0])
yspline.AddPoint(i, p[1])
if len(p)>2:
zspline.AddPoint(i, p[2])
ln = []
for pos in np.linspace(0, len(points), res):
x = xspline.Evaluate(pos)
y = yspline.Evaluate(pos)
z = 0
if len(p)>2:
z = zspline.Evaluate(pos)
ln.append((x,y,z))
Line.__init__(self, ln, lw=2)
self.clean()
self.lighting('off')
self.name = "CSpline"
self.base = np.array(points[0])
self.top = np.array(points[-1])
def Bezier(points, res=None):
"""Generate the Bezier line that links the first to the last point.
:Example:
.. code-block:: python
from vedo import *
import numpy as np
pts = np.random.randn(25,3)
for i,p in enumerate(pts):
p += [5*i, 15*sin(i/2), i*i*i/200]
show(Points(pts), Bezier(pts), axes=1)
|bezier|
"""
N = len(points)
if res is None:
res = 10 * N
t = np.linspace(0, 1, num=res)
bcurve = np.zeros((res, len(points[0])))
def binom(n, k):
b = 1
for t in range(1, min(k, n-k)+1):
b *= n/t
n -= 1
return b
def bernstein(n, k):
coeff = binom(n, k)
def _bpoly(x):
return coeff * x**k * (1-x)**(n-k)
return _bpoly
for ii in range(N):
b = bernstein(N-1, ii)(t)
bcurve += np.outer(b, points[ii])
ln = Line(bcurve, lw=2)
ln.name = "BezierLine"
return ln
def Brace(q1, q2, style='}', pad=0.2, thickness=1,
font='Kanopus', comment='', s=1, c='k1', alpha=1):
"""
Create a brace (bracket) shape which spans from point q1 to point q2.
Parameters
----------
q1 : list
point 1.
q2 : list
point 2.
style : str, optional
style of the bracket, eg. {}, [], (), <>. The default is '{'.
pad : float, optional
padding space in percent. The default is 0.2.
thickness : float, optional
thickness factor for the bracket. The default is 1.
font : str, optional
font type. The default is 'Kanopus'.
comment : str, optional
additional text to appear next to the bracket. The default is ''.
s : float, optional
scale factor for the comment
|scatter3| |scatter3.py|_
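:Example:
A minimal usage sketch (the points and comment below are purely illustrative):
.. code-block:: python
from vedo import Brace, show
br = Brace((1, 1), (3, 2), style='}', comment='some range', s=0.7)
show(br, axes=1)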
"""
if isinstance(q1, vtk.vtkActor):
q1 = q1.GetPosition()
if isinstance(q2, vtk.vtkActor):
q2 = q2.GetPosition()
if len(q1)==2:
q1 = [q1[0],q1[1],0.0]
if len(q2)==2:
q2 = [q2[0],q2[1],0.0]
q1 = np.array(q1)
q2 = np.array(q2)
q2[2] = q1[2]
if style not in '{}[]()<>|I':
printc("Warning in Brace(): unknown style", style, c='y')
br = Text3D(style, c=c, alpha=alpha, font=font)
x0,x1, y0,y1, _,_ = br.bounds()
flip = False
if style in ['}',']',')','>']:
flip = True
if flip:
br.origin(x0-pad*(x1-x0),y0,0)
else:
br.origin(x1+pad*(x1-x0),y0,0)
angle = np.arctan2( q2[1]-q1[1], q2[0]-q1[0] )*57.3 - 90
br.rotateZ(angle)
fy = 1/(y1-y0)*np.linalg.norm(q1-q2)
fx = fy*0.3*thickness
br.scale([fx,fy,1])
br.pos(q1-br.origin())
if comment:
extra_angle = 90
just = 'center-bottom'
if q2[0]-q1[0] < 0:
extra_angle = -90
just = 'center-top'
if flip:
just = 'center-top'
if q2[0]-q1[0] < 0:
just = 'center-bottom'
cmt = Text3D(comment, c=c, alpha=alpha, font=font, justify=just)
cx0,cx1, cy0,cy1, _,_ = cmt.bounds()
if len(comment)>1:
cmt.rotateZ(angle+extra_angle)
cmt.scale(1/(cy1-cy0)*np.linalg.norm(q1-q2)/6*s)
cm = br.centerOfMass()
cmt.pos(cm+(cm-(q1+q2)/2)*1.4)
br = merge(br, cmt)
br.name = "Brace"
return br
def NormalLines(mesh, ratio=1, atCells=True, scale=1):
"""
Build a ``Mesh`` made of the normals at cells, shown as lines.
If `atCells` is `False`, normals are shown at the vertices.
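:Example:
A minimal usage sketch (using a Sphere as an arbitrary input mesh):
.. code-block:: python
from vedo import Sphere, NormalLines, show
s = Sphere(res=12).alpha(0.2)
show(s, NormalLines(s, ratio=2, scale=2), axes=1)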
"""
poly = mesh.clone().computeNormals().polydata()
if atCells:
centers = vtk.vtkCellCenters()
centers.SetInputData(poly)
centers.Update()
poly = centers.GetOutput()
maskPts = vtk.vtkMaskPoints()
maskPts.SetInputData(poly)
maskPts.SetOnRatio(ratio)
maskPts.RandomModeOff()
maskPts.Update()
ln = vtk.vtkLineSource()
ln.SetPoint1(0, 0, 0)
ln.SetPoint2(1, 0, 0)
ln.Update()
glyph = vtk.vtkGlyph3D()
glyph.SetSourceData(ln.GetOutput())
glyph.SetInputData(maskPts.GetOutput())
glyph.SetVectorModeToUseNormal()
b = poly.GetBounds()
sc = max([b[1] - b[0], b[3] - b[2], b[5] - b[4]]) / 50 *scale
glyph.SetScaleFactor(sc)
glyph.OrientOn()
glyph.Update()
glyphActor = Mesh(glyph.GetOutput())
glyphActor.mapper().SetScalarModeToUsePointFieldData()
glyphActor.PickableOff()
glyphActor.SetProperty(mesh.GetProperty())
glyphActor.property = mesh.GetProperty()
return glyphActor
class Tube(Mesh):
"""Build a tube along the line defined by a set of points.
:param r: constant radius or list of radii.
:type r: float, list
:param c: constant color or list of colors for each point.
:type c: float, list
:param int res: resolution, number of sides of the tube
|ribbon.py|_ |tube.py|_
|ribbon| |tube|
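:Example:
A minimal usage sketch with a per-point radius (values are purely illustrative):
.. code-block:: python
import numpy as np
from vedo import Tube, show
pts = [(np.sin(a), np.cos(a), a/6) for a in np.linspace(0, 6, 50)]
radii = np.linspace(0.02, 0.15, 50)
show(Tube(pts, r=radii, c='tomato'), axes=1)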
"""
def __init__(self, points, r=1, cap=True, c=None, alpha=1, res=12):
if isinstance(points, Mesh):
polyln = points.polydata()
points = points.points()
else:
vpoints = vtk.vtkPoints()
idx = len(points)
for p in points:
if len(p)==3:
vpoints.InsertNextPoint(p[0],p[1],p[2])
else:
vpoints.InsertNextPoint(p[0],p[1],0)
line = vtk.vtkPolyLine()
line.GetPointIds().SetNumberOfIds(idx)
for i in range(idx):
line.GetPointIds().SetId(i, i)
lines = vtk.vtkCellArray()
lines.InsertNextCell(line)
polyln = vtk.vtkPolyData()
polyln.SetPoints(vpoints)
polyln.SetLines(lines)
tuf = vtk.vtkTubeFilter()
tuf.SetCapping(cap)
tuf.SetNumberOfSides(res)
tuf.SetInputData(polyln)
if utils.isSequence(r):
arr = utils.numpy2vtk(r, dtype=float)
arr.SetName("TubeRadius")
polyln.GetPointData().AddArray(arr)
polyln.GetPointData().SetActiveScalars("TubeRadius")
tuf.SetVaryRadiusToVaryRadiusByAbsoluteScalar()
else:
tuf.SetRadius(r)
usingColScals = False
if utils.isSequence(c):
usingColScals = True
cc = vtk.vtkUnsignedCharArray()
cc.SetName("TubeColors")
cc.SetNumberOfComponents(3)
cc.SetNumberOfTuples(len(c))
for i, ic in enumerate(c):
r, g, b = getColor(ic)
cc.InsertTuple3(i, int(255 * r), int(255 * g), int(255 * b))
polyln.GetPointData().AddArray(cc)
c = None
tuf.Update()
Mesh.__init__(self, tuf.GetOutput(), c, alpha)
self.phong()
if usingColScals:
self.mapper().SetScalarModeToUsePointFieldData()
self.mapper().ScalarVisibilityOn()
self.mapper().SelectColorArray("TubeColors")
self.mapper().Modified()
self.base = np.array(points[0])
self.top = np.array(points[-1])
self.name = "Tube"
class Ribbon(Mesh):
"""Connect two lines to generate the surface inbetween.
Set the mode by which to create the ruled surface.
It also works with a single line in input. In this case the ribbon
is formed by following the local plane of the line in space.
:param int mode: If mode=0, resample evenly the input lines (based on length)
and generates triangle strips.
If mode=1, use the existing points and walk along the polyline.
:param bool closed: if True, join the last point with the first to form
a closed surface
:param list res: ribbon resolutions along the line and perpendicularly to it.
|ribbon| |ribbon.py|_
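:Example:
A minimal usage sketch joining two nearby lines (points are purely illustrative):
.. code-block:: python
import numpy as np
from vedo import Ribbon, show
l1 = [(x, np.sin(x), 0.0) for x in np.linspace(0, 6, 30)]
l2 = [(x, np.sin(x)+0.5, 0.2) for x in np.linspace(0, 6, 30)]
show(Ribbon(l1, l2, c='indigo3'), axes=1)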
"""
def __init__(self, line1, line2=None, mode=0, closed=False, width=None,
c="indigo3", alpha=1, res=(200,5)):
if isinstance(line1, Points):
line1 = line1.points()
if isinstance(line2, Points):
line2 = line2.points()
elif line2 is None:
RibbonFilter = vtk.vtkRibbonFilter()
aline = Line(line1)
RibbonFilter.SetInputData(aline.polydata(False))
if width is None:
width = aline.diagonalSize()/20.
RibbonFilter.SetWidth(width)
RibbonFilter.Update()
Mesh.__init__(self, RibbonFilter.GetOutput(), c, alpha)
self.name = "Ribbon"
return #######################
if closed:
line1 = line1.tolist()
line1 += [line1[0]]
line2 = line2.tolist()
line2 += [line2[0]]
if len(line1[0]) == 2:
line1 = np.c_[np.asarray(line1), np.zeros(len(line1))]
if len(line2[0]) == 2:
line2 = np.c_[np.asarray(line2), np.zeros(len(line2))]
ppoints1 = vtk.vtkPoints() # Generate the polyline1
ppoints1.SetData(utils.numpy2vtk(line1, dtype=float))
lines1 = vtk.vtkCellArray()
lines1.InsertNextCell(len(line1))
for i in range(len(line1)):
lines1.InsertCellPoint(i)
poly1 = vtk.vtkPolyData()
poly1.SetPoints(ppoints1)
poly1.SetLines(lines1)
ppoints2 = vtk.vtkPoints() # Generate the polyline2
ppoints2.SetData(utils.numpy2vtk(line2, dtype=float))
lines2 = vtk.vtkCellArray()
lines2.InsertNextCell(len(line2))
for i in range(len(line2)):
lines2.InsertCellPoint(i)
poly2 = vtk.vtkPolyData()
poly2.SetPoints(ppoints2)
poly2.SetLines(lines2)
# build the lines
lines1 = vtk.vtkCellArray()
lines1.InsertNextCell(poly1.GetNumberOfPoints())
for i in range(poly1.GetNumberOfPoints()):
lines1.InsertCellPoint(i)
polygon1 = vtk.vtkPolyData()
polygon1.SetPoints(ppoints1)
polygon1.SetLines(lines1)
lines2 = vtk.vtkCellArray()
lines2.InsertNextCell(poly2.GetNumberOfPoints())
for i in range(poly2.GetNumberOfPoints()):
lines2.InsertCellPoint(i)
polygon2 = vtk.vtkPolyData()
polygon2.SetPoints(ppoints2)
polygon2.SetLines(lines2)
mergedPolyData = vtk.vtkAppendPolyData()
mergedPolyData.AddInputData(polygon1)
mergedPolyData.AddInputData(polygon2)
mergedPolyData.Update()
rsf = vtk.vtkRuledSurfaceFilter()
rsf.CloseSurfaceOff()
rsf.SetRuledMode(mode)
rsf.SetResolution(res[0], res[1])
rsf.SetInputData(mergedPolyData.GetOutput())
rsf.Update()
Mesh.__init__(self, rsf.GetOutput(), c, alpha)
self.name = "Ribbon"
class Arrow(Mesh):
"""
Build a 3D arrow from `startPoint` to `endPoint` of section size `s`,
expressed as the fraction of the window size.
If c is a `float` less than 1, the arrow is rendered in a color scale
from white to red.
.. note:: If ``s=None`` the arrow is scaled proportionally to its length
|OrientedArrow|
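:Example:
A minimal usage sketch (the end points below are purely illustrative):
.. code-block:: python
from vedo import Arrow, show
ar = Arrow((0,0,0), (1,1,1), c='r4')
show(ar, axes=1)
print(ar.tipPoint())  # coordinates of the arrow tip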
"""
def __init__(self,
startPoint=(0,0,0),
endPoint=(1,0,0),
s=None,
c="r4",
alpha=1,
res=12
):
# in case user is passing meshs
if isinstance(startPoint, vtk.vtkActor): startPoint = startPoint.GetPosition()
if isinstance(endPoint, vtk.vtkActor): endPoint = endPoint.GetPosition()
axis = np.asarray(endPoint) - np.asarray(startPoint)
length = np.linalg.norm(axis)
if length:
axis = axis / length
theta = np.arccos(axis[2])
phi = np.arctan2(axis[1], axis[0])
self.arr = vtk.vtkArrowSource()
self.arr.SetShaftResolution(res)
self.arr.SetTipResolution(res)
if s:
sz = 0.02
self.arr.SetTipRadius(sz)
self.arr.SetShaftRadius(sz / 1.75)
self.arr.SetTipLength(sz * 15)
self.arr.Update()
t = vtk.vtkTransform()
t.RotateZ(np.rad2deg(phi))
t.RotateY(np.rad2deg(theta))
t.RotateY(-90) # put it along Z
if s:
sz = 800 * s
t.Scale(length, sz, sz)
else:
t.Scale(length, length, length)
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(self.arr.GetOutput())
tf.SetTransform(t)
tf.Update()
Mesh.__init__(self, tf.GetOutput(), c, alpha)
self.phong()
self.SetPosition(startPoint)
self.PickableOff()
self.DragableOff()
self.base = np.array(startPoint)
self.top = np.array(endPoint)
self.tipIndex = None
self.name = "Arrow"
def tipPoint(self, returnIndex=False):
"""Return the coordinates of the tip of the Arrow, or the point index."""
if self.tipIndex is None:
arrpts = utils.vtk2numpy(self.arr.GetOutput().GetPoints().GetData())
self.tipIndex = np.argmax(arrpts[:,0])
if returnIndex:
return self.tipIndex
else:
return self.points()[self.tipIndex]
def Arrows(startPoints, endPoints=None, s=None, thickness=1, c=None, alpha=1, res=12):
"""
Build arrows between two lists of points `startPoints` and `endPoints`.
`startPoints` can also be passed in the form ``[[point1, point2], ...]``.
Color can be specified as a colormap which maps the size of the arrows.
:param float s: fix aspect-ratio of the arrow and scale its cross section
:param c: color or color map name.
:param float alpha: set transparency
:param int res: set arrow resolution
|glyphs_arrows| |glyphs_arrows.py|_
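:Example:
A minimal usage sketch (random start points; 'jet' is passed as a colormap name,
as allowed by the `c` parameter description above):
.. code-block:: python
import numpy as np
from vedo import Arrows, show
starts = np.random.rand(20, 3)
ends = starts + (0.3, 0, 0)
show(Arrows(starts, ends, c='jet'), axes=1)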
"""
if isinstance(startPoints, Points): startPoints = startPoints.points()
if isinstance(endPoints, Points): endPoints = endPoints.points()
startPoints = np.array(startPoints)
if endPoints is None:
strt = startPoints[:,0]
endPoints = startPoints[:,1]
startPoints = strt
else:
endPoints = np.array(endPoints)
if startPoints.shape[1] == 2: # make it 3d
startPoints = np.c_[startPoints, np.zeros(len(startPoints))]
if endPoints.shape[1] == 2: # make it 3d
endPoints = np.c_[np.array(endPoints), np.zeros(len(endPoints))]
arr = vtk.vtkArrowSource()
arr.SetShaftResolution(res)
arr.SetTipResolution(res)
if s:
sz = 0.02 * s
arr.SetTipRadius(sz*2)
arr.SetShaftRadius(sz*thickness)
arr.SetTipLength(sz*10)
arr.Update()
out = arr.GetOutput()
orients = endPoints - startPoints
arrg = Glyph(startPoints, out,
orientationArray=orients,
scaleByVectorSize=True,
colorByVectorSize=True,
c=c, alpha=alpha)
arrg.flat().lighting('plastic')
arrg.name = "Arrows"
return arrg
class Arrow2D(Mesh):
"""
Build a 2D arrow from `startPoint` to `endPoint`.
:param float shaftLength: fractional shaft length
:param float shaftWidth: fractional shaft width
:param float headLength: fractional head length
:param float headWidth: fractional head width
:param bool fill: if False only generate the outline
"""
def __init__(self,
startPoint=(0,0,0),
endPoint=(1,0,0),
shaftLength=0.8,
shaftWidth=0.05,
headLength=0.25,
headWidth=0.2,
fill=True,
c="r4",
alpha=1):
# in case user is passing meshs
if isinstance(startPoint, vtk.vtkActor): startPoint = startPoint.GetPosition()
if isinstance(endPoint, vtk.vtkActor): endPoint = endPoint.GetPosition()
if len(startPoint) == 2:
startPoint = [startPoint[0], startPoint[1], 0]
if len(endPoint) == 2:
endPoint = [endPoint[0], endPoint[1], 0]
headBase = 1 - headLength
if headWidth < shaftWidth:
headWidth = shaftWidth
if headLength is None or headBase > shaftLength:
headBase = shaftLength
verts = []
verts.append([0, -shaftWidth/2, 0])
verts.append([shaftLength,-shaftWidth/2, 0])
verts.append([headBase, -headWidth/2, 0])
verts.append([1,0,0])
verts.append([headBase, headWidth/2, 0])
verts.append([shaftLength, shaftWidth/2, 0])
verts.append([0, shaftWidth/2, 0])
if fill:
faces = ((0,1,3,5,6), (5,3,4), (1,2,3))
poly = utils.buildPolyData(verts, faces)
else:
lines = ((0,1,2,3,4,5,6,0))
poly = utils.buildPolyData(verts, [], lines=lines)
axis = np.array(endPoint) - np.array(startPoint)
length = np.linalg.norm(axis)
if length:
axis = axis / length
theta = np.arccos(axis[2])
phi = np.arctan2(axis[1], axis[0])
t = vtk.vtkTransform()
t.RotateZ(np.rad2deg(phi))
t.RotateY(np.rad2deg(theta))
t.RotateY(-90) # put it along Z
t.Scale(length, length, length)
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputData(poly)
tf.SetTransform(t)
tf.Update()
Mesh.__init__(self, tf.GetOutput(), c, alpha)
self.SetPosition(startPoint)
self.lighting('off')
self.DragableOff()
self.PickableOff()
self.base = np.array(startPoint)
self.top = np.array(endPoint)
self.name = "Arrow2D"
def Arrows2D(startPoints, endPoints=None,
shaftLength=0.8,
shaftWidth=0.09,
headLength=None,
headWidth=0.2,
fill=True,
c=None,
cmap=None,
alpha=1):
"""
Build 2D arrows between two lists of points `startPoints` and `endPoints`.
`startPoints` can also be passed in the form ``[[point1, point2], ...]``.
Color can be specified as a colormap which maps the size of the arrows.
:param float shaftLength: fractional shaft length
:param float shaftWidth: fractional shaft width
:param float headLength: fractional head length
:param float headWidth: fractional head width
:param bool fill: if False only generate the outline
:param c: color
:param float alpha: set transparency
:Example:
.. code-block:: python
from vedo import Grid, Arrows2D
g1 = Grid(sx=1, sy=1)
g2 = Grid(sx=1.2, sy=1.2).rotateZ(4)
arrs2d = Arrows2D(g1, g2, c='jet')
arrs2d.show(axes=1, bg='white')
|quiver|
"""
if isinstance(startPoints, Points): startPoints = startPoints.points()
if isinstance(endPoints, Points): endPoints = endPoints.points()
startPoints = np.array(startPoints)
if endPoints is None:
strt = startPoints[:,0]
endPoints = startPoints[:,1]
startPoints = strt
else:
endPoints = np.array(endPoints)
if headLength is None:
headLength = 1 - shaftLength
arr = Arrow2D((0,0,0), (1,0,0),
shaftLength, shaftWidth,
headLength, headWidth, fill)
orients = endPoints - startPoints
if orients.shape[1] == 2: # make it 3d
orients = np.c_[np.array(orients), np.zeros(len(orients))]
pts = Points(startPoints)
arrg = Glyph(pts,
arr.polydata(False),
orientationArray=orients,
scaleByVectorSize=True,
c=c, alpha=alpha).flat().lighting('off')
if c is not None:
arrg.color(c)
arrg.name = "Arrows2D"
return arrg
def FlatArrow(line1, line2, c="r4", alpha=1, tipSize=1, tipWidth=1):
"""Build a 2D arrow in 3D space by joining two close lines.
|flatarrow| |flatarrow.py|_
"""
if isinstance(line1, Points): line1 = line1.points()
if isinstance(line2, Points): line2 = line2.points()
sm1, sm2 = np.array(line1[-1]), np.array(line2[-1])
v = (sm1-sm2)/3*tipWidth
p1 = sm1+v
p2 = sm2-v
pm1 = (sm1+sm2)/2
pm2 = (np.array(line1[-2])+np.array(line2[-2]))/2
pm12 = pm1-pm2
tip = pm12/np.linalg.norm(pm12)*np.linalg.norm(v)*3*tipSize/tipWidth + pm1
line1.append(p1)
line1.append(tip)
line2.append(p2)
line2.append(tip)
resm = max(100, len(line1))
mesh = Ribbon(line1, line2, alpha=alpha, c=c, res=(resm, 1)).phong()
mesh.PickableOff()
mesh.DragableOff()
mesh.name = "FlatArrow"
return mesh
class Polygon(Mesh):
"""
Build a polygon in the `xy` plane of `nsides` of radius `r`.
|Polygon|
"""
def __init__(self, pos=(0, 0, 0), nsides=6, r=1, c="coral", alpha=1):
if len(pos) == 2:
pos = (pos[0], pos[1], 0)
t = np.linspace(np.pi/2, 5/2*np.pi, num=nsides, endpoint=False)
x, y = utils.pol2cart(np.ones_like(t)*r, t)
faces = [list(range(nsides))]
Mesh.__init__(self, [np.c_[x,y], faces], c, alpha)
self.SetPosition(pos)
self.GetProperty().LightingOff()
self.name = "Polygon " + str(nsides)
class Circle(Polygon):
"""
Build a Circle of radius `r`.
"""
def __init__(self, pos=(0,0,0), r=1, c="gray5", alpha=1, res=120):
if len(pos) == 2:
pos = (pos[0], pos[1], 0)
Polygon.__init__(self, pos, nsides=res, r=r)
self.alpha(alpha).c(c)
self.name = "Circle"
class Star(Mesh):
"""
Build a 2D star shape of `n` cusps of inner radius `r1` and outer radius `r2`.
:param bool line: only build the outer line (no internal surface meshing).
|extrude| |extrude.py|_
"""
def __init__(self, pos=(0,0,0), n=5, r1=0.7, r2=1.0, line=False, c="blue6", alpha=1):
if len(pos) == 2:
pos = (pos[0], pos[1], 0)
t = np.linspace(np.pi/2, 5/2*np.pi, num=n, endpoint=False)
x, y = utils.pol2cart(np.ones_like(t)*r2, t)
pts = np.c_[x,y, np.zeros_like(x)]
apts=[]
for i,p in enumerate(pts):
apts.append(p)
if i+1<n:
apts.append((p+pts[i+1])/2*r1/r2)
apts.append((pts[-1]+pts[0])/2*r1/r2)
if line:
apts.append(pts[0])
poly = utils.buildPolyData(apts, lines=list(range(len(apts))))
Mesh.__init__(self, poly, c, alpha)
self.lw(2)
else:
apts.append((0,0,0))
cells=[]
for i in range(2*n-1):
cell = [2*n, i, i+1]
cells.append(cell)
cells.append([2*n, i+1, 0])
Mesh.__init__(self, [apts, cells], c, alpha)
self.SetPosition(pos)
self.name = "Star"
class Disc(Mesh):
"""
Build a 2D disc of inner radius `r1` and outer radius `r2`.
:param list res: resolution in R and Phi
|Disk|
"""
def __init__(self,
pos=(0, 0, 0),
r1=0.5,
r2=1,
c="gray4",
alpha=1,
res=12,
):
if len(pos) == 2:
pos = (pos[0], pos[1], 0)
if utils.isSequence(res):
res_r, res_phi = res
else:
res_r, res_phi = res, 12*res
ps = vtk.vtkDiskSource()
ps.SetInnerRadius(r1)
ps.SetOuterRadius(r2)
ps.SetRadialResolution(res_r)
ps.SetCircumferentialResolution(res_phi)
ps.Update()
Mesh.__init__(self, ps.GetOutput(), c, alpha)
self.flat()
self.SetPosition(pos)
self.name = "Disc"
class Arc(Mesh):
"""
Build a 2D circular arc between points `point1` and `point2`.
If `normal` is specified then `center` is ignored, and the
normal vector, a starting `point1` (polar vector)
and an `angle` defining the arc length need to be assigned.
The arc spans the shortest angular sector between point1 and point2;
if invert=True, the opposite sector is taken.
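:Example:
A minimal usage sketch showing both construction modes (values are purely illustrative):
.. code-block:: python
from vedo import Arc, show
a1 = Arc(center=(0,0,0), point1=(1,0,0), point2=(0,1,0))
a2 = Arc(center=(0,0,0), point1=(1,0,0), normal=(0,0,1), angle=270)
show(a1, a2.c('r4'), axes=1)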
"""
def __init__(self,
center,
point1,
point2=None,
normal=None,
angle=None,
invert=False,
c="gray4",
alpha=1,
res=48,
):
if len(point1) == 2:
point1 = (point1[0], point1[1], 0)
if point2 is not None and len(point2) == 2:
point2 = (point2[0], point2[1], 0)
ar = vtk.vtkArcSource()
if point2 is not None:
ar.UseNormalAndAngleOff()
ar.SetPoint1(point1)
ar.SetPoint2(point2)
ar.SetCenter(center)
elif normal is not None and angle is not None:
ar.UseNormalAndAngleOn()
ar.SetAngle(angle)
ar.SetPolarVector(point1)
ar.SetNormal(normal)
else:
printc("Error in Arc(): incorrect input.", c='r')
return None
ar.SetNegative(invert)
ar.SetResolution(res)
ar.Update()
Mesh.__init__(self, ar.GetOutput(), c, alpha)
self.lw(2).lighting('off')
self.name = "Arc"
class Sphere(Mesh):
"""Build a sphere at position `pos` of radius `r`.
:param float r: sphere radius
:param int res: resolution in phi, resolution in theta is 2*res
:param bool quads: sphere mesh will be made of quads instead of triangles
|Sphere| |sphericgrid|
"""
def __init__(self, pos=(0, 0, 0), r=1, c="r5", alpha=1, res=24, quads=False):
self.radius = r # used by fitSphere
self.center = pos
self.residue = 0
if quads:
if res<4: res=4
img = vtk.vtkImageData()
img.SetDimensions(res-1,res-1,res-1)
rs = 1./(res-2)
img.SetSpacing(rs,rs,rs)
gf = vtk.vtkGeometryFilter()
gf.SetInputData(img)
gf.Update()
Mesh.__init__(self, gf.GetOutput(), c, alpha)
self.lw(0.1)
cgpts = self.points() - (0.5,0.5,0.5)
x, y, z = cgpts.T
x = x*(1+x*x)/2
y = y*(1+y*y)/2
z = z*(1+z*z)/2
_, theta, phi = utils.cart2spher(x, y, z)
pts = utils.spher2cart(np.ones_like(phi)*r, theta, phi)
self.points(pts)
else:
if utils.isSequence(res):
res_t, res_phi = res
else:
res_t, res_phi = 2*res, res
ss = vtk.vtkSphereSource()
ss.SetRadius(r)
ss.SetThetaResolution(res_t)
ss.SetPhiResolution(res_phi)
ss.Update()
Mesh.__init__(self, ss.GetOutput(), c, alpha)
self.phong()
self.SetPosition(pos)
self.name = "Sphere"
class Spheres(Mesh):
"""
Build a (possibly large) set of spheres at `centers` of radius `r`.
Either `c` or `r` can be a list of RGB colors or radii.
|manyspheres| |manyspheres.py|_
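:Example:
A minimal usage sketch with per-sphere radii (random values, purely illustrative):
.. code-block:: python
import numpy as np
from vedo import Spheres, show
centers = np.random.rand(100, 3)
radii = np.random.rand(100) / 20
show(Spheres(centers, r=radii, c='r5'), axes=1)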
"""
def __init__(self, centers, r=1, c="r5", alpha=1, res=8):
if isinstance(centers, Points):
centers = centers.points()
cisseq = False
if utils.isSequence(c):
cisseq = True
if cisseq:
if len(centers) > len(c):
printc("Mismatch in Spheres() colors", len(centers), len(c), c='r')
raise RuntimeError()
if len(centers) != len(c):
printc("\lightningWarning: mismatch in Spheres() colors", len(centers), len(c))
risseq = False
if utils.isSequence(r):
risseq = True
if risseq:
if len(centers) > len(r):
printc("Mismatch in Spheres() radius", len(centers), len(r), c='r')
raise RuntimeError()
if len(centers) != len(r):
printc("\lightning Warning: mismatch in Spheres() radius", len(centers), len(r))
if cisseq and risseq:
printc("\noentry Limitation: c and r cannot be both sequences.", c='r')
raise RuntimeError()
src = vtk.vtkSphereSource()
if not risseq:
src.SetRadius(r)
if utils.isSequence(res):
res_t, res_phi = res
else:
res_t, res_phi = 2*res, res
src.SetThetaResolution(res_t)
src.SetPhiResolution(res_phi)
src.Update()
psrc = vtk.vtkPointSource()
psrc.SetNumberOfPoints(len(centers))
psrc.Update()
pd = psrc.GetOutput()
vpts = pd.GetPoints()
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(src.GetOutputPort())
if cisseq:
glyph.SetColorModeToColorByScalar()
ucols = vtk.vtkUnsignedCharArray()
ucols.SetNumberOfComponents(3)
ucols.SetName("Colors")
for acol in c:
cx, cy, cz = getColor(acol)
ucols.InsertNextTuple3(cx * 255, cy * 255, cz * 255)
pd.GetPointData().AddArray(ucols)
pd.GetPointData().SetActiveScalars("Colors")
glyph.ScalingOff()
elif risseq:
glyph.SetScaleModeToScaleByScalar()
urads = utils.numpy2vtk(2*np.ascontiguousarray(r), dtype=float)
urads.SetName("Radii")
pd.GetPointData().AddArray(urads)
pd.GetPointData().SetActiveScalars("Radii")
vpts.SetData(utils.numpy2vtk(centers, dtype=float))
glyph.SetInputData(pd)
glyph.Update()
Mesh.__init__(self, glyph.GetOutput(), alpha=alpha)
self.phong()
if cisseq:
self.mapper().ScalarVisibilityOn()
else:
self.mapper().ScalarVisibilityOff()
self.GetProperty().SetColor(getColor(c))
self.name = "Spheres"
class Earth(Mesh):
"""Build a textured mesh representing the Earth.
|geodesic| |geodesic.py|_
"""
def __init__(self, style=1, r=1):
tss = vtk.vtkTexturedSphereSource()
tss.SetRadius(r)
tss.SetThetaResolution(72)
tss.SetPhiResolution(36)
Mesh.__init__(self, tss, c="w")
atext = vtk.vtkTexture()
pnmReader = vtk.vtkJPEGReader()
fn = os.path.join(settings.textures_path, "earth"+ str(style) +".jpg")
pnmReader.SetFileName(fn)
atext.SetInputConnection(pnmReader.GetOutputPort())
atext.InterpolateOn()
self.SetTexture(atext)
self.name = "Earth"
class Ellipsoid(Mesh):
"""
Build a 3D ellipsoid centered at position `pos`.
.. note:: `axis1` and `axis2` are only used to define sizes and one azimuth angle.
|projectsphere|
|pca| |pca.py|_
"""
def __init__(self, pos=(0, 0, 0), axis1=(1, 0, 0), axis2=(0, 2, 0), axis3=(0, 0, 3),
c="cyan4", alpha=1, res=24):
self.center = pos
self.va_error = 0
self.vb_error = 0
self.vc_error = 0
self.axis1 = axis1
self.axis2 = axis2
self.axis3 = axis3
self.nr_of_points = 1 # used by pcaEllipsoid
if utils.isSequence(res):
res_t, res_phi = res
else:
res_t, res_phi = 2*res, res
elliSource = vtk.vtkSphereSource()
elliSource.SetThetaResolution(res_t)
elliSource.SetPhiResolution(res_phi)
elliSource.Update()
l1 = np.linalg.norm(axis1)
l2 = np.linalg.norm(axis2)
l3 = np.linalg.norm(axis3)
self.va = l1
self.vb = l2
self.vc = l3
axis1 = np.array(axis1) / l1
axis2 = np.array(axis2) / l2
# code adapted from https://github.com/lmcinnes/umap/blob/7e051d8f3c4adca90ca81eb45f6a9d1372c076cf/umap/plot.py
import warnings
from matplotlib import patches
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
import anndata
from numbers import Number
import matplotlib.cm
from matplotlib.axes import Axes
from anndata import AnnData
from typing import Union, Optional, List
from ..configuration import _themes, reset_rcParams
from .utils import (
despline_all,
deaxis_all,
_select_font_color,
arrowed_spines,
is_gene_name,
is_cell_anno_column,
is_list_of_lists,
is_layer_keys,
_matplotlib_points,
_datashade_points,
save_fig,
)
from ..preprocessing.utils import (
gen_rotation_2d,
affine_transform,
)
from ..tools.utils import (
update_dict,
get_mapper,
flatten,
)
from ..tools.moments import calc_1nd_moment
from ..dynamo_logger import main_info, main_debug, main_warning
from ..docrep import DocstringProcessor
docstrings = DocstringProcessor()
@docstrings.get_sectionsf("scatters")
def scatters(
adata: AnnData,
basis: str = "umap",
x: int = 0,
y: int = 1,
color: str = "ntr",
layer: str = "X",
highlights: Optional[list] = None,
labels: Optional[list] = None,
values: Optional[list] = None,
theme: Optional[str] = None,
cmap: Optional[str] = None,
color_key: Union[dict, list] = None,
color_key_cmap: Optional[str] = None,
background: Optional[str] = None,
ncols: int = 4,
pointsize: Union[None, float] = None,
figsize: tuple = (6, 4),
show_legend="on data",
use_smoothed: bool = True,
aggregate: Optional[str] = None,
show_arrowed_spines: bool = False,
ax: Optional[matplotlib.axes.Axes] = None,
sort: str = "raw",
save_show_or_return: str = "show",
save_kwargs: dict = {},
return_all: bool = False,
add_gamma_fit: bool = False,
frontier: bool = False,
contour: bool = False,
ccmap: Optional[str] = None,
alpha: float = 0.1,
calpha: float = 0.4,
sym_c: bool = False,
smooth: bool = False,
dpi: int = 100,
inset_dict: dict = {},
marker: str = None,
group: str = None,
add_group_gamma_fit=False,
affine_transform_degree=0,
affine_transform_A=None,
affine_transform_b=None,
stack_colors=False,
stack_colors_threshold=0.001,
stack_colors_title="stacked colors",
stack_colors_legend_size=2,
despline: bool = True,
deaxis: bool = True,
despline_sides: Union[None, List[str]] = None,
**kwargs,
) -> Union[None, Axes]:
"""Plot an embedding as points. Currently this only works
for 2D embeddings. While there are many optional parameters
to further control and tailor the plotting, you need only
pass in the trained/fit umap model to get results. This plot
utility will attempt to do the hard work of avoiding
overplotting issues, and make it easy to automatically
colour points by a categorical labelling or numeric values.
This method is intended to be used within a Jupyter
notebook with ``%matplotlib inline``.
Parameters
----------
adata: :class:`~anndata.AnnData`
an AnnData object
basis: `str`
The reduced dimension.
x: `int` (default: `0`)
The column index of the low dimensional embedding for the x-axis.
y: `int` (default: `1`)
The column index of the low dimensional embedding for the y-axis.
color: `string` (default: `ntr`)
Any column names or gene expression, etc. that will be used for coloring cells.
layer: `str` (default: `X`)
The layer of data to use for the scatter plot.
highlights: `list` (default: None)
Which color group will be highlighted. If `highlights` is a list of lists, each list relates to the
corresponding color element.
labels: array, shape (n_samples,) (optional, default None)
An array of labels (assumed integer or categorical),
one for each data sample.
This will be used for coloring the points in
the plot according to their label. Note that
this option is mutually exclusive to the ``values``
option.
values: array, shape (n_samples,) (optional, default None)
An array of values (assumed float or continuous),
one for each sample.
This will be used for coloring the points in
the plot according to a colorscale associated
to the total range of values. Note that this
option is mutually exclusive to the ``labels``
option.
theme: string (optional, default None)
A color theme to use for plotting. A small set of
predefined themes are provided which have relatively
good aesthetics. Available themes are:
* 'blue'
* 'red'
* 'green'
* 'inferno'
* 'fire'
* 'viridis'
* 'darkblue'
* 'darkred'
* 'darkgreen'
cmap: string (optional, default 'Blues')
The name of a matplotlib colormap to use for coloring
or shading points. If no labels or values are passed
this will be used for shading points according to
density (largely only of relevance for very large
datasets). If values are passed this will be used for
shading according the value. Note that if theme
is passed then this value will be overridden by the
corresponding option of the theme.
color_key: dict or array, shape (n_categories) (optional, default None)
A way to assign colors to categoricals. This can either be
an explicit dict mapping labels to colors (as strings of form
'#RRGGBB'), or an array like object providing one color for
each distinct category being provided in ``labels``. Either
way this mapping will be used to color points according to
the label. Note that if theme
is passed then this value will be overridden by the
corresponding option of the theme.
color_key_cmap:
The name of a matplotlib colormap to use for categorical coloring.
If an explicit ``color_key`` is not given a color mapping for
categories can be generated from the label list and selecting
a matching list of colors from the given colormap. Note
that if theme
is passed then this value will be overridden by the
corresponding option of the theme.
background: string or None (optional, default None)
The color of the background. Usually this will be either
'white' or 'black', but any color name will work. Ideally
one wants to match this appropriately to the colors being
used for points etc. This is one of the things that themes
handle for you. Note that if theme
is passed then this value will be overridden by the
corresponding option of the theme.
ncols: int (optional, default `4`)
Number of columns for the figure.
pointsize: `None` or `float` (default: None)
The scale of the point size. Actual point cell size is calculated as `16000.0 / np.sqrt(adata.shape[0]) *
pointsize`
figsize: `None` or `[float, float]` (default: (6, 4))
The width and height of each panel in the figure.
show_legend: `str` or `bool` (optional, default `'on data'`)
Whether (and where) to display a legend of the labels.
use_smoothed: bool (optional, default True)
Whether to use smoothed values (i.e. M_s / M_u instead of spliced / unspliced, etc.).
aggregate: `str` or `None` (default: `None`)
The column in adata.obs that will be used to aggregate data points.
show_arrowed_spines: bool (optional, default False)
Whether to show a pair of arrowed spines representing the basis the scatter is currently using.
ax: `matplotlib.Axis` (optional, default `None`)
The matplotlib axes object where new plots will be added to. Only applicable to drawing a single component.
sort: `str` (optional, default `raw`)
The method to reorder data so that points with high values will be on top of background points. Can be one of
{'raw', 'abs', 'neg'}, i.e. sorted by raw data, by absolute values, or by negative values.
save_show_or_return: `str` {'save', 'show', 'return', 'both', 'all'} (default: `show`)
Whether to save, show or return the figure. If "both", it will save and plot the figure at the same time. If
"all", the figure will be saved, displayed, and the associated axes and other objects will be returned.
save_kwargs: `dict` (default: `{}`)
A dictionary that will be passed to the save_fig function. By default it is an empty dictionary and the
save_fig function will use the {"path": None, "prefix": 'scatter', "dpi": None, "ext": 'pdf', "transparent":
True, "close": True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that
properly modify those keys according to your needs.
return_all: `bool` (default: `False`)
Whether to return all the scatter related variables. Default is False.
add_gamma_fit: `bool` (default: `False`)
Whether to add the line of the gamma fitting. This will automatically turn on if `basis` points to gene
names and those genes have gone through gamma fitting.
frontier: `bool` (default: `False`)
Whether to add the frontier. Scatter plots can be enhanced by using transparency (alpha) in order to show
area of high density and multiple scatter plots can be used to delineate a frontier. See matplotlib tips &
tricks cheatsheet (https://github.com/matplotlib/cheatsheets). Originally inspired by figures from scEU-seq
paper: https://science.sciencemag.org/content/367/6482/1151. If `contour` is set to be True, `frontier`
will be ignored as `contour` also add an outlier for data points.
contour: `bool` (default: `False`)
Whether to add a contour on top of scatter plots. We use tricontourf to plot the contour for non-gridded data.
The shapely package was used to create a polygon of the concave hull of the scatters. With the polygon we
then check if the mean of the triangulated points is within the polygon and use this as our condition to
form the mask to create the contour. We also add the polygon shape as a frontier of the data point (similar
to when setting `frontier = True`). When the color of the data points is continuous, we will use the same
cmap as for the scatter points by default, when color is categorical, no contour will be drawn but just the
polygon. cmap can be set with `ccmap` argument. See below. This has recently changed to use seaborn's
kdeplot.
ccmap: `str` or `None` (default: `None`)
The name of a matplotlib colormap to use for coloring or shading points the contour. See above.
calpha: `float` (default: `0.4`)
Contour alpha value passed into sns.kdeplot. The value should be within [0, 1].
sym_c: `bool` (default: `False`)
Whether to make the limits of the continuous color scale symmetric; normally this should be used for
plotting velocity, jacobian, curl, divergence or other types of data with both positive and negative values.
smooth: `bool` or `int` (default: `False`)
Whether to further smooth the data, and how much smoothing to apply. If it is `False`, no
smoothing will be applied. If `True`, smoothing based on one step of diffusion of the connectivity matrix
(`.uns['moment_cnn']`) will be applied. If a number larger than 1, smoothing will be based on `smooth` steps of
diffusion.
dpi: `float`, (default: 100.0)
The resolution of the figure in dots-per-inch. Dots per inches (dpi) determines how many pixels the figure
comprises. dpi is different from ppi or points per inches. Note that most elements like lines, markers,
texts have a size given in points so you can convert the points to inches. Matplotlib figures use Points per
inch (ppi) of 72. A line with thickness 1 point will be 1./72. inch wide. A text with fontsize 12 points
will be 12./72. inch high. Of course if you change the figure size in inches, points will not change, so a
larger figure in inches still has the same size of the elements. Changing the figure size is thus like taking
a piece of paper of a different size. Doing so, would of course not change the width of the line drawn with
the same pen. On the other hand, changing the dpi scales those elements. At 72 dpi, a line of 1 point size
is one pixel strong. At 144 dpi, this line is 2 pixels strong. A larger dpi will therefore act like a
magnifying glass. All elements are scaled by the magnifying power of the lens. see more details at answer 2
by @ImportanceOfBeingErnest:
https://stackoverflow.com/questions/47633546/relationship-between-dpi-and-figure-size
inset_dict: `dict` (default: {})
A dictionary of parameters in inset_ax. Example, something like {"width": "5%", "height": "50%", "loc":
'lower left', "bbox_to_anchor": (0.85, 0.90, 0.145, 0.145), "bbox_transform": ax.transAxes, "borderpad": 0}
See more details at https://matplotlib.org/api/_as_gen/mpl_toolkits.axes_grid1.inset_locator.inset_axes.html
or https://stackoverflow.com/questions/39803385/what-does-a-4-element-tuple-argument-for-bbox-to-anchor-mean
-in-matplotlib
marker: `str` (default: None)
The marker style. marker can be either an instance of the class or the text shorthand for a particular
marker. See matplotlib.markers for more information about marker styles.
affine_transform_degree:
Rotate the coordinates of the points by this angle (in degrees); used to generate the 2D affine transformation.
affine_transform_A:
Coefficients in affine transformation Ax + b. 2D for now.
affine_transform_b:
Bias in affine transformation Ax + b.
stack_colors:
Whether to stack all color on the same ax passed above.
Currently only supports 18 sequential matplotlib default cmaps, assigned to different color groups
(#colors should be smaller than 18; cmaps are reused if #colors > 18. To-do: generate cmaps according to #colors).
stack_colors_threshold:
A threshold for filtering points values < threshold when drawing each color.
E.g. if you do not want points with values < 1 showing up on axis, set threshold to be 1
stack_colors_title:
The title for the stack_color plot.
stack_colors_legend_size:
Control the legend size in stack color plot.
despline:
Whether to remove splines of the figure.
despline_sides:
Which side of splines should be removed. Can be any combination of `["bottom", "right", "top", "left"]`.
deaxis:
Whether to remove axis ticks of the figure.
kwargs:
Additional arguments passed to plt.scatter.
Returns
-------
result:
Either None or a matplotlib axis with the relevant plot displayed.
If you are using a notebook and have ``%matplotlib inline`` set
then this will simply display inline.
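Examples
--------
A minimal sketch, assuming `adata` is an already processed AnnData object, that
this module is accessed as ``dyn.pl``, and that the basis below exists in your data:
>>> import dynamo as dyn
>>> dyn.pl.scatters(adata, basis="umap", color="ntr")
>>> dyn.pl.scatters(adata, basis="pca", color="ntr", pointsize=0.5, ncols=2)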
"""
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import to_hex
from matplotlib.colors import rgb2hex
if calpha < 0 or calpha > 1:
main_warning(
"calpha=%f is invalid (smaller than 0 or larger than 1) and may cause potential issues. Please check."
% (calpha)
)
group_colors = ["b", "g", "r", "c", "m", "y", "k", "w"]
sequential_cmaps = [
"Greys",
"Purples",
"Blues",
"Greens",
"Oranges",
"Reds",
"YlOrBr",
"YlOrRd",
"OrRd",
"PuRd",
"RdPu",
"BuPu",
"GnBu",
"PuBu",
"YlGnBu",
"PuBuGn",
"BuGn",
"YlGn",
]
stack_legend_handles = []
if stack_colors:
color_key = None
def _get_adata_color(adata, cur_l, cur_c):
if cur_l in ["protein", "X_protein"]:
_color = adata.obsm[cur_l].loc[cur_c, :]
elif cur_l == "X":
_color = adata.obs_vector(cur_c, layer=None)
else:
_color = adata.obs_vector(cur_c, layer=cur_l)
return _color
if affine_transform_degree is not None:
affine_transform_A = gen_rotation_2d(affine_transform_degree)
affine_transform_b = 0
if contour:
frontier = False
if background is None:
_background = rcParams.get("figure.facecolor")
_background = to_hex(_background) if type(_background) is tuple else _background
# if save_show_or_return != 'save': set_figure_params('dynamo', background=_background)
else:
_background = background
# if save_show_or_return != 'save': set_figure_params('dynamo', background=_background)
x, y = (
[x] if type(x) in [int, str] else x,
[y]
if type(y)
in [
int,
str,
]
else y,
)
if all([is_gene_name(adata, i) for i in basis]):
if x[0] not in ["M_s", "X_spliced", "M_t", "X_total", "spliced", "total"] and y[0] not in [
"M_u",
"X_unspliced",
"M_n",
"X_new",
"unspliced",
"new",
]:
if "M_t" in adata.layers.keys() and "M_n" in adata.layers.keys():
x, y = ["M_t"], ["M_n"]
elif "X_total" in adata.layers.keys() and "X_new" in adata.layers.keys():
x, y = ["X_total"], ["X_new"]
elif "M_s" in adata.layers.keys() and "M_u" in adata.layers.keys():
x, y = ["M_s"], ["M_u"]
elif "X_spliced" in adata.layers.keys() and "X_unspliced" in adata.layers.keys():
x, y = ["X_spliced"], ["X_unspliced"]
elif "spliced" in adata.layers.keys() and "unspliced" in adata.layers.keys():
x, y = ["spliced"], ["unspliced"]
elif "total" in adata.layers.keys() and "new" in adata.layers.keys():
x, y = ["total"], ["new"]
else:
raise ValueError(
"your adata oject is corrupted. Please make sure it has at least one of the following "
"pair of layers:"
"'M_s', 'X_spliced', 'M_t', 'X_total', 'spliced', 'total' and "
"'M_u', 'X_unspliced', 'M_n', 'X_new', 'unspliced', 'new'. "
)
if use_smoothed:
mapper = get_mapper()
# check color, layer, basis -> convert to list
if type(color) is str:
color = [color]
if type(layer) is str:
layer = [layer]
if type(basis) is str:
basis = [basis]
if stack_colors and len(color) > len(sequential_cmaps):
main_warning(
"#color: %d passed in is greater than #sequential cmaps: %d, will reuse sequential maps"
% (len(color), len(sequential_cmaps))
)
main_warning("You should consider decreasing your #color")
n_c, n_l, n_b, n_x, n_y = (
1 if color is None else len(color),
1 if layer is None else len(layer),
1 if basis is None else len(basis),
1 if x is None else 1 if type(x) in [anndata._core.views.ArrayView, np.ndarray] else len(x),
# check whether it is an array
1 if y is None else 1 if type(y) in [anndata._core.views.ArrayView, np.ndarray] else len(y),
# check whether it is an array
)
if pointsize is None:
point_size = 16000.0 / np.sqrt(adata.shape[0])
else:
point_size = 16000.0 / np.sqrt(adata.shape[0]) * pointsize
scatter_kwargs = dict(
alpha=alpha,
s=point_size,
edgecolor=None,
linewidth=0,
rasterized=True,
marker=marker,
) # (0, 0, 0, 1)
if kwargs is not None:
scatter_kwargs.update(kwargs)
font_color = _select_font_color(_background)
total_panels, ncols = (
n_c * n_l * n_b * n_x * n_y,
min(max([n_c, n_l, n_b, n_x, n_y]), ncols),
)
nrow, ncol = int(np.ceil(total_panels / ncols)), ncols
if figsize is None:
figsize = plt.rcParams["figsize"]
if total_panels >= 1 and ax is None:
plt.figure(
None,
(figsize[0] * ncol, figsize[1] * nrow),
facecolor=_background,
dpi=dpi,
)
gs = plt.GridSpec(nrow, ncol, wspace=0.12)
ax_index = 0
axes_list, color_list = [], []
color_out = None
def _plot_basis_layer(cur_b, cur_l):
"""a helper function for plotting a specific basis/layer data
Parameters
----------
cur_b :
current basis
cur_l :
current layer
"""
nonlocal adata, x, y, _background, cmap, color_out, labels, values, ax, sym_c, scatter_kwargs, ax_index
if cur_l in ["acceleration", "curvature", "divergence", "velocity_S", "velocity_T"]:
cur_l_smoothed = cur_l
cmap, sym_c = "bwr", True # To-do: maybe use other divergent color map in future
else:
if use_smoothed:
cur_l_smoothed = cur_l if cur_l.startswith("M_") | cur_l.startswith("velocity") else mapper[cur_l]
if cur_l.startswith("velocity"):
cmap, sym_c = "bwr", True
prefix = cur_l + "_" if any([key == cur_l + "_" + cur_b for key in adata.obsm.keys()]) else "X_"
# if prefix + cur_b in adata.obsm.keys():
# if type(x) != str and type(y) != str:
# x_, y_ = (
# adata.obsm[prefix + cur_b][:, int(x)],
# adata.obsm[prefix + cur_b][:, int(y)],
# )
# else:
# continue
if stack_colors:
_stack_background_adata_indices = np.ones(len(adata), dtype=bool)
for cur_c in color:
main_debug("coloring scatter of cur_c: %s" % str(cur_c))
if not stack_colors:
cur_title = cur_c
else:
cur_title = stack_colors_title
_color = _get_adata_color(adata, cur_l, cur_c)
# select data rows based on stack color thresholding
_values = values
if stack_colors:
main_debug("Subsetting adata by stack_colors")
_adata = adata[_color > stack_colors_threshold]
_stack_background_adata_indices = np.logical_and(
_stack_background_adata_indices, (_color < stack_colors_threshold)
)
if values:
_values = values[_color > stack_colors_threshold]
_color = _color[_color > stack_colors_threshold]
main_debug("stack colors: _adata len after thresholding by color value: %d" % (len(_adata)))
if len(_color) == 0:
main_info("skipping color %s because no point of %s is above threshold" % (cur_c, cur_c))
continue
else:
_adata = adata
if (
type(x) in [anndata._core.views.ArrayView, np.ndarray]
and type(y) in [anndata._core.views.ArrayView, np.ndarray]
and len(x) == _adata.n_obs
and len(y) == _adata.n_obs
):
x, y = [x], [y]
elif hasattr(x, "__len__") and hasattr(y, "__len__"):
x, y = list(x), list(y)
for cur_x, cur_y in zip(x, y): # here x / y are arrays
main_debug("handling coordinates, cur_x: %s, cur_y: %s" % (cur_x, cur_y))
if type(cur_x) is int and type(cur_y) is int:
points = pd.DataFrame(
{
cur_b + "_0": _adata.obsm[prefix + cur_b][:, cur_x],
cur_b + "_1": _adata.obsm[prefix + cur_b][:, cur_y],
}
)
points.columns = [cur_b + "_0", cur_b + "_1"]
elif is_gene_name(_adata, cur_x) and is_gene_name(_adata, cur_y):
points = pd.DataFrame(
{
cur_x: _adata.obs_vector(k=cur_x, layer=None)
if cur_l_smoothed == "X"
else _adata.obs_vector(k=cur_x, layer=cur_l_smoothed),
cur_y: _adata.obs_vector(k=cur_y, layer=None)
if cur_l_smoothed == "X"
else _adata.obs_vector(k=cur_y, layer=cur_l_smoothed),
}
)
# points = points.loc[(points > 0).sum(1) > 1, :]
points.columns = [
cur_x + " (" + cur_l_smoothed + ")",
cur_y + " (" + cur_l_smoothed + ")",
]
cur_title = cur_x + " VS " + cur_y
elif is_cell_anno_column(_adata, cur_x) and is_cell_anno_column(_adata, cur_y):
points = pd.DataFrame(
{
cur_x: _adata.obs_vector(cur_x),
cur_y: _adata.obs_vector(cur_y),
}
)
points.columns = [cur_x, cur_y]
cur_title = cur_x + " VS " + cur_y
elif is_cell_anno_column(_adata, cur_x) and is_gene_name(_adata, cur_y):
points = pd.DataFrame(
{
cur_x: _adata.obs_vector(cur_x),
cur_y: _adata.obs_vector(k=cur_y, layer=None)
if cur_l_smoothed == "X"
else _adata.obs_vector(k=cur_y, layer=cur_l_smoothed),
}
)
# points = points.loc[points.iloc[:, 1] > 0, :]
points.columns = [
cur_x,
cur_y + " (" + cur_l_smoothed + ")",
]
cur_title = cur_y
elif is_gene_name(_adata, cur_x) and is_cell_anno_column(_adata, cur_y):
points = pd.DataFrame(
{
cur_x: _adata.obs_vector(k=cur_x, layer=None)
if cur_l_smoothed == "X"
else _adata.obs_vector(k=cur_x, layer=cur_l_smoothed),
cur_y: _adata.obs_vector(cur_y),
}
)
# points = points.loc[points.iloc[:, 0] > 0, :]
points.columns = [
cur_x + " (" + cur_l_smoothed + ")",
cur_y,
]
cur_title = cur_x
elif is_layer_keys(_adata, cur_x) and is_layer_keys(_adata, cur_y):
cur_x_, cur_y_ = (
_adata[:, cur_b].layers[cur_x],
_adata[:, cur_b].layers[cur_y],
)
points = pd.DataFrame({cur_x: flatten(cur_x_), cur_y: flatten(cur_y_)})
# points = points.loc[points.iloc[:, 0] > 0, :]
points.columns = [cur_x, cur_y]
cur_title = cur_b
elif type(cur_x) in [anndata._core.views.ArrayView, np.ndarray] and type(cur_y) in [
anndata._core.views.ArrayView,
np.ndarray,
]:
points = pd.DataFrame({"x": flatten(cur_x), "y": flatten(cur_y)})
points.columns = ["x", "y"]
cur_title = cur_b
else:
raise Exception("Make sure your `x` and `y` are integers, gene names, column names in .obs, etc.")
if aggregate is not None:
groups, uniq_grp = (
_adata.obs[aggregate],
list(_adata.obs[aggregate].unique()),
)
group_color, group_median = (
np.zeros((1, len(uniq_grp))).flatten()
if isinstance(_color[0], Number)
else np.zeros((1, len(uniq_grp))).astype("str").flatten(),
np.zeros((len(uniq_grp), 2)),
)
grp_size = _adata.obs[aggregate].value_counts()[uniq_grp].values
scatter_kwargs = (
{"s": grp_size} if scatter_kwargs is None else update_dict(scatter_kwargs, {"s": grp_size})
)
for ind, cur_grp in enumerate(uniq_grp):
group_median[ind, :] = np.nanmedian(
points.iloc[np.where(groups == cur_grp)[0], :2],
0,
)
if isinstance(_color[0], Number):
group_color[ind] = np.nanmedian(np.array(_color)[np.where(groups == cur_grp)[0]])
else:
group_color[ind] = pd.Series(_color)[np.where(groups == cur_grp)[0]].value_counts().index[0]
points, _color = (
pd.DataFrame(
group_median,
index=uniq_grp,
columns=points.columns,
),
group_color,
)
# https://stackoverflow.com/questions/4187185/how-can-i-check-if-my-python-object-is-a-number
# answer from Boris.
is_not_continuous = not isinstance(_color[0], Number) or _color.dtype.name == "category"
if is_not_continuous:
labels = np.asarray(_color) if is_categorical_dtype(_color) else _color
if theme is None:
if _background in ["#ffffff", "black"]:
_theme_ = "glasbey_dark"
else:
_theme_ = "glasbey_white"
else:
_theme_ = theme
else:
_values = _color
if theme is None:
if _background in ["#ffffff", "black"]:
_theme_ = "inferno" if cur_l != "velocity" else "div_blue_black_red"
else:
_theme_ = "viridis" if not cur_l.startswith("velocity") else "div_blue_red"
else:
_theme_ = theme
_cmap = _themes[_theme_]["cmap"] if cmap is None else cmap
if stack_colors:
main_debug("stack colors: changing cmap")
_cmap = sequential_cmaps[ax_index % len(sequential_cmaps)]
max_color = matplotlib.cm.get_cmap(_cmap)(float("inf"))
legend_circle = Line2D(
[0],
[0],
marker="o",
color="w",
markerfacecolor=max_color,
label=cur_c,
markersize=stack_colors_legend_size,
)
stack_legend_handles.append(legend_circle)
_color_key_cmap = _themes[_theme_]["color_key_cmap"] if color_key_cmap is None else color_key_cmap
_background = _themes[_theme_]["background"] if _background is None else _background
if labels is not None and values is not None:
raise ValueError("Conflicting options; only one of labels or values should be set")
if total_panels > 1 and not stack_colors:
ax = plt.subplot(gs[ax_index])
ax_index += 1
            # if highlights is a list of lists, each sublist corresponds to one color element
if highlights is not None:
if is_list_of_lists(highlights):
_highlights = highlights[color.index(cur_c)]
_highlights = _highlights if all([i in _color for i in _highlights]) else None
else:
_highlights = highlights if all([i in _color for i in highlights]) else None
if smooth and not is_not_continuous:
main_debug("smooth and not continuous")
knn = _adata.obsp["moments_con"]
values = (
calc_1nd_moment(values, knn)[0]
if smooth in [1, True]
else calc_1nd_moment(values, knn ** smooth)[0]
)
if affine_transform_A is None or affine_transform_b is None:
point_coords = points.values
else:
point_coords = affine_transform(points.values, affine_transform_A, affine_transform_b)
if points.shape[0] <= figsize[0] * figsize[1] * 100000:
main_debug("drawing with _matplotlib_points function")
ax, color_out = _matplotlib_points(
# points.values,
point_coords,
ax,
labels,
_values,
highlights,
_cmap,
color_key,
_color_key_cmap,
_background,
figsize[0],
figsize[1],
show_legend,
sort=sort,
frontier=frontier,
contour=contour,
ccmap=ccmap,
calpha=calpha,
sym_c=sym_c,
inset_dict=inset_dict,
**scatter_kwargs,
)
if labels is not None:
color_dict = {}
colors = [rgb2hex(i) for i in color_out]
for i, j in zip(labels, colors):
color_dict[i] = j
adata.uns[cur_title + "_colors"] = color_dict
else:
main_debug("drawing with _datashade_points function")
ax = _datashade_points(
# points.values,
point_coords,
ax,
labels,
values,
highlights,
_cmap,
color_key,
_color_key_cmap,
_background,
figsize[0],
figsize[1],
show_legend,
sort=sort,
frontier=frontier,
contour=contour,
ccmap=ccmap,
calpha=calpha,
sym_c=sym_c,
**scatter_kwargs,
)
if ax_index == 1 and show_arrowed_spines:
arrowed_spines(ax, points.columns[:2], _background)
else:
if despline:
despline_all(ax, despline_sides)
if deaxis:
deaxis_all(ax)
ax.set_title(cur_title)
axes_list.append(ax)
color_list.append(color_out)
labels, values = None, None # reset labels and values
if add_gamma_fit and cur_b in _adata.var_names[_adata.var.use_for_dynamics]:
xnew = np.linspace(
points.iloc[:, 0].min(),
points.iloc[:, 0].max() * 0.80,
)
k_name = "gamma_k" if _adata.uns["dynamics"]["experiment_type"] == "one-shot" else "gamma"
if k_name in _adata.var.columns:
if not ("gamma_b" in _adata.var.columns) or all(_adata.var.gamma_b.isna()):
_adata.var.loc[:, "gamma_b"] = 0
ax.plot(
xnew,
xnew * _adata[:, cur_b].var.loc[:, k_name].unique()
+ _adata[:, cur_b].var.loc[:, "gamma_b"].unique(),
dashes=[6, 2],
c=font_color,
)
else:
raise Exception(
"_adata does not seem to have %s column. Velocity estimation is required "
"before running this function." % k_name
)
if group is not None and add_group_gamma_fit and cur_b in _adata.var_names[_adata.var.use_for_dynamics]:
cell_groups = _adata.obs[group]
unique_groups = np.unique(cell_groups)
k_suffix = "gamma_k" if _adata.uns["dynamics"]["experiment_type"] == "one-shot" else "gamma"
for group_idx, cur_group in enumerate(unique_groups):
group_k_name = group + "_" + cur_group + "_" + k_suffix
group_adata = _adata[_adata.obs[group] == cur_group]
group_points = points.iloc[
|
np.array(_adata.obs[group] == cur_group)
|
numpy.array
|
"""
Tests for corrections module
"""
# Copyright (c) <NAME>
# Distributed under the terms of the MIT License
# author: <NAME> (<EMAIL>)
# pylint: disable=R0201
import os
import unittest
import numpy as np
import io
import islatu
from PIL import Image as PILIm
from numpy.testing import assert_almost_equal, assert_equal
from uncertainties import unumpy as unp
from islatu.image import Image
from islatu import cropping, background, image
EXAMPLE_FILE = (
"0. 0. 1. 1. 4. 113. 117. 7. 1. 0. \n"
"0. 0. 0. 3. 4. 127. 144. 9. 2. 0. \n"
"2. 0. 0. 7. 7. 232. 271. 13. 5. 2. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"2.0000e+00 2.0000e+00 5.0000e+00 1.3000e+01 "
"3.5800e+02 3.4490e+04 3.1763e+04 9.1100e+02 "
"5.5000e+01 7.0000e+00 \n"
"2.0000e+00 2.0000e+00 9.0000e+00 2.0000e+01 "
"9.6300e+02 6.5535e+04 5.5515e+04 1.4450e+03 "
"8.2000e+01 1.0000e+01 \n"
"2.000e+00 2.000e+00 3.000e+00 2.100e+01 "
"1.080e+02 5.337e+03 3.077e+03 1.900e+02 "
"2.500e+01 8.000e+00 \n"
"0. 2. 1. 2. 27. 697. 324. 25. 6. 0. \n"
"0. 2. 1. 3. 16. 525. 245. 15. 4. 3. \n"
"0. 0. 0. 1. 4. 355. 167. 4. 1. 0."
)
EXAMPLE_HOT_PIXEL = (
"5. 0. 1. 1. 4. 113. 117. 7. 1. 0. \n"
"0. 0. 0. 3. 4. 127. 144. 9. 2. 0. \n"
"2. 0. 5.000e+04 7. 7. 232. 271. 13. 5. 2. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"0. 0. 0. 1. 4. 355. 167. 4. 1. 0."
)
EXAMPLES_HOT_PIXEL_CORNERA = (
"5.000e+04 2. 1. 1. 4. 113. 117. 7. 1. 0. \n"
"5. 3. 0. 3. 4. 127. 144. 9. 2. 0. \n"
"2. 0. 5. 7. 7. 232. 271. 13. 5. 2. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"0. 0. 0. 1. 4. 355. 167. 4. 1. 0."
)
EXAMPLES_HOT_PIXEL_CORNERB = (
"5. 0. 1. 1. 4. 113. 117. 7. 1. 0. \n"
"0. 0. 0. 3. 4. 127. 144. 9. 2. 0. \n"
"2. 0. 5. 7. 7. 232. 271. 13. 5. 2. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 10. 3. \n"
"1. 0. 5. 6. 31. 672. 703. 55. 9. 3. \n"
"0. 0. 0. 1. 4. 355. 167. 4. 0. 5.0000e+04"
)
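# A small helper sketch (not part of the original test module): every test below
# repeats the same round-trip of text block -> numpy array -> in-memory PNG buffer,
# so a shared helper like this could be used instead.
def _as_png_buffer(text_block):
    """Convert a whitespace-separated text block into an in-memory PNG buffer."""
    arr = np.loadtxt(io.StringIO(text_block)).astype(np.uint32)
    buf = io.BytesIO()
    PILIm.fromarray(arr).save(buf, format="png")
    buf.seek(0)
    return buf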
class TestImage(unittest.TestCase):
"""
Unit tests for Image class
"""
def test_init(self):
"""
Test file reading
"""
b = io.StringIO(EXAMPLE_FILE)
buf = io.BytesIO()
im = PILIm.fromarray(np.loadtxt(b).astype(np.uint32))
im.save(buf, format="png")
buf.seek(0)
test_image = Image(buf)
data = io.StringIO(EXAMPLE_FILE)
expected_image = np.loadtxt(data)
assert_equal((10, 10), test_image.shape)
assert_almost_equal(expected_image, test_image.n)
def test_hot_pixel(self):
"""
Test file reading
"""
b = io.StringIO(EXAMPLE_HOT_PIXEL)
buf = io.BytesIO()
im = PILIm.fromarray(np.loadtxt(b).astype(np.uint32))
im.save(buf, format="png")
buf.seek(0)
test_image = Image(buf, hot_pixel_max=4e4)
assert_equal((10, 10), test_image.shape)
assert_almost_equal(2, test_image.n[2, 2])
def test_hot_pixel_cornera(self):
"""
Test file reading
"""
b = io.StringIO(EXAMPLES_HOT_PIXEL_CORNERA)
buf = io.BytesIO()
im = PILIm.fromarray(np.loadtxt(b).astype(np.uint32))
im.save(buf, format="png")
buf.seek(0)
test_image = Image(buf, hot_pixel_max=4e4)
assert_equal((10, 10), test_image.shape)
assert_almost_equal(2, test_image.n[0, 0])
def test_hot_pixel_cornerb(self):
"""
Test file reading
"""
b = io.StringIO(EXAMPLES_HOT_PIXEL_CORNERB)
buf = io.BytesIO()
im = PILIm.fromarray(np.loadtxt(b).astype(np.uint32))
im.save(buf, format="png")
buf.seek(0)
test_image = Image(buf, hot_pixel_max=4e4)
assert_equal((10, 10), test_image.shape)
assert_almost_equal(4, test_image.n[-1, -1])
def test_init_with_transpose(self):
"""
Test for transposing with reading
"""
b = io.StringIO(EXAMPLE_FILE)
buf = io.BytesIO()
im = PILIm.fromarray(np.loadtxt(b).astype(np.uint32))
im.save(buf, format="png")
buf.seek(0)
test_image = Image(buf, transpose=True)
data = io.StringIO(EXAMPLE_FILE)
expected_image = np.loadtxt(data, unpack=True)
assert_equal((10, 10), test_image.shape)
assert_almost_equal(expected_image, test_image.n)
def test_nominal_values(self):
"""
Test nominal values
"""
b = io.StringIO(EXAMPLE_FILE)
buf = io.BytesIO()
im = PILIm.fromarray(np.loadtxt(b).astype(np.uint32))
im.save(buf, format="png")
buf.seek(0)
test_image = Image(buf)
data = io.StringIO(EXAMPLE_FILE)
expected_image = np.loadtxt(data)
|
assert_equal((10, 10), test_image.shape)
|
numpy.testing.assert_equal
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
with open('./resources/naca0012.dat') as file_name:
x, y = np.loadtxt(file_name, dtype=float, delimiter='\t',
unpack=True)
val_x, val_y = 0.1, 0.2
x_min, x_max = x.min(), x.max()
y_min, y_max = y.min(), y.max()
x_start, x_end = (x_min - val_x * (x_max - x_min),
x_max + val_x * (x_max - x_min))
y_start, y_end = (y_min - val_y * (y_max - y_min),
y_max + val_y * (y_max - y_min))
size = 10
plt.figure(figsize=(size, (y_end - y_start) / (x_end - x_start) * size))
plt.grid(True)
plt.xlabel('x', fontsize=16)
plt.ylabel('y', fontsize=16)
plt.xlim(x_start, x_end)
plt.ylim(y_start, y_end)
plt.plot(x, y, color='k', linestyle='-', linewidth=2)
plt.show()
class Panel:
def __init__(self, xa, ya, xb, yb):
self.xa, self.ya = xa, ya
self.xb, self.yb = xb, yb
self.xc, self.yc = (xa + xb) / 2, (ya + yb) / 2
self.length = np.sqrt((xb - xa)**2 + (yb - ya)**2)
if xb - xa <= 0.0:
self.beta = np.arccos((yb - ya) / self.length)
elif xb - xa > 0.0:
self.beta = np.pi + np.arccos(-(yb - ya) / self.length)
if self.beta <= np.pi:
self.loc = 'upper'
else:
self.loc = 'lower'
self.sigma = 0.0
self.vt = 0.0
self.cp = 0.0
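# Quick sanity-check sketch (assumed, not from the original lesson): a unit-length panel
# from (1, 0) to (0, 0) lies on the x-axis, has its center at (0.5, 0) and is labelled 'upper'.
_demo_panel = Panel(1.0, 0.0, 0.0, 0.0)
print(_demo_panel.length, _demo_panel.xc, _demo_panel.yc, _demo_panel.loc)  # 1.0 0.5 0.0 upper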
def define_panels(x, y, N=40):
R = (x.max() - x.min()) / 2
x_center = (x.max() + x.min()) / 2
x_circle = x_center + R * np.cos(np.linspace(0, 2 * np.pi, N + 1))
x_ends = np.copy(x_circle)
y_ends = np.empty_like(x_ends)
x, y = np.append(x, x[0]), np.append(y, y[0])
I = 0
for i in range(N):
while I < len(x) - 1:
if (x[I] <= x_ends[i] <= x[I + 1]) or (x[I + 1] <= x_ends[i] <= x[I]):
break
else:
I += 1
a = (y[I + 1] - y[I]) / (x[I + 1] - x[I])
b = y[I + 1] - a * x[I + 1]
y_ends[i] = a * x_ends[i] + b
y_ends[N] = y_ends[0]
panels = np.empty(N, dtype=object)
for i in range(N):
panels[i] = Panel(x_ends[i], y_ends[i], x_ends[i + 1], y_ends[i + 1])
return panels
N = 40 # number of panels
panels = define_panels(x, y, N)  # discretize the geometry into panels
# plots the geometry and the panels
val_x, val_y = 0.1, 0.2
x_min, x_max = min(panel.xa for panel in panels), max(
panel.xa for panel in panels)
y_min, y_max = min(panel.ya for panel in panels), max(
panel.ya for panel in panels)
x_start, x_end = x_min - val_x * \
(x_max - x_min), x_max + val_x * (x_max - x_min)
y_start, y_end = y_min - val_y * \
(y_max - y_min), y_max + val_y * (y_max - y_min)
size = 10
plt.figure(figsize=(size, (y_end - y_start) / (x_end - x_start) * size))
plt.grid(True)
plt.xlabel('x', fontsize=16)
plt.ylabel('y', fontsize=16)
plt.xlim(x_start, x_end)
plt.ylim(y_start, y_end)
plt.plot(x, y, color='k', linestyle='-', linewidth=2)
plt.plot(np.append([panel.xa for panel in panels], panels[0].xa),
np.append([panel.ya for panel in panels], panels[0].ya),
linestyle='-', linewidth=1, marker='o', markersize=6, color='#CD2305')
plt.show()
class Freestream:
def __init__(self, u_inf=1.0, alpha=0.0):
self.u_inf = u_inf
self.alpha = alpha * np.pi / 180
u_inf = 1.0
alpha = 0.0
freestream = Freestream(u_inf, alpha)
def integral(x, y, panel, dxdz, dydz):
def func(s):
return (((x - (panel.xa - np.sin(panel.beta) * s)) * dxdz +
(y - (panel.ya + np.cos(panel.beta) * s)) * dydz) /
((x - (panel.xa - np.sin(panel.beta) * s))**2 +
(y - (panel.ya + np.cos(panel.beta) * s))**2))
    return integrate.quad(func, 0.0, panel.length)[0]
def build_matrix(panels):
N = len(panels)
A = np.empty((N, N), dtype=float)
np.fill_diagonal(A, 0.5)
for i, p_i in enumerate(panels):
for j, p_j in enumerate(panels):
if i != j:
A[i, j] = (0.5 / np.pi *
integral(p_i.xc, p_i.yc, p_j,
np.cos(p_i.beta), np.sin(p_i.beta)))
return A
def build_rhs(panels, freestream):
b = np.empty(len(panels), dtype=float)
for i, panel in enumerate(panels):
b[i] = - freestream.u_inf * np.cos(freestream.alpha - panel.beta)
return b
A = build_matrix(panels)
b = build_rhs(panels, freestream)
sigma = np.linalg.solve(A, b)
for i, panel in enumerate(panels):
panel.sigma = sigma[i]
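# Optional sanity check (not in the original code): for a closed body the source/sink
# strengths should nearly cancel, i.e. sum(sigma_i * length_i) ~ 0.
accuracy = sum(panel.sigma * panel.length for panel in panels)
print('sum of source/sink strengths: {:0.6f}'.format(accuracy))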
def get_tangencial_velocity(panels, freestream):
N = len(panels)
A = np.empty((N, N), dtype=float)
np.fill_diagonal(A, 0.0)
for i, p_i in enumerate(panels):
for j, p_j in enumerate(panels):
if i != j:
A[i, j] = (0.5 / np.pi *
integral(p_i.xc, p_i.yc, p_j,
-
|
np.sin(p_i.beta)
|
numpy.sin
|
test_input1 = """
inp w
add z w
mod z 2
div w 2
add y w
mod y 2
div w 2
add x w
mod x 2
div w 2
mod w 2
"""
input = """
inp w
mul x 0
add x z
mod x 26
div z 1
add x 14
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 7
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 1
add x 12
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 4
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 1
add x 11
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 8
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 26
add x -4
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 1
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 1
add x 10
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 5
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 1
add x 10
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 14
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 1
add x 15
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 12
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 26
add x -9
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 10
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 26
add x -9
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 5
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 1
add x 12
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 7
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 26
add x -15
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 6
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 26
add x -7
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 8
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 26
add x -10
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 4
mul y x
add z y
inp w
mul x 0
add x z
mod x 26
div z 26
add x 0
eql x w
eql x 0
mul y 0
add y 25
mul y x
add y 1
mul z y
mul y 0
add y w
add y 6
mul y x
add z y
"""
import itertools
from functools import lru_cache
import math
import numpy as np
from sympy import (symbols, Function, Integer, Symbol, Add, Mul, S, Tuple,
cse, lambdify)
from sympy.printing.lambdarepr import LambdaPrinter
from numba import njit
def parse_input(data):
instructions = []
for line in data.strip().splitlines():
if line.startswith('inp'):
instr, a = line.split()
instructions.append((instr, a))
else:
instr, a, b = line.split()
if b in 'xyzw':
instructions.append((instr, a, b))
else:
instructions.append((instr, a, int(b)))
return instructions
def run(instructions, inputs):
vars = {i: 0 for i in 'xyzw'}
for instruction in instructions:
if instruction[0] == 'inp':
res = inputs.pop(0)
vars[instruction[1]] = res
continue
else:
instr, a, b = instruction
assert a in list('xyzw')
A = vars[a]
B = b if isinstance(b, int) else vars[b]
if instr == 'add':
vars[a] = A + B
elif instr == 'mul':
vars[a] = A*B
elif instr == 'div':
if B == 0:
raise ZeroDivisionError("div by 0")
if A*B < 0:
# Division needs to round towards 0
vars[a] = -((-A)//(-B))
else:
vars[a] = A//B
elif instr == 'mod':
if A < 0 or B <= 0:
raise ZeroDivisionError("mod with nonpositive inputs")
vars[a] = A % B
elif instr == 'eql':
vars[a] = int(A == B)
else:
raise ValueError(f"Invalid instruction: {instr!r}")
return vars
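# Usage sketch (assumed): the small test program above extracts the four lowest binary
# digits of its single input (z holds the least significant bit, w the fourth).
demo_instructions = parse_input(test_input1)
print(run(demo_instructions, [13]))  # -> {'x': 1, 'y': 0, 'z': 1, 'w': 1}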
class FloorDivide(Function):
@classmethod
def eval(cls, x, y):
if x.is_Integer and y.is_Integer:
if x*y < 0:
# Division needs to round towards 0
return -((-x)//(-y))
else:
return x//y
if y == 1:
return x
# if isinstance(x, Add):
# return x.func(*[FloorDivide(i, y) for i in x.args])
class Mod(Function):
@classmethod
def eval(cls, x, y):
if x.is_Integer and y.is_Integer:
if x < 0 or y <= 0:
raise ZeroDivisionError("mod with nonpositive inputs")
return x % y
if isinstance(x, Symbol) and 'input' in x.name and y.is_Integer and y > 9:
return x
# if isinstance(x, Add):
# coeff, rest = x.as_coeff_Add()
# if coeff != 0:
# return coeff % y + Mod(rest, y)
if isinstance(x, Equal):
return x
class Equal(Function):
@classmethod
def eval(cls, x, y):
if x.is_Integer and y.is_Integer:
return Integer(x == y)
if isinstance(x, Symbol) and y.is_Integer and not (1 <= y <= 9):
return S(0)
if isinstance(y, Symbol) and x.is_Integer and not (1 <= x <= 9):
return S(0)
free_vars = list(x.free_symbols | y.free_symbols)
possibilities = set()
        # brute-force only when the number of free symbols is small (9**n combinations)
        if len(free_vars) <= 2:
for vals in itertools.product(*[range(1, 10)]*len(free_vars)):
if len(possibilities) == 2:
return
s = list(zip(free_vars, vals))
x_ = x.subs(s)
y_ = y.subs(s)
assert x_.is_Integer and y_.is_Integer, (x_, y_)
possibilities.add(Integer(x_ == y_))
if len(possibilities) == 1:
return possibilities.pop()
def run_sympy(instructions, inputs):
vars = {i: 0 for i in 'xyzw'}
input_counter = 0
for instruction in instructions:
if instruction[0] == 'inp':
res = inputs[input_counter]
input_counter += 1
vars[instruction[1]] = res
continue
else:
instr, a, b = instruction
assert a in list('xyzw')
A = vars[a]
B = b if isinstance(b, int) else vars[b]
if instr == 'add':
vars[a] = A + B
elif instr == 'mul':
vars[a] = A*B
elif instr == 'div':
if B == 0:
raise ZeroDivisionError("div by 0")
# if A*B < 0:
# # Division needs to round towards 0
# vars[a] = -((-A)//(-B))
# else:
# vars[a] = A//B
vars[a] = FloorDivide(A, B)
elif instr == 'mod':
# if A < 0 or B <= 0:
# raise ZeroDivisionError("mod with nonpositive inputs")
vars[a] = Mod(A, B)
# vars[a] = A % B
elif instr == 'eql':
vars[a] = Equal(A, B)
else:
raise ValueError(f"Invalid instruction: {instr!r}")
return vars
def part1(instructions):
for N in range(10**14-1, 0, -1):
inputs = [int(i) for i in str(N)]
if 0 in inputs:
continue
print(N)
res = run(instructions, inputs)
if res['z'] == 0:
return N
class CustomPrinter(LambdaPrinter):
def _print_Equal(self, expr):
x, y = expr.args
return f"int({self._print(x)} == {self._print(y)})"
def _print_FloorDivide(self, expr):
x, y = expr.args
return f"({self._print(x)} // {self._print(y)})"
def _print_Mod(self, expr):
x, y = expr.args
return f"({self._print(x)} % {self._print(y)})"
# modules = {"Equal": lambda x, y: int(x == y),
# "FloorDivide": lambda x, y: x//y,
# "Mod": lambda x, y: x % y,
# }
def inner_lambdify(variables, expr):
# Cache only the free variables in the subexpression. Otherwise caching
# cannot work.
free_vars = sorted(expr.free_symbols, key=variables.index)
mapping = [variables.index(i) for i in free_vars]
f = lru_cache(None)(njit(lambdify(free_vars, expr, printer=CustomPrinter)))
return lambda *vars: f(*[vars[i] for i in mapping])
# https://stackoverflow.com/questions/11144513/cartesian-product-of-x-and-y-array-points-into-single-array-of-2d-points
def cartesian_product(*arrays):
if len(arrays) == 0:
return np.empty((1, 0))
la = len(arrays)
dtype = np.result_type(*arrays)
arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)
for i, a in enumerate(np.ix_(*arrays)):
arr[...,i] = a
return arr.reshape(-1, la)
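# Usage sketch (assumed): enumerate every pair of nonzero digits once and reuse the array.
digit_pairs = cartesian_product(np.arange(1, 10), np.arange(1, 10))
print(digit_pairs.shape)  # -> (81, 2)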
@njit
def boolallvals2(f, x1, x2):
res = np.zeros((2, 2), dtype=np.int64)
for i1 in x1:
for i2 in x2:
res[int(f(i1, i2))] = np.array([i1, i2])
return res
@njit
def boolallvals4(f, x1, x2, x3, x4):
res = np.zeros((2, 4), dtype=np.int64)
for i1 in x1:
for i2 in x2:
for i3 in x3:
for i4 in x4:
res[int(f(i1, i2, i3, i4))] =
|
np.array([i1, i2, i3, i4])
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 11:45:32 2018
Empirical Wavelet Transform implementation for 1D signals
Original paper:
<NAME>., 2013. Empirical Wavelet Transform. IEEE Transactions on Signal Processing, 61(16), pp.3999–4010.
Available at: http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6522142.
Original Matlab toolbox: https://www.mathworks.com/matlabcentral/fileexchange/42141-empirical-wavelet-transforms
@author: <NAME>
Programa de pós graduação em engenharia elétrica - PPGEE UFMG
Universidade Federal de Minas Gerais - Belo Horizonte, Brazil
Núcleo de Neurociências - NNC
"""
import numpy as np
#%EWT functions
def EWT1D(f, N = 5, log = 0,detect = "locmax", completion = 0, reg = 'average', lengthFilter = 10,sigmaFilter = 5):
"""
=========================================================================
ewt, mfb ,boundaries = EWT1D(f, N = 5, log = 0,detect = "locmax", completion = 0, reg = 'average', lengthFilter = 10,sigmaFilter = 5):
Perform the Empirical Wavelet Transform of f over N scales. See
also the documentation of EWT_Boundaries_Detect for more details about
the available methods and their parameters.
Inputs:
-f: the 1D input signal
Optional Inputs:
-log: 0 or 1 to indicate if we want to work with
the log spectrum
    -detect: 'locmax','locmaxmin','locmaxminf'
-reg: 'none','gaussian','average'
-lengthFilter: width of the above filters (Gaussian or average)
-sigmaFilter: standard deviation of the above Gaussian filter
-N: maximum number of supports (modes or signal components)
-completion: 0 or 1 to indicate if we try to complete
or not the number of modes if the detection
                    finds a lower number of modes than N
Outputs:
-ewt: contains first the low frequency component and
then the successives frequency subbands
-mfb: contains the filter bank (in the Fourier domain)
-boundaries: vector containing the set of boundaries corresponding
to the Fourier line segmentation (normalized between
0 and Pi)
Original MATLAB Version:
Author: <NAME>
Institution: UCLA - Department of Mathematics
Year: 2013
Version: 2.0
Python Version: <NAME> - <EMAIL>
Universidade Federal de Minas Gerais - Brasil
Núcleo de Neurociências
% =========================================================================
"""
#signal spectrum
ff = np.fft.fft(f)
ff = abs(ff[0:int(np.ceil(ff.size/2))])#one-sided magnitude
#extract boundaries of Fourier Segments
boundaries = EWT_Boundaries_Detect(ff,log,detect,N,reg,lengthFilter,sigmaFilter)
boundaries = boundaries*np.pi/round(ff.size)
if completion == 1 and len(boundaries)<N-1:
boundaries = EWT_Boundaries_Completion(boundaries,N-1)
#Filtering
#extend the signal by mirroring to deal with boundaries
ltemp = int(np.ceil(f.size/2)) #to behave the same as matlab's round
fMirr = np.append(np.flip(f[0:ltemp-1],axis = 0),f)
fMirr = np.append(fMirr,np.flip(f[-ltemp-1:-1],axis = 0))
ffMirr = np.fft.fft(fMirr)
#build the corresponding filter bank
mfb=EWT_Meyer_FilterBank(boundaries,ffMirr.size)
#filter the signal to extract each subband
ewt = np.zeros(mfb.shape)
for k in range(mfb.shape[1]):
ewt[:,k] = np.real(np.fft.ifft(np.conjugate(mfb[:,k])*ffMirr))
ewt = ewt[ltemp-1:-ltemp,:]
return ewt, mfb ,boundaries
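# Minimal usage sketch (not part of the original toolbox). It is only defined here, so the
# helper functions below (boundary detection, Meyer filter bank) are resolved at call time.
def _ewt1d_demo():
    t = np.linspace(0, 1, 1000, endpoint=False)
    sig = np.cos(2 * np.pi * 5 * t) + 0.5 * np.cos(2 * np.pi * 40 * t)
    ewt, mfb, boundaries = EWT1D(sig, N=2)
    return ewt.shape, boundaries  # one low-frequency component plus the detected subbands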
def EWT_Boundaries_Detect(ff,log,detect, N, reg, lengthFilter,sigmaFilter):
"""This function segments f into a certain amount of supports by using different technics:
- middle point between consecutive local maxima (default),
- lowest minima between consecutive local maxima (locmaxmin),
- lowest minima between consecutive local maxima of original spectrum (locmaxminf),
Regularized version of the spectrum can be obtained by the
following methods:
- Gaussian filtering (its parameters are filter of width
lengthFilter and standard deviation sigmaFilter)scalesp
- Average filtering (its parameters are filter of width
lengthFilter)
    Note: the detected boundaries are given in terms of indices
Inputs:
-f: the function to segment
Optional parameters:
-log: 0 or 1 to indicate if we want to work with
the log of the ff
-reg: 'none','gaussian','average'
-lengthFilter: width of the above filters (Gaussian or average)
-sigmaFilter: standard deviation of the above Gaussian filter
-N: maximum number of supports (modes or signal components)
-completion: 0 or 1 to indicate if we try to complete
or not the number of modes if the detection
                    finds a lower number of modes than N
Outputs:
-boundaries: list of detected boundaries
TODO Preprocessing steps not yet implemented
Original MATLAB version:
Author: <NAME> + <NAME>
Institution: UCLA - Department of Mathematics
Year: 2013
Version: 2.0
Python Version: <NAME> - <EMAIL>
Universidade Federal de Minas Gerais - Brasil
Núcleo de Neurociências
"""
from scipy.ndimage.filters import gaussian_filter
#apply log if needed
if log == 1:
ff = np.log(ff)
#Global trend removal - TODO
#Regularization
if reg == 'average':
regFilter = np.ones(lengthFilter)/lengthFilter
        presig = np.convolve(ff,regFilter,mode = 'same') #for even length, numpy's convolve is shifted compared with MATLAB's
elif reg == 'gaussian':
regFilter = np.zeros(lengthFilter)
regFilter[regFilter.size//2] = 1 #prefer odd filter lengths - otherwise the gaussian is skewed
presig = np.convolve(ff,gaussian_filter(regFilter,sigmaFilter),mode = 'same')
else:
presig = ff
#Boundaries detection
if detect == "locmax":#Mid-point between two consecutive local maxima computed on the regularized spectrum
boundaries = LocalMax(presig,N)
elif detect == "locmaxmin":#extract the lowest local minima between two selected local maxima
boundaries = LocalMaxMin(presig,N)
elif detect == "locmaxminf":#We extract the lowest local minima on the original spectrum between
#two local maxima selected on the regularized signal
boundaries = LocalMaxMin(presig,N,fm = ff)
#elif detect == "adaptivereg": #TODO
return boundaries+1
def LocalMax(ff, N):
"""
================================================================
bound = LocalMax(f,N)
This function segments f into a maximum of N supports by taking
the middle point between the N largest local maxima.
    Note: the detected boundaries are given in terms of indices
Inputs:
-f: the function to segment
-N: maximal number of bands
Outputs:
-bound: list of detected boundaries
Original MATLAB version:
Author: <NAME> + <NAME>
Institution: UCLA - Department of Mathematics
Year: 2013
Version: 1.0
Python Version: <NAME> - <EMAIL>
Universidade Federal de Minas Gerais - Brasil
Núcleo de Neurociências
%===============================================================
"""
N=N-1
locmax = np.zeros(ff.size)
locmin = max(ff)*np.ones(ff.size)
for i in
|
np.arange(1,ff.size-1)
|
numpy.arange
|
#
# ########################################################################### #
# Standard Library
import os
from copy import deepcopy, copy
# 3rd Party
import pandas as pd
import scipy.ndimage.filters as filters
import scipy.ndimage.morphology as morphology
import scipy.fftpack as fftpack
import scipy.sparse
import scipy.sparse.linalg
from scipy.interpolate import interp1d
import numpy as np
from astropy import units
from astropy import coordinates
import astropy.io.fits as fits
import scipy.optimize
from scipy import integrate
from functools import reduce
try:
import cvxopt
import cvxopt.solvers
with_cvxopt = True
except ImportError:
with_cvxopt = False
# Internal
#import thimbles as tmb
#from thimbles import hydrogen
from thimbles import resampling
from . import partitioning
from . import piecewise_polynomial
from thimbles.profiles import voigt, gauss
from . import piecewise_polynomial as ppol
# ########################################################################### #
def AngleRA (angle,unit=units.hourangle,raise_errors=False):
"""
    Returns an object which represents a right ascension angle
see `astropy.coordinates.RA` for more extensive documentation
The primary difference with astropy is that if the call to coordinates.RA
errors you have the option to ignore it and return None for the angle
i.e. when raise_errors=False
"""
try:
result = coordinates.RA(angle,unit=unit)
except Exception as e:
result = None
if raise_errors:
raise e
return result
def AngleDec (angle,unit=units.degree,raise_errors=False):
"""
    Returns an object which represents a declination angle
see `astropy.coordinates.Dec` for more extensive documentation
The primary difference with astropy is that if the call to coordinates.RA
errors you have the option to ignore it and return None for the angle
i.e. when raise_errors=False
"""
try:
result = coordinates.Dec(angle,unit=unit)
except Exception as e:
result = None
if raise_errors:
raise e
return result
def invert(arr):
return np.where(arr > 0, 1.0/(arr+(arr==0)), np.inf)
def inv_var_2_var (inv_var):
"""
Takes an inv_var and converts it to a variance. Carefully
handling zeros and inf values
"""
fill = np.inf
inv_var = np.asarray(inv_var).astype(float)
zeros = (inv_var <= 0.0) # find the non_zeros
inv_var[zeros] = -1.0
var = 1.0/inv_var # inverse
var[zeros] = fill
return var
def var_2_inv_var (var):
"""
Takes variance and converts to an inverse variance. Carefully
handling zeros and inf values
"""
fill = np.inf
var = np.asarray(var).astype(float)
zeros = (var <= 0.0) # find non zeros
var[zeros] = -1.0
inv_var = 1.0/var # inverse
inv_var[zeros] = fill
return inv_var
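# Usage sketch (assumed): non-positive entries are replaced by the fill value (np.inf),
# so the two conversions round-trip cleanly.
#   var_2_inv_var([0.25, 0.0, 4.0]) -> [4.0, inf, 0.25]
#   inv_var_2_var([4.0, 0.0, 0.25]) -> [0.25, inf, 4.0]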
def clean_variances(variance, zero_ok=False, fill=np.inf):
"""takes input variances which may include nan's 0's and negative values
and replaces those values with a fill value.
variance: ndarray
the array of variances array
zero_ok: bool
if True zero entries will be allowed to persist in the output
fill: float
the value to replace bad variances with
"""
bad_mask = np.isnan(variance)
if zero_ok:
bad_mask += variance < 0
else:
bad_mask += variance <= 0
new_variance = np.where(bad_mask, fill, variance)
return new_variance
def clean_inverse_variances(inverse_variance):
"""takes input inverse variances which may include nan's, negative values
and np.inf and replaces those values with 0
"""
bad_mask = np.isnan(inverse_variance)
bad_mask += inverse_variance < 0
bad_mask += inverse_variance == np.inf
new_ivar = np.where(bad_mask, 0, inverse_variance)
return new_ivar
def reduce_output_shape (arr):
shape = arr.shape
new_shape = tuple()
for i in range(len(shape)):
if shape[i] != 1:
new_shape += (shape[i],)
return arr.reshape(new_shape)
def local_gaussian_fit(
y_values,
y_variance=None,
peak_idx=None,
fit_width=1,
xvalues=None,
opt_cleanup=False,
cleanup_width=3,
return_fit_dict=False
):
"""fit a quadratic function taking pixels from
peak_idx - fit_width to peak_idx + fit_width, to the log of the y_values
passed in. Giving back the parameters of a best fit gaussian.
inputs:
values: numpy.ndarray
        the array of input values to which the fit will be made
peak_idx: int
the rough pixel location of the maximum about which to fit.
if None the global maximum is found and used.
fit_width: float
the width of the gaussian fit
xvalues: numpy.ndarray
        if None, the parameters of the gaussian will be determined
        in terms of indices (e.g. the center occurs at index=20.82); if given,
        xvalues is interpreted as the corresponding x values for the y_values
        array and the returned coefficients will be for that coordinate space
        instead.
returns:
center, sigma, peak_y_value
"""
if peak_idx is None:
peak_idx = np.argmax(y_values)
if y_variance is None:
y_variance = y_values
lb = max(peak_idx - fit_width, 0)
ub = min(peak_idx + fit_width, len(y_values)-1)
if xvalues is None:
chopped_xvals = np.arange(lb, ub+1)
peak_xval = peak_idx
else:
assert len(y_values) == len(xvalues)
chopped_xvals = xvalues[lb:ub+1]
peak_xval = xvalues[peak_idx]
xmatrix = np.ones((len(chopped_xvals), 3))
delta = chopped_xvals-peak_xval
x_scale = np.std(delta)
delta /= x_scale
xmatrix[:, 1] = delta
xmatrix[:, 2] = delta**2
chopped_y = y_values[lb:ub+1]
#make sure there are no negative numbers
chopped_y = np.where(chopped_y > 0, chopped_y, 0.0)
chopped_var = y_variance[lb:ub+1]
inv_noise_mat = np.diag(chopped_y/chopped_var, 0) #factor of y for log transform
chopped_y = np.log(chopped_y)
poly_covar = np.linalg.pinv(np.dot(np.dot(xmatrix.transpose(), inv_noise_mat), xmatrix))
poly_var = np.abs(np.dot(poly_covar, np.ones(3)))
diag_var_est = np.diag(poly_covar)
poly_var = np.where(poly_var > diag_var_est, poly_var, diag_var_est)
poly_coeffs = np.dot(poly_covar, np.dot(xmatrix.transpose(), np.dot(inv_noise_mat, chopped_y)))
#poly_coeffs = np.linalg.lstsq(xmatrix, chopped_yvalues)[0]
offset = -(poly_coeffs[1]/(2*poly_coeffs[2]))*x_scale
center = peak_xval + offset
sigma = x_scale/np.sqrt(2*np.abs(poly_coeffs[2]))
center_p_vec = np.asarray([1.0, offset, offset**2])
peak_y_value = np.dot(center_p_vec, poly_coeffs)
def _gauss_resids(pvec, center, x, y):
prof = gauss(x, center, pvec[0])
opt_mult = np.sum(prof*y)/np.sum(prof**2)
resids = y-opt_mult*prof
return resids
if opt_cleanup:
lb = max(peak_idx - cleanup_width, 0)
ub = min(peak_idx + cleanup_width, len(y_values)-1)
if xvalues is None:
xvalues = np.arange(lb, ub+1)
chopped_x = xvalues[lb:ub+1].copy()
chopped_y = y_values[lb:ub+1].copy()
start_vec = [sigma]
opt_res = scipy.optimize.leastsq(_gauss_resids, start_vec, args=(offset+peak_xval, chopped_x, chopped_y))
sigma = np.abs(opt_res[0][0])
if return_fit_dict:
fd = {}
fd["poly_var"] = poly_var
fd["poly_covar"] = poly_covar
fd["poly_coeffs"] = poly_coeffs
fd["x_scale"] = x_scale
fd["center"] = center
fd["sigma"] = sigma
fd["peak_value"] = peak_y_value
#applying first order error propagation theory
cent_var = (poly_var[1]/poly_coeffs[2])**2
cent_var += (poly_coeffs[1]/poly_coeffs[2]**2 * poly_var[2])**2
cent_err = 0.5*x_scale*np.sqrt(cent_var)
fd["center_error"] = cent_err
sigma_var = 1.0/8.0*(np.power(np.abs(poly_coeffs[2]), -3.0/2.0)*poly_var[2])**2
sigma_err = x_scale*np.sqrt(sigma_var)
fd["sigma_error"] = sigma_err
return (center, sigma, np.exp(peak_y_value)), fd
else:
return center, sigma, np.exp(peak_y_value)
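# Usage sketch (assumed): for a clean Gaussian profile the log-quadratic fit recovers the
# center, width and peak height essentially exactly.
def _local_gaussian_fit_demo():
    x = np.linspace(-5.0, 5.0, 101)
    y = 2.0 * np.exp(-0.5 * ((x - 0.3) / 1.2) ** 2)
    center, sigma, peak = local_gaussian_fit(y, fit_width=5, xvalues=x)
    return center, sigma, peak  # approximately (0.3, 1.2, 2.0)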
def get_local_maxima(arr):
    # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
    """
    Takes an array and detects the peaks using the local maximum filter.
    Returns a boolean mask of the peaks (i.e. True where
    the pixel's value is the neighborhood maximum, False otherwise)
"""
# define an connected neighborhood
# http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure
neighborhood = morphology.generate_binary_structure(len(arr.shape),2)
# apply the local maximum filter; all locations of maximum value
# in their neighborhood are set to 1
# http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter
local_max = (filters.maximum_filter(arr, footprint=neighborhood)==arr)
# local_max is a mask that contains the peaks we are
# looking for, but also the background.
# In order to isolate the peaks we must remove the background from the mask.
#
# we create the mask of the background
background = (arr==0)
#
# a little technicality: we must erode the background in order to
# successfully subtract it from local_max, otherwise a line will
# appear along the background border (artifact of the local maximum filter)
# http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
eroded_background = morphology.binary_erosion(
background, structure=neighborhood, border_value=1)
#
    # we obtain the final mask, containing only peaks,
    # by removing the background from the local_max mask
    # (use & ~ rather than boolean subtraction, which newer numpy versions reject)
    detected_maxima = local_max & ~eroded_background
return detected_maxima
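# Usage sketch (assumed): works on 1D arrays as well, since the neighborhood is built from
# the dimensionality of the input.
def _local_maxima_demo():
    arr = np.array([0.0, 1.0, 0.5, 2.0, 0.3, 0.0])
    return get_local_maxima(arr)  # True at indices 1 and 3, False elsewhere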
def wavelet_transform_fft(values, g_widths):
"""create a grid of gaussian line profiles and perform a wavelet transform
over the grid. (a.k.a. simply convolve the data with all line profiles
    in the grid in an efficient way and then return an array that contains
all of these convolutions.
inputs:
        values: the array of values to transform
        g_widths: an array of gaussian widths (in pixels) to create profiles for
"""
n_pts ,= values.shape
n_trans = 2**(int(np.log2(n_pts) + 1.5))
n_g_widths = len(g_widths)
val_fft = fftpack.fft(values, n=n_trans)
out_dat = np.zeros((n_g_widths, n_pts), dtype=float)
for g_width_idx in range(n_g_widths):
c_g_width = g_widths[g_width_idx]
l2norm = np.power(1.0/(np.pi*c_g_width), 0.25)
deltas = np.arange(n_pts, dtype=float) - n_pts/2
lprof = l2norm*np.exp(-(deltas/(2.0*c_g_width))**2)
#we should normalize them via the L2 norm so we can see maxima effectively
ltrans = np.abs(fftpack.fft(lprof, n=n_trans))
wvtrans = fftpack.ifft(val_fft*ltrans)
out_dat[g_width_idx] = wvtrans[:n_pts].real
return out_dat
def wavelet_transform(values, g_widths, mask):
"""create a grid of gaussian line profiles and perform a wavelet transform
over the grid. (a.k.a. simply convolve the data with all line profiles
in the grid in an efficient way and then return an asarray that contains
all of these convolutions.
inputs:
        values: the array of values to transform
        g_widths: an array of gaussian widths (in pixels) to create profiles for
"""
n_pts ,= values.shape
n_g_widths = len(g_widths)
out_dat = np.zeros((n_g_widths, n_pts), dtype=float)
max_delt = min((n_pts-1)/2, 5*np.max(g_widths))
deltas = np.arange(2*max_delt+1) - max_delt
    if mask is None:
mask = np.ones(values.shape)
for g_width_idx in range(n_g_widths):
c_g_width = g_widths[g_width_idx]
lprof = np.exp(-0.5*(deltas/c_g_width)**2)
conv_norm = np.convolve(lprof, mask, mode="same")
conv_norm = np.where(conv_norm, conv_norm, 1.0)
out_dat[g_width_idx] = np.convolve(values*mask, lprof, mode="same")/conv_norm
return out_dat
def _incr_left(idx, maxima, bad_mask):
if (idx - 1) < 0:
return idx, True
elif bad_mask[idx-1]:
return idx, True
elif maxima[idx-1]:
return idx-1, True
else:
return idx-1, False
def _incr_right(idx, maxima, bad_mask):
if (idx + 1) > len(maxima) -1:
return idx, True
elif bad_mask[idx+1]:
return idx, True
elif maxima[idx+1]:
return idx+1, True
else:
return idx+1, False
def trough_bounds(maxima, start_idx, bad_mask=None):
    if bad_mask is None:
bad_mask = np.zeros(maxima.shape, dtype=bool)
left_blocked = False
right_blocked = False
left_idx = start_idx
right_idx = start_idx
while not left_blocked:
left_idx, left_blocked = _incr_left(left_idx, maxima, bad_mask)
while not right_blocked:
right_idx, right_blocked = _incr_right(right_idx, maxima, bad_mask)
return left_idx, right_idx
class MinimaStatistics:
pass
def minima_statistics(values, variance, last_delta_fraction=1.0, max_sm_radius=0.5):
#import pdb; pdb.set_trace()
n_pts = len(values)
bad_vals = np.isnan(variance)+(variance <= 0)+(variance > 1.0e40)
sm_pix = int(max_sm_radius*12)
sm_pix += sm_pix % 2
sm_pix += 1
window = np.exp(-np.linspace(-3, 3, sm_pix)**2)
sm_norm = np.convolve(True-bad_vals, window, mode="same")
sm_norm = np.where((sm_norm > 0), sm_norm, 1.0)
smoothed_vals = np.convolve(values*(True-bad_vals), window, mode="same")/sm_norm
maxima = get_local_maxima(smoothed_vals)
#import pdb; pdb.set_trace()
minima = get_local_maxima(-values)
    min_idxs ,= np.where(minima*(~bad_vals))
l_z = []
r_z = []
n_cons = []
left_idxs = []
right_idxs = []
for min_idx in min_idxs:
min_val = values[min_idx]
left_idx, right_idx = trough_bounds(maxima, min_idx, bad_vals)
left_delta_cor = np.abs(values[left_idx] - values[min(n_pts-1, left_idx+1)])
left_h = (values[left_idx]-min_val)
left_h -= (1.0-last_delta_fraction)*left_delta_cor
left_z = left_h/np.sqrt(variance[left_idx] + variance[min_idx])
right_delta_cor = np.abs(values[right_idx] - values[max(0, right_idx-1)])
right_h = (values[right_idx]-min_val)
right_h -= (1.0-last_delta_fraction)*right_delta_cor
right_z = right_h/np.sqrt(variance[right_idx] + variance[min_idx])
l_z.append(left_z)
r_z.append(right_z)
n_cons.append(right_idx-left_idx)
left_idxs.append(left_idx)
right_idxs.append(right_idx)
l_z = np.asarray(l_z)
r_z = np.asarray(r_z)
n_cons = np.asarray(n_cons)
left_idxs = np.asarray(left_idxs)
right_idxs = np.asarray(right_idxs)
ms = MinimaStatistics()
ms.left_idxs = left_idxs
ms.min_idxs = min_idxs
ms.right_idxs = right_idxs
ms.l_z = l_z
ms.r_z = r_z
ms.n_c = n_cons
return ms
def detect_features(
values,
variance,
reject_fraction=0.5,
last_delta_fraction=1.0,
mask_back_off=-1,
max_sm_radius=0.5,
stat_func=lambda lz, rz, nc: np.sqrt(lz**2+rz**2+0.25*(nc-2)**2),
min_stats=None
):
"""detect spectral features using an arbitrary statistic
by default the stat_func is
s = np.sqrt(l_z**2+r_z**2+0.25*(n_c-2)**2)
where l_z is the z score of the difference between the minima
and the closest local maximum on the left, r_z is the same for the right.
n_c is the number of contiguous points around the minimum for which the
value monotonically increases.
inputs
values: ndarray
the flux values
variance: ndarray
the variance values associated to the flux values
threshold: float
the threshold to put on the statistic s for a detection
last_delta_fraction: float
the fraction of the difference between the first
maxima pixel found and the pixel value immediately prior to be used.
mask_back_off: int
the number of pixels to back off from the local maxima
when creating the feature mask. a value of 0 would mask the maxima
a value of 1 would leave the maxima unmasked, a value of -1 masks
the maximum and one pixel out, a value of -2 masks the maxima and
2 pixels out etc etc.
max_sm_radius: float
the pixel with a local maximum
min_stats: MinimaStatistics
if the minima statistics have already been calculated you can pass it in
and it will not be recalculated.
returns:
a tuple of
left_idxs, min_idxs, right_idxs, feature_mask
left_idxs: the index of the left bounding maximum
min_idxs: the index of the local minimum
right_idxs: the index of the right bounding maximum
        feature_mask: a boolean array with the shape of values which is True if there is
no detected feature affecting the associated pixel.
"""
#import matplotlib.pyplot as plt
#import pdb; pdb.set_trace()
    if min_stats is not None:
        msres = min_stats
    else:
        msres = minima_statistics(values, variance, last_delta_fraction)
left_idxs, min_idxs, right_idxs = msres.left_idxs, msres.min_idxs, msres.right_idxs
l_z, r_z, n_cons = msres.l_z, msres.r_z, msres.n_c
s = stat_func(l_z, r_z, n_cons)
sorted_s = np.sort(s)
switch_idx = int(len(sorted_s)*(1.0-reject_fraction))
threshold = np.mean(sorted_s[switch_idx:switch_idx+2])
mask = s > threshold
left_idxs = left_idxs[mask].copy()
min_idxs = min_idxs[mask].copy()
right_idxs = right_idxs[mask].copy()
feature_mask = np.ones(values.shape, dtype=bool)
for left_idx, right_idx in zip(left_idxs, right_idxs):
feature_mask[max(0, left_idx+mask_back_off):right_idx-mask_back_off+1] = False
return msres, feature_mask
def smoothed_mad_error(
values,
smoothing_scale=3,
median_scale = 200,
apply_poisson=True,
rejection_threshold=1e-5,
post_smooth=0,
max_snr=1000.0,
):
"""estimate the noise characteristics of an input data vector under the assumption
that the underlying "true" value is slowly varying and the high frequency fluctuations
are representative of the level of the noise.
"""
good_mask = np.ones(len(values), dtype=bool)
if hasattr(values, "ivar") and hasattr(values, "flux"):
if not values.ivar is None:
good_mask *= values.ivar > 0
values = values.flux
#detect and reject perfectly flat regions
#sec_der = scipy.gradient(scipy.gradient(values))
#good_mask *= filters.uniform_filter(np.abs(sec_der) > 0, 2) > 0
good_mask *= values > rejection_threshold
#smooth the flux accross the accepted fluxes
smfl = np.where(good_mask, values, 0)
    num_fr = filters.uniform_filter(smfl, smoothing_scale)
    # normalize by the smoothed acceptance mask so rejected pixels do not dilute the local mean
    denom_fr = filters.uniform_filter(good_mask.astype(float), smoothing_scale)
    smfl = np.where(denom_fr > 0, num_fr/denom_fr, 0)
#smfl[good_mask] = filters.gaussian_filter(values[good_mask], smoothing_scale)
diffs = values - smfl
#use the running median change to protect ourselves from outliers
mad = np.repeat(np.inf, len(values))
mad[good_mask] = filters.median_filter(np.abs(diffs)[good_mask], median_scale)
eff_width = 2.0*smoothing_scale #effective filter width
#need to correct for loss of variance from the averaging
correction_factor = eff_width/max(1, (eff_width-1))
char_sigma = correction_factor*1.48*mad #the characteristic noise level
#smooth the running median filter so we don't have wildly changing noise levels
if post_smooth > 0:
char_sigma = filters.gaussian_filter(char_sigma, post_smooth)
if apply_poisson:
#scale the noise at each point to be proportional to its value
var = ((smfl+1e-10)/np.median(smfl[good_mask]))*char_sigma**2
else:
var = char_sigma**2
#put an upper limit on detected snr
var = np.where(np.abs(values)**2/var < max_snr**2, var, 1.0/max_snr**2)
#var = clean_variances(var)
return var
def min_delta_bins(x, min_delta, target_n=1, forced_breaks=None):
"""return a set of x values which partition x into bins.
each bin must be at least min_delta wide and must contain at least target_n
    points in the x array (with an exception made when forced_breaks are used).
The bins attempt to be the smallest that they can be while achieving both
these constraints.
Bins are simply built up from the left until the constraints are met
and then a new bin is begun, so the optimum bins do not necessarily result.
inputs
x: iterable
the coordinates to choose a binning for
min_delta: float
the smallest allowed bin size
target_n: int
the desired number of objects per bin
forced_breaks: list
if forced_breaks is specified then the numbers in forced_breaks will
be included in the output bins. The bins will otherwise still attempt to
meet the min_delta and target_n values.
"""
sorted_x = np.sort(np.asarray(x))
x_diff = np.abs(sorted_x[1:]-sorted_x[:-1])
    if forced_breaks is None:
forced_breaks = []
forced_breaks = sorted(forced_breaks)
diff_sum = np.cumsum(x_diff)
last_diff_sum = 0
last_diff_idx = 0
current_n = 0
bins = [sorted_x[0]]
last_bin_forced = True
next_force_idx = 0
for i in range(len(x_diff)):
current_n += 1
if len(forced_breaks) > next_force_idx:
next_fb = forced_breaks[next_force_idx]
if sorted_x[i+1] > next_fb:
bins.append(next_fb)
next_force_idx += 1
last_diff_sum = diff_sum[i]
below_target_n = current_n < target_n
                below_target_delta = (next_fb - sorted_x[last_diff_idx]) < min_delta
if below_target_n or below_target_delta:
if not last_bin_forced:
bins.pop()
last_bin_forced = True
last_diff_idx = i
last_diff_sum = diff_sum[i]
current_n = 0
if diff_sum[i]-last_diff_sum > min_delta and current_n >= target_n:
avg_br = 0.5*(sorted_x[i] + sorted_x[i+1])
bins.append(avg_br)
last_diff_sum = diff_sum[i]
last_diff_idx = i
current_n = 0
last_bin_forced = False
if not last_bin_forced:
bins.pop()
if bins[-1] != sorted_x[-1]:
bins.append(sorted_x[-1])
return bins
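# Usage sketch (assumed): partition the points 0..9 into bins at least 3 wide that hold at
# least 2 points each.
def _min_delta_bins_demo():
    xs = np.arange(10, dtype=float)
    return min_delta_bins(xs, min_delta=3.0, target_n=2)  # e.g. [0.0, 3.5, 9.0]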
def layered_median_mask(arr, n_layers=3, first_layer_width=31, last_layer_width=11, rejection_sigma=2.0):
marr = np.asarray(arr)
assert n_layers > 1
assert first_layer_width >= 1
assert last_layer_width >= 1
layer_widths = np.asarray(np.linspace(first_layer_width, last_layer_width, n_layers), dtype=int)
mask = np.ones(marr.shape, dtype=bool)
for layer_idx in range(n_layers):
lw = layer_widths[layer_idx]
masked_arr = marr[mask]
filtered = filters.median_filter(masked_arr, int(lw))
local_mad = filters.median_filter(np.abs(masked_arr-filtered), int(lw))
mask[mask] = masked_arr >= (filtered - rejection_sigma*1.4*local_mad)
return mask
def smooth_ppol_fit(x, y, y_inv=None, order=3, mask=None, mult=None, partition="adaptive", partition_kwargs=None):
    if partition_kwargs is None:
        partition_kwargs = {}
    if y_inv is None:
        y_inv = np.ones(y.shape)
    if mult is None:
        mult = np.ones(y.shape, dtype=float)
    if mask is None:
        mask = np.ones(y.shape, dtype=bool)
masked_x = x[mask]
if partition == "adaptive":
try:
partition = min_delta_bins(masked_x, **partition_kwargs)[1:-1]
except:
partition = [np.median(masked_x)]
pp_gen = piecewise_polynomial.RCPPB(poly_order=order, control_points=partition)
ppol_basis = pp_gen.get_basis(masked_x).transpose()
ppol_basis *= mult.reshape((-1, 1))
in_sig = 1.0/np.sqrt(y_inv[mask])
med_sig = np.median(in_sig)
fit_coeffs = irls(ppol_basis, y,
sigma=in_sig,
gamma=5.0,
max_iter=5,
)
n_polys = len(partition) + 1
n_coeffs = order+1
out_coeffs = np.zeros((n_polys, n_coeffs))
for basis_idx in range(pp_gen.n_basis):
c_coeffs = pp_gen.basis_coefficients[basis_idx].reshape((n_polys, n_coeffs))
out_coeffs += c_coeffs*fit_coeffs[basis_idx]
out_ppol = piecewise_polynomial.PiecewisePolynomial(out_coeffs, partition, centers=pp_gen.centers, scales=pp_gen.scales, bounds=pp_gen.bounds)
return out_ppol
def echelle_normalize(spectra, masks="layered median", partition="adaptive", mask_kwargs=None, partition_kwargs=None):
"""normalize in a way that shares normalization shape accross nearby
orders"""
n_spec = len(spectra)
    if mask_kwargs is None:
        mask_kwargs = {}
    if partition_kwargs is None:
        partition_kwargs = {}
if masks == "layered median":
masks = []
for spec_idx in range(n_spec):
flux = spectra[spec_idx].flux
mask = layered_median_mask(flux, **mask_kwargs)
masks.append(mask)
elif len(masks) == n_spec and len(masks[0]) == len(flux):
masks = [
|
np.asarray(mask, dtype=bool)
|
numpy.asarray
|
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym import spaces
import numpy as np
from collections import namedtuple
from enum import Enum
import copy
import logging
logger = logging.getLogger(__name__)
random = None
AgentState = Enum('AgentState', 'alive crashed finished out')
LaneSpec = namedtuple('LaneSpec', ['cars', 'speed_range'])
GridDrivingState = namedtuple('GridDrivingState', ['cars', 'agent', 'finish_position', 'occupancy_trails', 'agent_state'])
MaskSpec = namedtuple('MaskSpec', ['type', 'radius'])
class DenseReward:
FINISH_REWARD = 100
MISSED_REWARD = -5
CRASH_REWARD = -20
TIMESTEP_REWARD = -1
class SparseReward:
FINISH_REWARD = 10
MISSED_REWARD = 0
CRASH_REWARD = 0
TIMESTEP_REWARD = 0
class DefaultConfig:
LANES = [
LaneSpec(1, [-2, -1]),
LaneSpec(2, [-3, -3]),
LaneSpec(3, [-3, -1]),
]
WIDTH = 10
PLAYER_SPEED_RANGE = [-1, -1]
STOCHASTICITY = 1.0
class ActionNotFoundException(Exception):
pass
class AgentCrashedException(Exception):
pass
class AgentOutOfBoundaryException(Exception):
pass
class AgentFinishedException(Exception):
pass
class CarNotStartedException(Exception):
pass
class Point(object):
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __mul__(self, other):
return Point(self.x * other, self.y * other)
def __rmul__(self, other):
return self.__mul__(other)
def __str__(self):
return "Point(x={},y={})".format(self.x, self.y)
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
@property
def tuple(self):
return (self.x, self.y)
class Rectangle(object):
def __init__(self, w, h, x=0, y=0):
self.w, self.h = w, h
self.x, self.y = x, y
def sample_point(self):
return Point(random.randint(self.x, self.x + self.w), random.randint(self.y, self.y + self.h))
def bound(self, point, bound_x=True, bound_y=True):
x = np.minimum(np.maximum(point.x, self.x), self.x + self.w - 1) if bound_x else point.x
y = np.minimum(np.maximum(point.y, self.y), self.y + self.h - 1) if bound_y else point.y
return Point(x, y)
def circular_bound(self, point, bound_x=True, bound_y=True):
x = self.x + ((point.x - self.x) % self.w) if bound_x else point.x
y = self.y + ((point.y - self.y) % self.h) if bound_y else point.y
return Point(x, y)
def contains(self, point):
return (point.x >= self.x and point.x < self.x + self.w) and (point.y >= self.y and point.y < self.y + self.h)
def __str__(self):
return "Rectangle(w={},h={},x={},y={})".format(self.w, self.h, self.x, self.y)
class Car(object):
def __init__(self, position, speed_range, world, circular=True, auto_brake=True, auto_lane=True, p=1.0, id=None, ignore=False):
self.id = id
self.position = position
self.speed_range = speed_range
self.world = world
self.bound = self.world.boundary.circular_bound if self.world and self.world.boundary and circular else lambda x, **kwargs: x
self.auto_brake = auto_brake
self.auto_lane = auto_lane
self.p = p
self.done()
self.ignore = ignore
self.ignored = self.ignore
self.speed = 0
def sample_speed(self):
if random.random_sample() > self.p:
return np.round(np.average(self.speed_range))
import numpy as np
import sys
import os
import cv2
import torch.nn.functional as F
import torch
import re
import matplotlib.pyplot as plt
TAG_FLOAT = 202021.25
# testing vim, and git push
def readflo(file):
assert type(file) is str, "file is not str %r" % str(file)
assert os.path.isfile(file) is True, "file does not exist %r" % str(file)
assert file[-4:] == '.flo', "file ending is not .flo %r" % file[-4:]
f = open(file, 'rb')
flo_number = np.fromfile(f, np.float32, count=1)[0]
assert flo_number == TAG_FLOAT, 'Flow number %r incorrect. Invalid .flo file' % flo_number
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
data = np.fromfile(f, np.float32, count=2 * w[0] * h[0])
flow = np.resize(data, (int(h[0]), int(w[0]), 2))
f.close()
return flow
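# Usage sketch (hypothetical path): flow = readflo('frames/frame_0001.flo')
# yields an (h, w, 2) float32 array of per-pixel (u, v) displacements.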
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header.decode("ascii") == 'PF':
color = True
elif header.decode("ascii") == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode("ascii"))
if dim_match:
width, height = list(map(int, dim_match.groups()))
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().decode("ascii").rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data[:, :, :2]
def makeColorwheel():
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3]) # r g b
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY, 1) / RY)
col += RY
# YG
colorwheel[col:YG + col, 0] = 255 - np.floor(255 * np.arange(0, YG, 1) / YG)
colorwheel[col:YG + col, 1] = 255
col += YG
# GC
colorwheel[col:GC + col, 1] = 255
colorwheel[col:GC + col, 2] = np.floor(255 * np.arange(0, GC, 1) / GC)
col += GC
# CB
colorwheel[col:CB + col, 1] = 255 - np.floor(255 * np.arange(0, CB, 1) / CB)
colorwheel[col:CB + col, 2] = 255
col += CB
# BM
colorwheel[col:BM + col, 2] = 255
colorwheel[col:BM + col, 0] = np.floor(255 * np.arange(0, BM, 1) / BM)
col += BM
# MR
colorwheel[col:MR + col, 2] = 255 - np.floor(255 * np.arange(0, MR, 1) / MR)
colorwheel[col:MR + col, 0] = 255
return colorwheel
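# The wheel concatenates RY+YG+GC+CB+BM+MR = 15+6+4+11+13+6 = 55 hues, stepping
# red -> yellow -> green -> cyan -> blue -> magenta -> red (the usual optical-flow
# colour-wheel layout).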
def computeColor(u, v):
colorwheel = makeColorwheel()
nan_u = np.isnan(u)
nan_v = np.isnan(v)
nan_u = np.where(nan_u)
nan_v = np.where(nan_v)
u[nan_u] = 0
u[nan_v] = 0
v[nan_u] = 0
v[nan_v] = 0
ncols = colorwheel.shape[0]
radius = np.sqrt(u ** 2 + v ** 2)
a = np.arctan2(-v, -u) / np.pi
fk = (a + 1) / 2 * (ncols - 1) # angle in [-1, 1] mapped to [0, ncols-1]
k0 = fk.astype(np.uint8) # lower colour index: 0, 1, ..., ncols-1
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
img = np.empty([k1.shape[0], k1.shape[1], 3])
ncolors = colorwheel.shape[1]
for i in range(ncolors):
tmp = colorwheel[:, i]
col0 = tmp[k0] / 255
col1 = tmp[k1] / 255
col = (1 - f) * col0 + f * col1
idx = radius <= 1
col[idx] = 1 - radius[idx] * (1 - col[idx]) # increase saturation with radius
col[~idx] *= 0.75 # out of range
img[:, :, 2 - i] = np.floor(255 * col).astype(np.uint8)
#img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img.astype(np.uint8)
def computeImg(flow, verbose=False, savePath=None):
eps = sys.float_info.epsilon
UNKNOWN_FLOW_THRESH = 1e9
UNKNOWN_FLOW = 1e10
if flow.shape[0] == 2:
u = flow[0, :, :]
v = flow[1, :, :]
else:
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999
maxv = -999
minu = 999
minv = 999
maxrad = -1
# fix unknown flow
greater_u = np.where(u > UNKNOWN_FLOW_THRESH)
greater_v = np.where(v > UNKNOWN_FLOW_THRESH)
u[greater_u] = 0
u[greater_v] = 0
v[greater_u] = 0
v[greater_v] = 0
maxu = max([maxu, np.amax(u)])
minu = min([minu, np.amin(u)])
maxv = max([maxv, np.amax(v)])
minv = min([minv, np.amin(v)])
rad = np.sqrt(np.multiply(u, u) + np.multiply(v, v))
maxrad = max([maxrad, np.amax(rad)])
u = u / (maxrad + eps)
v = v / (maxrad + eps)
img = computeColor(u, v)
if savePath is not None:
cv2.imwrite(savePath, img)
if verbose:
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
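# u and v are normalised by the largest flow magnitude (maxrad), so the colour
# hue encodes flow direction and saturation encodes relative speed.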
def computerArrows(flow, step=16, verbose=False, savePath=None, img=None):
h, w = flow.shape[:2]
y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int)
fx, fy = flow[y, x].T
lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
if img is None:
vis = np.ones((h, w)).astype('uint8')*255
else:
vis = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
cv2.polylines(vis, lines, 0, (0, 255, 0))
for (x2, y2), (x1, y1) in lines:
cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
if savePath is not None:
cv2.imwrite(savePath, vis)
if verbose:
cv2.imshow('arrowsViz', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
return vis
def disp_function(pred_flo, true_flo):
height, width = true_flo.shape[1:]
pred_flo = F.interpolate(pred_flo, (height, width), mode='bilinear', align_corners=False)
pred_flo = computeImg(pred_flo[0].cpu().numpy())
if true_flo.shape[0] == 2:
true_flo = computeImg(true_flo.cpu().numpy())
image1, image2 = np.expand_dims(pred_flo, axis=0), np.expand_dims(true_flo, axis=0)
return np.concatenate((image1, image2), axis=0)
else:
true_flo = true_flo[:3]
true_flo = true_flo.transpose(0, 2)
true_flo = true_flo.transpose(0, 1)
image1, image2 = np.expand_dims(pred_flo, axis=0), np.expand_dims(true_flo, axis=0)
return np.concatenate((image1, image2), axis=0)
'''
Copyright 2018 <NAME>
E-mail: <EMAIL>
This is the implementation of a deep-reinforcement-learning-based scheduler for High-Level Synthesis.
This file contains the supervised learning (SL) part of the training pipeline.
'''
import time, sys, os, argparse
import random
import numpy as np
import visdom
import matplotlib.pyplot as plt
from logger import LogHandler
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from graph import Graph
from preprocess import preprocess
from policy import Policy
from dag_dataset import DagDataset
parser = argparse.ArgumentParser(description="Deep-RL-Based HLS Scheduler (Supervised Learning)")
parser.add_argument("--use_cuda", type=int, default=1, help="Use cuda? (default: True, the 1st GPU)")
parser.add_argument("--input_graphs", type=int, default=3500, help="Number of input graphs? (default: 3500)")
parser.add_argument("--batch_size", type=int, default=128, help="Batch size? (default: 128)")
parser.add_argument("--learning_rate", type=float, default=5e-4, help="Learning rate? (default: 5e-4)")
parser.add_argument("--epoch", type=int, default=10000, help="Number of epoch? (default: 10000)")
parser.add_argument("--use_network", type=str, default="", help="Use previous network? Input the name of the network. (default: None)")
args = parser.parse_args()
logger_num, logger = LogHandler("sl").getLogger()
logger.info("Deep-RL-Based HLS Scheduler (Supervised Learning)")
print("Logger num: %d" % logger_num)
device = torch.device(("cuda:%d" % (args.use_cuda-1)) if args.use_cuda != 0 else "cpu")
file_name = "_sl_" + time.strftime("%Y%m%d_") + str(logger_num)
STATE_SIZE = (50,50)
if args.use_network == "":
net = Policy(STATE_SIZE[0]).to(device)
print("Build a new network!")
else:
try:
net = torch.load("./Networks/" + args.use_network).to(device)
print("Loaded %s." % args.use_network)
logger.info("Pretrained network: %s (%s)" % (args.use_network,"gpu" if args.use_cuda else "cpu"))
except:
print("No such network named %s. Rebuild a new network!" % args.use_network)
net = Policy(STATE_SIZE[0]).to(device)
network_file = "./Networks/policy" + file_name + ".pkl"
logger.info("New network: %s (%s)" % (network_file,"gpu" if args.use_cuda else "cpu"))
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(net.parameters(),lr=args.learning_rate)
logger.info(net.features)
logger.info(net.classifier)
logger.info("NLLLoss (Negative Log likelihood loss) + Adam")
logger.info("Batch size: %d, Learning rate: %f" % (args.batch_size,args.learning_rate))
best_accuracy = 0
viz = visdom.Visdom()
cur_batch_win, epoch_loss_win = None, None
def train(epoch):
global cur_batch_win
net.train()
total_correct = 0
loss_list, batch_list = [], []
for i, (state, action) in enumerate(data_train_loader):
state = torch.Tensor(state.float()).to(device)
action = torch.Tensor(action.float()).type(torch.LongTensor).to(device)
optimizer.zero_grad()
output = net(state)
# bs*50 <- bs labels
loss = criterion(output,action)
loss_list.append(loss.item())
batch_list.append(i+1)
predict = output.data.max(1)[1]
total_correct += predict.eq(action.data.view_as(predict)).sum()
if i % 10 == 0:
logger.info("Train - Epoch %d, Batch: %d, Loss: %f" % (epoch,i,loss.item()))
if viz.check_connection():
cur_batch_win = viz.line(X=torch.FloatTensor(batch_list), Y=torch.FloatTensor(loss_list),
win=cur_batch_win, name='current_batch_loss',
update=(None if cur_batch_win is None else 'replace'),
opts={'title': 'Epoch Loss Trace','xlabel': 'Batch Number','ylabel': 'Loss','width': 1200,'height': 600})
loss.backward()
optimizer.step()
avg_loss = np.array(loss_list).sum() / len(data_train_loader)
accuracy = float(total_correct) / len(data_train)
logger.info("Train Epoch %d: Avg. Loss: %f, Accuracy: %f" % (epoch,avg_loss,accuracy))
print("Train Epoch %d: Avg. Loss: %f, Accuracy: %f" % (epoch,avg_loss,accuracy))
return avg_loss
def test(epoch):
global best_accuracy
net.eval()
total_correct = 0
avg_loss = 0.0
for i, (state, action) in enumerate(data_test_loader):
state = torch.Tensor(state.float()).to(device)
action = torch.Tensor(action.float()).type(torch.LongTensor).to(device)
output = net(state)
avg_loss += criterion(output, action).item() # sum()
predict = output.data.max(1)[1]
total_correct += predict.eq(action.data.view_as(predict)).sum()
avg_loss /= (len(data_test_loader))
accuracy = float(total_correct) / len(data_test)
logger.info("Test Epoch %d: Avg. Loss: %f, Accuracy: %f" % (epoch,avg_loss,accuracy))
print("Test Epoch %d: Avg. Loss: %f, Accuracy: %f" % (epoch,avg_loss,accuracy))
if best_accuracy < accuracy:
best_accuracy = accuracy
torch.save(net,network_file[:-4]+"_best.pkl")
return avg_loss
def visualization(epoch,train_loss,test_loss):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([i for i in range(1,epoch+1)],np.array(train_loss),label="train")
ax.plot([i for i in range(1,epoch+1)],np.array(test_loss),label="test")
ax.set_xlabel("Epoch")
ax.set_ylabel("Loss")
ax.legend()
fig.savefig("./Loss/fig" + file_name + ".jpg")
plt.cla()
plt.close()
np.save("./Loss/train_loss" + file_name + ".npy",np.array(train_loss))
np.save("./Loss/test_loss" + file_name + ".npy",
|
np.array(test_loss)
|
numpy.array
|
import abc
import contextlib
import random
import collections
import copy
import numpy as np
import networkx as nx
"""A general Interface"""
class GraphSimilarityDataset(object):
"""Base class for all the graph similarity learning datasets.
This class defines some common interfaces a graph similarity dataset can have,
in particular the functions that creates iterators over pairs and triplets.
"""
@abc.abstractmethod
def triplets(self, batch_size):
"""Create an iterator over triplets.
Args:
batch_size: int, number of triplets in a batch.
Yields:
graphs: a `GraphData` instance. The batch of triplets put together. Each
triplet has 3 graphs (x, y, z). Here the first graph is duplicated once
so the graphs for each triplet are ordered as (x, y, x, z) in the batch.
The batch contains `batch_size` number of triplets, hence `4*batch_size`
many graphs.
"""
pass
@abc.abstractmethod
def pairs(self, batch_size):
"""Create an iterator over pairs.
Args:
batch_size: int, number of pairs in a batch.
Yields:
graphs: a `GraphData` instance. The batch of pairs put together. Each
pair has 2 graphs (x, y). The batch contains `batch_size` number of
pairs, hence `2*batch_size` many graphs.
labels: [batch_size] int labels for each pair, +1 for similar, -1 for not.
"""
pass
"""Graph Edit Distance Task"""
# Graph Manipulation Functions
def permute_graph_nodes(g):
"""Permute node ordering of a graph, returns a new graph."""
n = g.number_of_nodes()
new_g = nx.Graph()
new_g.add_nodes_from(range(n))
perm = np.random.permutation(n)
edges = g.edges()
new_edges = []
for x, y in edges:
new_edges.append((perm[x], perm[y]))
new_g.add_edges_from(new_edges)
return new_g
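# Example: with perm = [2, 0, 1], an edge (0, 1) in g becomes (2, 0) in new_g;
# the result is an isomorphic relabelling of the input graph.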
def substitute_random_edges(g, n):
"""Substitutes n edges from graph g with another n randomly picked edges."""
g = copy.deepcopy(g)
n_nodes = g.number_of_nodes()
edges = list(g.edges())
# sample n edges without replacement
e_remove = [
edges[i] for i in np.random.choice(np.arange(len(edges)), n, replace=False)
]
edge_set = set(edges)
e_add = set()
while len(e_add) < n:
e = np.random.choice(n_nodes, 2, replace=False)
# make sure e does not exist and is not already chosen to be added
if (
(e[0], e[1]) not in edge_set
and (e[1], e[0]) not in edge_set
and (e[0], e[1]) not in e_add
and (e[1], e[0]) not in e_add
):
e_add.add((e[0], e[1]))
for i, j in e_remove:
g.remove_edge(i, j)
for i, j in e_add:
g.add_edge(i, j)
return g
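# The returned graph differs from g by exactly n edge substitutions, which is what
# the positive/negative pair and triplet construction below relies on.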
class GraphEditDistanceDataset(GraphSimilarityDataset):
"""Graph edit distance dataset."""
def __init__(
self,
n_nodes_range,
p_edge_range,
n_changes_positive,
n_changes_negative,
permute=True,
):
"""Constructor.
Args:
n_nodes_range: a tuple (n_min, n_max). The minimum and maximum number of
nodes in a graph to generate.
p_edge_range: a tuple (p_min, p_max). The minimum and maximum edge
probability.
n_changes_positive: the number of edge substitutions for a pair to be
considered positive (similar).
n_changes_negative: the number of edge substitutions for a pair to be
considered negative (not similar).
permute: if True (default), permute node orderings in addition to
changing edges; if False, the node orderings across a pair or triplet of
graphs will be the same, useful for visualization.
"""
self._n_min, self._n_max = n_nodes_range
self._p_min, self._p_max = p_edge_range
self._k_pos = n_changes_positive
self._k_neg = n_changes_negative
self._permute = permute
def _get_graph(self):
"""Generate one graph."""
n_nodes = np.random.randint(self._n_min, self._n_max + 1)
p_edge = np.random.uniform(self._p_min, self._p_max)
# do a little bit of filtering
n_trials = 100
for _ in range(n_trials):
g = nx.erdos_renyi_graph(n_nodes, p_edge)
if nx.is_connected(g):
return g
raise ValueError("Failed to generate a connected graph.")
def _get_pair(self, positive):
"""Generate one pair of graphs."""
g = self._get_graph()
if self._permute:
permuted_g = permute_graph_nodes(g)
else:
permuted_g = g
n_changes = self._k_pos if positive else self._k_neg
changed_g = substitute_random_edges(g, n_changes)
return permuted_g, changed_g
def _get_triplet(self):
"""Generate one triplet of graphs."""
g = self._get_graph()
if self._permute:
permuted_g = permute_graph_nodes(g)
else:
permuted_g = g
pos_g = substitute_random_edges(g, self._k_pos)
neg_g = substitute_random_edges(g, self._k_neg)
return permuted_g, pos_g, neg_g
def triplets(self, batch_size):
"""Yields batches of triplet data."""
while True:
batch_graphs = []
for _ in range(batch_size):
g1, g2, g3 = self._get_triplet()
batch_graphs.append((g1, g2, g1, g3))
yield self._pack_batch(batch_graphs)
def pairs(self, batch_size):
"""Yields batches of pair data."""
while True:
batch_graphs = []
batch_labels = []
positive = True
for _ in range(batch_size):
g1, g2 = self._get_pair(positive)
batch_graphs.append((g1, g2))
batch_labels.append(1 if positive else -1)
positive = not positive
packed_graphs = self._pack_batch(batch_graphs)
labels = np.array(batch_labels, dtype=np.int32)
yield packed_graphs, labels
def _pack_batch(self, graphs):
"""Pack a batch of graphs into a single `GraphData` instance.
Args:
graphs: a list of generated networkx graphs.
Returns:
graph_data: a `GraphData` instance, with node and edge indices properly
shifted.
"""
Graphs = []
for graph in graphs:
for inner_graph in graph:
Graphs.append(inner_graph)
graphs = Graphs
from_idx = []
to_idx = []
graph_idx = []
n_total_nodes = 0
n_total_edges = 0
for i, g in enumerate(graphs):
n_nodes = g.number_of_nodes()
n_edges = g.number_of_edges()
edges = np.array(g.edges(), dtype=np.int32)
# shift the node indices for the edges
from_idx.append(edges[:, 0] + n_total_nodes)
to_idx.append(edges[:, 1] + n_total_nodes)
graph_idx.append(np.ones(n_nodes, dtype=np.int32) * i)
n_total_nodes += n_nodes
n_total_edges += n_edges
GraphData = collections.namedtuple('GraphData', [
'from_idx',
'to_idx',
'node_features',
'edge_features',
'graph_idx',
'n_graphs'])
return GraphData(
from_idx=np.concatenate(from_idx, axis=0),
to_idx=np.concatenate(to_idx, axis=0),
# this task only cares about the structures, the graphs have no features.
# setting higher dimension of ones to confirm code functioning
# with high dimensional features.
node_features=np.ones((n_total_nodes, 8), dtype=np.float32),
edge_features=np.ones((n_total_edges, 4), dtype=np.float32),
graph_idx=np.concatenate(graph_idx, axis=0),
n_graphs=len(graphs))
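# Edge endpoints are shifted by the running node count so every graph in the batch
# shares one flat node numbering; graph_idx records which graph each node belongs to.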
import pandas as pd
import config
import numpy as np
import python_speech_features as spe_feats
from scipy.stats import kurtosis, skew
from scipy.signal import lfilter
import librosa
from pydub import AudioSegment
from pydub.silence import split_on_silence
from pydub.utils import mediainfo
from pydub.playback import play
import math
#compute RMS value of a signal and return it (in dB scale)
#seems not to work?
def get_RMS(s):
s_rms = np.sqrt(np.mean(np.power(s,2)))
#convert to dB scale
#s_db = 20*np.log10(s_rms/1.0)
return s_rms
#seems not to work either
#RMS-based normalization of a signal, based on a target level (in dB)
def RMS_normalization(s, dB_targ):
#desired level is converted to linear scale
rms_targ = 10**(dB_targ/20.0)
#compute scaling factor
scale = rms_targ/get_RMS(s)
#scale amplitude of input signal
scaled_s = scale*s
return scaled_s
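# Worked example: dB_targ = -20 gives rms_targ = 10**(-20/20) = 0.1, so the output
# signal is rescaled to an RMS amplitude of 0.1 on the linear scale.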
def match_target_amplitude(audioSegment_sound, target_dBFS):
dBFS_diff = target_dBFS - audioSegment_sound.dBFS
return audioSegment_sound.apply_gain(dBFS_diff)
#Apply pre-emphasis (high-pass) filter
def apply_preEmph(x):
x_filt = lfilter([1., -0.97], 1, x)
return x_filt
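# Implements y[n] = x[n] - 0.97 * x[n-1], a first-order high-pass that boosts the
# high-frequency content before feature extraction.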
#Obtain autocorrelation
def autocorr(x):
result = np.correlate(x, x, mode='full')
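# np.correlate(x, x, mode='full') returns 2*len(x) - 1 lags with the zero-lag value
# at the middle index; callers typically keep result[result.size // 2:] for the
# non-negative lags.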
from functools import lru_cache
import os
import glob
import binarytree as bt
import numpy as np
import emcee
# in case we don't have this module
try:
import classifier.photometry
import classifier.spectra
classifier_module = True
except ImportError:
classifier_module = False
from . import model
from . import photometry
from . import spectrum
from . import filter
from . import utils
from . import result
from . import db
from . import config as cfg
# these are for getting info to pymultinest
global_obs = ()
global_mod = ()
global_p_rng = ()
def fit_results(file,update_mn=False,update_an=False,
update_json=False,update_thumb=False,
sort=True,custom_sort=True,nospec=False):
"""Return a list of fitting results.
Parameters
----------
file : str
The raw photometry file to use as input.
update_mn : bool, optional
Force update of multinest fitting.
update_an : bool, optional
Force update of post-multinest fitting analysis.
sort : bool, optional
Sort results by decreasing evidence.
custom_sort: bool, optional
Additionally sort results using per-target config.
nospec : bool, optional
Exclude observed spectra from fitting (for speed).
"""
print(" Fitting")
results = []
# fit any extra models that we'll append later
extra = []
if len(cfg.fitting['extra_models']) > 0:
for m in cfg.fitting['extra_models']:
print(" ",m)
r = result.Result.get(
file,m,update_mn=update_mn,
update_an=update_an,update_json=update_json,
update_thumb=update_thumb,nospec=nospec
)
# check for files with no photometry
if not hasattr(r,'obs'):
print(" no photometry = no results")
return None
extra.append(r)
# get results for models defined by conf, overrides default
if len(cfg.fitting['models']) > 0:
for m in cfg.fitting['models']:
print(" ",m)
r = result.Result.get(
file,m,update_mn=update_mn,
update_an=update_an,update_json=update_json,
update_thumb=update_thumb,nospec=nospec
)
# check for files with no photometry
if not hasattr(r,'obs'):
print(" no photometry = no results")
return None
results.append(r)
else:
# binary tree-based fitting
t = model_director(file)
try:
print_model_tree(t)
except:
print_model_tree(t, cute=False)
while t.left is not None and t.right is not None:
print(" ",t.left.value,"vs.",t.right.value)
r1 = result.Result.get(
file,t.left.value,update_mn=update_mn,
update_an=update_an,update_json=update_json,
update_thumb=update_thumb,nospec=nospec
)
r2 = result.Result.get(
file,t.right.value,update_mn=update_mn,
update_an=update_an,update_json=update_json,
update_thumb=update_thumb,nospec=nospec
)
# check for files with no photometry
if not hasattr(r1,'obs'):
print(" no photometry = no results")
return None
# append results; only append the left result at the start, since
# on lower branches the left model has already been done
if len(results) == 0:
results.append(r1)
results.append(r2)
# move on down the tree
if r2.evidence > r1.evidence + cfg.fitting['ev_threshold']:
t = t.right
else:
t = t.left
# sort list of results by evidence (required for subsequent custom sort)
print(' Sorting')
if sort or custom_sort:
print(' sorting results by evidence')
results = [results[i] for i in result.sort_results(results)]
else:
print(' no results sorting')
# sort list of results by custom method
if custom_sort:
print(' applying db.custom_sort results sorting')
srt = db.custom_sort(file, results)
if srt is None:
print(" couldn't get config")
else:
results = [results[i] for i in srt]
# append extra results
if len(extra) > 0:
print(' Appending')
[print(' {}'.format(m)) for m in cfg.fitting['extra_models']]
results = results + extra
# save a thumb of the best fit next to the input file, update every
# time since we may have changed best fit (but not fitting itself)
results[0].sed_thumbnail(file='{}/{}_thumb.png'.format(results[0].path, results[0].id), update=True)
return results
def model_director(file,reddening=False,use_classifier=False):
"""Workflow for model fitting.
Parameters
----------
file : str
Name of the photometry file we are fitting.
reddening : bool, optional
Use models with reddening.
use_classifier : bool, optional
Use classifier.
"""
# default model tries star + up to two bb components
if reddening:
star = 'phoenix_m_av'
else:
star = 'phoenix_m'
t_star = model_tree(top=(star,), extra='modbb_disk_r',n_extra=2)
# cool star model
if reddening:
cool = 'phoenix_cool_av'
else:
cool = 'phoenix_cool'
t_cool = model_tree(top=(cool,), extra='modbb_disk_r')
# look for spectral type, LTY types get cool models, other types
# default to star models, and M5-9 (or just M) get both
tree = t_star
cool = 0
kw = utils.get_sdb_keywords(file)
if 'sp_type' in kw.keys():
if kw['sp_type'] is None:
pass
elif kw['sp_type'][:2] == 'DA' or kw['sp_type'][:2] == 'DB' or kw['sp_type'][:2] == 'DC':
tree = model_tree(top=('koester_wd',), extra='bb_disk_r')
elif kw['sp_type'][0] in 'LTY':
tree = t_cool
elif kw['sp_type'][0] == 'M':
if len(kw['sp_type']) > 1:
if kw['sp_type'][1] in '56789':
cool = 1
elif kw['sp_type'][0:2] == 'dM':
if len(kw['sp_type']) > 2:
if kw['sp_type'][2] in '56789':
cool = 1
if cool == 1:
tree = bt.Node(('top',))
tree.left = t_cool
tree.right = t_star
tree.value = ('top',)
return tree
def model_tree(top=('phoenix_m',),extra='modbb_disk_r',n_extra=1):
"""Return a binary tree for alternative models.
Parameters
----------
top : str, optional
Name of the first model.
extra : str, optional
Name of the additional model component.
n_extra : int, optional
Include two extra component branch.
"""
# the top node doesn't matter unless this tree becomes a branch
# of a bigger tree
t = bt.Node( top )
t.left = bt.Node( top )
t.right = bt.Node( top + (extra,) )
if n_extra == 2:
t.right.left = bt.Node( top + (extra,) )
t.right.right = bt.Node( top + (extra, extra) )
return t
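# Sketch of the default tree: the root compares top against top + (extra,), e.g.
# ('phoenix_m',) vs. ('phoenix_m', 'modbb_disk_r'); with n_extra=2 the right branch
# further compares one vs. two extra 'modbb_disk_r' components.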
def print_model_tree(t, cute=True):
"""Print a model tree, shortening the model names.
Unicode symbols here https://unicode-table.com/en/
Parameters
----------
t : binary tree
Tree to print.
cute : bool, optional
Print cute ascii symbols.
"""
if cute:
r = {'top':u'\u2602',
'phoenix_m':u'\u2606',
'phoenix_m_av':u'\u2605',
'phoenix_cool':u'\u2733',
'phoenix_cool_av':u'\u2739',
'modbb_disk_r':u'\u29b8',
'bb_disk_r':u'\u25cb'
}
else:
r = {'top':'t',
'phoenix_m':'p',
'phoenix_m_av':'pr',
'phoenix_cool':'c',
'phoenix_cool_av':'cr',
'modbb_disk_r':'mb',
'bb_disk_r':'b'
}
l = bt.convert(t)
for i in range(len(l)):
x = ()
if l[i] is not None:
for j in range(len(l[i])):
if l[i][j] in r.keys():
x += (r[l[i][j]],)
else:
x += (l[i][j],)
l[i] = x = ''.join(x)
bt.convert(l).show()
@lru_cache(maxsize=2)
def concat_obs(o):
"""Concatenate observations
Concatenate the observations (filters and spectra), not
shifting the normalisation of the spectra, but noting how
many observations there are in each component (as the models
will probably contain more due to colours/indices).
"""
obs_wav = np.array([],dtype=float)
obs_filt = np.array([],dtype=object)
obs_fnu = np.array([],dtype=float)
obs_e_fnu = np.array([],dtype=float)
obs_uplim = np.array([],dtype=bool)
obs_ignore = np.array([],dtype=bool)
obs_bibcode = np.array([],dtype=str)
obs_nel = np.array([],dtype=int)
ispec = -2 # start one extra from end, we will put -1 there for phot
obs_ispec = np.array([],dtype=int)
for obs in o:
if isinstance(obs,photometry.Photometry):
obs_wav = np.append(obs_wav,obs.mean_wavelength())
obs_filt = np.append(obs_filt,obs.filters)
obs_fnu = np.append(obs_fnu,obs.fnujy)
obs_e_fnu = np.append(obs_e_fnu,obs.e_fnujy)
obs_ispec = np.append(obs_ispec,np.repeat(-1,len(obs.filters)))
obs_uplim = np.append(obs_uplim,obs.upperlim)
obs_ignore = np.append(obs_ignore,obs.ignore)
obs_bibcode = np.append(obs_bibcode,obs.bibcode)
elif isinstance(obs,spectrum.ObsSpectrum):
n = len(obs.wavelength)
obs_wav = np.append(obs_wav,obs.wavelength)
try:
import sys,logging
from reportlab.pdfgen import canvas
from reportlab.platypus import Table,TableStyle
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4
from reportlab.graphics import renderPDF
from svglib.svglib import svg2rlg
from io import BytesIO
from datetime import date
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
from reportlab.platypus.tables import Table
import seaborn as sns
import os
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import cross_val_predict
from sklearn import preprocessing
# sys.path.append(r"F:\Work\Maptor\venv\HelpingModel")
from PLSR_SSS_Helper import PLSR_SSS_Helper
from RFHelper import RFHelper
from osgeo import gdal, ogr, gdal_array # I/O image data
import numpy as np # math and array handling
import matplotlib.pyplot as plt # plot figures
import pandas as pd # handling large data as table sheets
from joblib import dump, load
from operator import itemgetter
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import cross_val_predict
except Exception as e:
print('Can not import files:' + str(e))
input("Press Enter to exit!")
sys.exit(0)
class ReportModule():
def build_doc(self,path,mode):
try:
doc = canvas.Canvas(path)
doc.setLineWidth(.3)
doc.setFont('Helvetica-Bold', 16)
doc.drawString(30, 810, mode)
doc.setFont('Helvetica', 12)
doc.drawString(30, 793, 'Software Maptor v1.4')
doc.drawString(30, 779, "Developer: <NAME> & <NAME>")
doc.drawString(30, 765, 'Support: <EMAIL>')
today = date.today()
doc.drawString(30, 751, "Date: " + str(today))
doc.drawString(30, 737, '')
doc.line(20, 735, 570, 735)
return doc
except ValueError as e:
logging.error("Exception occurred", exc_info=True)
print(e)
def Clf_prepare_report(self,doc,img,roi,importance,table_M,trees,ob_score,class_prediction,ValidationData,attribute,dir_path):
try:
os.mkdir(dir_path+"/Graphs")
path = dir_path + "/Graphs/"
fig = plt.figure(figsize=(6, 6))
plt.subplot(221)
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
plt.title('RS Image - first band')
plt.subplot(222)
plt.imshow(class_prediction, cmap=plt.cm.Spectral)
plt.title('Classification result')
plt.subplot(223)
plt.imshow(roi, cmap=plt.cm.Spectral)
plt.title('Training Data')
plt.subplot(224)
plt.imshow(ValidationData, cmap=plt.cm.Spectral)
plt.title('Validation Data')
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 30, 210)
fig.clf()
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
plt.title('RS Image - first band')
plt.savefig(path + "'RS_Image.png", dpi=300)
plt.clf()
plt.imshow(class_prediction, cmap=plt.cm.Spectral)
plt.title('Classification result')
plt.savefig(path + "Classification_result.png", dpi=300)
plt.clf()
plt.imshow(roi, cmap=plt.cm.Spectral)
plt.title('Training Data')
plt.savefig(path + "Training_Data.png", dpi=300)
plt.clf()
plt.imshow(ValidationData, cmap=plt.cm.Spectral)
plt.title('Validation Data')
plt.savefig(path + "Validation_Data.png", dpi=300)
plt.clf()
doc.showPage()
# doc.line(20, 810, 570, 810)
# doc.setLineWidth(.3)
# doc.setFont('Helvetica-Bold', 14)
# doc.drawString(30, 790, 'Section 1: General Information and Training')
# doc.drawString(30, 770, "Section 2: Validation")
# doc.line(20, 750, 570, 750)
n_samples = (roi > 0).sum()
# doc.drawString(30, 730, "Section :1")
doc.setFont('Helvetica', 12)
doc.drawString(30, 710, 'Image Extent: ' + str(img.shape[0]) + " x " + str(img.shape[1]))
doc.drawString(30, 690, 'Number of Bands: ' + str(len(importance)))
doc.drawString(30, 670, 'Field Name (shape file) of your classes: ' + attribute)
doc.drawString(30, 650, 'Random Forest Training: ')
doc.drawString(30, 630, 'Number of Trees: ' + str(trees))
doc.drawString(30, 610, 'Number of training Pixel: ' + str(n_samples))
# What are our classification labels?
labels = np.unique(roi[roi > 0])
# print('Number of classes :'+ labels.size)
doc.drawString(30, 590, 'Number of classes :' + str(labels.size))
doc.drawString(30, 570, 'Out-Of-Bag prediction of accuracy: ' + str(ob_score))
# doc.showPage()
X = img[roi > 0, :]
y = roi[roi > 0]
data = ["Band","Importance"]
data= [(k, v) for k, v in importance.items()]
data2 = data[:]
# dummy = [['00', '01'],
# ['10', '11'],
# ['20', '21'],
# ['30', '31'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['10', '11'],
# ['20', '21'],
# ['30', '31'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['00', '01'],
# ['10', '11'],
# ['20', '21'],
# ['30', '31'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['10', '11'],
# ['20', '21'],
# ['30', '31'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['00', '01'],
# ['10', '11'],
# ['20', '21'],
# ['30', '31'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['10', '11'],
# ['20', '21'],
# ['30', '31'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['00', '01'],
# ['10', '11'],
# ['20', '21'],
# ['30', '31'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21'],
# ['10', '11'],
# ['20', '21'],
# ['30', '31'],
# ['20', '21'],
# ['20', '21'],
# ['20', '21']]
#
# data.extend(dummy)
data2.sort(key=lambda x: x[1], reverse=True)
data.insert(0, ("Band", "Importance"))
data2.insert(0, ("Band", "Importance"))
fig = plt.figure(figsize=(6, 6))
sns.heatmap(table_M, annot=True, cmap="BuPu", fmt='g')
#plt.show()
imgdata = BytesIO()
fig.savefig(path+"pred_acc_training.png")
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30,520,"Convolution matrix (prediction accuracy of the training data):")
renderPDF.draw(drawing, doc, 30, 30)
doc.showPage()
doc.drawString(30, 700, "Band importance (left: ordered by band number | right: ordered by importance):")
if (len(data) > 21):
chunks = (self.chunks(data, 20))
ordered_chunks = (self.chunks(data2, 20))
iterationNumer = 0
for unorder, ordered in zip(chunks, ordered_chunks):
if iterationNumer != 0:
unorder.insert(0, ("Band", "Importance"))
ordered.insert(0, ("Band", "Importance"))
unordered_table = Table(unorder)
ordered_table = Table(ordered)
unordered_table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
ordered_table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
unordered_table.wrapOn(doc, 60, 200)
unordered_table.drawOn(doc, 60, 200)
ordered_table.wrapOn(doc, 280, 200)
ordered_table.drawOn(doc, 280, 200)
# else:
# unordered_table.wrapOn(doc, 60, 400)
# unordered_table.drawOn(doc, 60, 400)
#
# ordered_table.wrapOn(doc, 280, 400)
# ordered_table.drawOn(doc, 280, 400)
columns = [""]
ordered_table = Table(columns)
unordered_table = Table(columns)
doc.showPage()
iterationNumer += 1
else:
table = Table(data)
table2 = Table(data2)
table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
table2.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
table.wrapOn(doc, 60, 400)
table.drawOn(doc, 60, 400)
table2.wrapOn(doc, 280, 400)
table2.drawOn(doc, 280, 400)
doc.showPage()
#print(trees)
#print(table)
#print(importance)
return doc
except ValueError as e:
logging.error("Exception occurred", exc_info=True)
print(e)
def Clf_prepare_section3(self,doc,X_v,y_v,convolution_mat,df_sum_mat,score,roi_v,model_path,Image_savePath,dir_path):
try:
#print(Image_savePath+"here.................!!")
dir_path +="/Graphs/"
n_samples = (roi_v > 0).sum()
#print("Number of validation Pixels: "+ str(n_samples))
doc.setFont('Helvetica-Bold', 12)
doc.drawString(30, 780, "Number of validation Pixels: "+ str(n_samples))
doc.setFont('Helvetica-Bold', 12)
#print(score)
doc.drawString(30, 760, str(score))
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 720, "Convolution matrix (prediction accuracy of the validation data")
convolution_mat.index = convolution_mat.index.rename('truth')
convolution_mat.columns = convolution_mat.columns.rename('predict')
del convolution_mat['All']
convolution_mat = convolution_mat.drop('All', axis=0)
fig = plt.figure(figsize=(6, 5))
sns.heatmap(convolution_mat, annot=True, cmap="BuPu", fmt='g')
# plt.show()
imgdata = BytesIO()
fig.savefig(dir_path + "pred_acc_validation.png")
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 40, 300)
fig.clf()
df_sum_mat = df_sum_mat.round(3)
accuracy = df_sum_mat.iloc[-3:]
df_sum_mat = df_sum_mat.head(-3)
r, c = df_sum_mat.shape
seq = list(range(1, r+1))
df_sum_mat.insert(0,"band",seq)
data = np.array(df_sum_mat).tolist()
data.insert(0,("class","precision","recall","f1-score","support"))
t1 = Table(data)
t1.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
doc.showPage()
doc.drawString(30, 760,"Precision, recall, F-measure and support for each class:")
t1.wrapOn(doc, 30, 500)
t1.drawOn(doc, 90, 500)
# acc_col_name = ['accuracy','macro avg','weighted avg']
# accuracy.insert(0,".",acc_col_name)
# acc_data = np.array(accuracy).tolist()
# t2 = Table(acc_data)
#
# t2.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
# ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
# t2.wrapOn(doc, 30, 100)
# t2.drawOn(doc, 330, 100)
doc.setFont('Helvetica', 8)
doc.drawString(20, 370, "Image Saved to Path: "+Image_savePath)
# if model_path!="/":
# doc.drawString(30, 20, "Model Saved to Path: Model not Save")
# else:
doc.drawString(20, 350, "Model Saved to Path:"+ model_path)
return doc
except ValueError as e:
logging.error("Exception occurred", exc_info=True)
print(e)
def make_rf_reg_report(self,doc,img,RFHelper,dir_path):
# Display images
try:
os.mkdir(dir_path + "/Graphs")
path = dir_path + "/Graphs/"
width, height = A4
print("here........."+RFHelper.reportpath)
print(RFHelper.prediction_map)
print(RFHelper.modelsavepath)
print(RFHelper.img_path)
print(RFHelper.train_data)
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 700,"DIRECTORIES:")
doc.setFont('Helvetica', 7)
doc.drawString(30, 680, "Remote Sensing Image: " + RFHelper.img_path)
doc.drawString(30, 660, "Shape file: " + RFHelper.train_data)
doc.drawString(30, 640, "Report Save Path: "+ RFHelper.reportpath)
doc.drawString(30, 620, "Regression image saved to: " +RFHelper.prediction_map)
if RFHelper.modelsavepath !='/':
doc.drawString(30, 600, "Model saved to: "+RFHelper.modelsavepath)
else:
doc.drawString(30, 600, "Model saved to: Model not saved")
doc.line(20, 580, 570, 580)
if img.shape[0] > img.shape[1]:
fig = plt.figure(figsize=(5, 4))
plt.subplot(121)
print(img.shape)
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
roi_positions = np.where(RFHelper.training > 0)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('first RS band and sample points', fontsize=8)
plt.title('RS image - first band')
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 160)
fig.clf()
plt.close(fig)
fig = plt.figure(figsize=(5, 4))
plt.imshow(RFHelper.prediction, cmap=plt.cm.Spectral)
plt.title('Prediction')
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 400, 160)
fig.clf()
plt.close(fig)
doc.showPage()
if img.shape[0]<= img.shape[1]:
fig = plt.figure(figsize=(5, 4))
plt.subplot(121)
print(img.shape)
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
roi_positions = np.where(RFHelper.training > 0)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('first RS band and sample points', fontsize=8)
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 360)
# else:
# renderPDF.draw(drawing, doc, 50, 250)
fig.clf()
plt.close(fig)
fig = plt.figure(figsize=(5, 4))
plt.imshow(RFHelper.prediction, cmap=plt.cm.Spectral)
plt.title('Prediction')
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 70, 60)
fig.clf()
plt.close(fig)
doc.showPage()
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
roi_positions = np.where(RFHelper.training > 0)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('first RS band and sample points', fontsize=8)
plt.savefig(path + "RS image-first_band.png", dpi=300)
plt.clf()
# plt.imshow(RFHelper.training, cmap=plt.cm.Spectral) # data = roi && cmap = plt.cm.Spectral
# plt.title('Training Image')
# plt.savefig(path + "Training_Image.png", dpi=300)
# plt.clf()
plt.imshow(RFHelper.prediction, cmap=plt.cm.Spectral)
plt.title('Prediction')
plt.savefig(path + "Prediction.png", dpi=300)
plt.clf()
# doc.line(20,800, 570, 800)
# doc.setLineWidth(.3)
# doc.setFont('Helvetica-Bold', 14)
# doc.drawString(30, 780, 'Section 1: General Information and Training')
# doc.drawString(30, 765, "Section 2: Validation")
# doc.line(20, 750, 570, 750)
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 730, 'Section 1:')
doc.setFont('Helvetica', 14)
doc.drawString(30, 710,"Image extent: "+RFHelper.helper.imageExtent + "(Rows x Columns)")
doc.drawString(30, 690, "Number of Bands: "+ str(RFHelper.helper.BandNumber))
doc.drawString(30, 670, "Field name (shape file) of your classes: "+RFHelper.helper.FieldName)
doc.setFont('Helvetica', 14)
doc.drawString(30,650," Random Forrest Training")
doc.setFont('Helvetica', 14)
doc.drawString(30, 630, " Number of Tress: "+ str(RFHelper.helper.TreesNo))
doc.drawString(30, 610, " Number of samples: "+ str(RFHelper.helper.SampleNo))
doc.drawString(30, 590, " Split size for Test: "+str(RFHelper.helper.SplitSize))
doc.drawString(30, 570, " Training samples: "+str(RFHelper.helper.TrainingSample))
doc.drawString(30, 550, " Test sample: "+str(RFHelper.helper.TestSample))
doc.drawString(30, 530, "Co efficient of determination R^2 of the prediction : "+RFHelper.helper.Coeff)
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 510, "left: ordered by band number| right: ordered by importance ")
data = ["Band", "Importance"]
data = [(k, v) for k, v in RFHelper.helper.BandImportance.items()]
# dummy = [('Band',1.33),('Band',5),('Band',4),('Band',2),('Band',6),('Band',8),('Band',43),('Band',3),('Band',113),('Band',233),('Band',13),('Band',133)]
# data.extend(dummy)
data2 = data[:]
data2.sort(key=lambda x: x[1], reverse=True)
data.insert(0, ("Band", "Importance"))
data2.insert(0, ("Band", "Importance"))
datalen = len(data)
print((datalen))
if(len(data)>21):
chunks = (self.chunks(data,20))
ordered_chunks = (self.chunks(data2, 20))
iterationNumer = 0
for unorder,ordered in zip(chunks,ordered_chunks):
if iterationNumer != 0:
unorder.insert(0, ("Band", "Importance"))
ordered.insert(0, ("Band", "Importance"))
unordered_table = Table(unorder)
ordered_table = Table(ordered)
unordered_table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
ordered_table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
if iterationNumer == 0:
unordered_table.wrapOn(doc, 60, 100)
unordered_table.drawOn(doc, 60, 100)
ordered_table.wrapOn(doc, 280, 100)
ordered_table.drawOn(doc,280, 100)
else:
unordered_table.wrapOn(doc, 60, 400)
unordered_table.drawOn(doc, 60, 400)
ordered_table.wrapOn(doc, 280, 400)
ordered_table.drawOn(doc, 280, 400)
columns = [""]
ordered_table = Table(columns)
unordered_table = Table(columns)
doc.showPage()
iterationNumer += 1
else:
table = Table(data)
table2 = Table(data2)
table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
table2.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
table.wrapOn(doc, 0,0)
table.drawOn(doc, 60, 100)
table2.wrapOn(doc, 60, 100)
table2.drawOn(doc, 280, 100)
doc.showPage()
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 790, 'Section 2:')
doc.setFont('Helvetica', 14)
doc.drawString(30, 770, "n of the data: " + str(RFHelper.helper.n_testdata))
doc.drawString(30, 750, "Mean of the Variable : " + str(RFHelper.helper.meanvariable))
doc.drawString(30, 730, "Standard Deviation of the Variable: " + str(round(RFHelper.helper.stdDev,2)))
doc.drawString(30, 710, "----------------------------------------")
doc.setFont('Helvetica', 14)
doc.drawString(30, 670, " Mean Absolute Error : " + str(round(RFHelper.helper.absError,2)))
doc.drawString(30, 650, " Mean Squared Error:" + str(round(RFHelper.helper.meanSqError,2)))
doc.drawString(30, 630, " RMSE:" + str(round(RFHelper.helper.RMSE,2)))
x = str(round(RFHelper.helper.MAPE,2))
doc.drawString(30, 600, " Mean absoulute percentage error(MAPE)/ Accuracy :" + x + " %")
doc.drawString(30, 560, "----------------------------------------")
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 540, "Co efficient of determination R^2 of the prediction : " + str(round(RFHelper.helper.coeffR,2)))
fig, ax = plt.subplots()
ax.scatter(RFHelper.helper.testlabels, RFHelper.helper.pred_test_ds)
ax.plot([RFHelper.helper.testlabels.min(), RFHelper.helper.testlabels.max()], [RFHelper.helper.testlabels.min(), RFHelper.helper.testlabels.max()], 'k--', lw=1)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
plt.savefig(path + "r2_Prediction.png", dpi=300)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 30, 110)
fig.clf()
plt.close(fig)
return doc
except ValueError as e:
logging.error("Exception occurred", exc_info=True)
print(e)
def make_plsr_sss_report(self, doc, img, dir_path, PLSR_SSS_Helper):
# self, doc, img, TrainingData, X, mse, msemin, component, y, y_c, y_cv, attribute, importance, prediction, dir_path, reportpath, prediction_map, modelsavepath, img_path, trn_path
try:
# reportparameters = PLSR_SSS_Helper()
# reportparameters = helpermodel
print(PLSR_SSS_Helper)
print(type(PLSR_SSS_Helper))
os.mkdir(dir_path + "/Graphs")
path = dir_path + "/Graphs/"
width, height = A4
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 705, "DIRECTORIES:")
doc.setFont('Helvetica', 10)
doc.drawString(30, 680, "Remote Sensing Image: " + str(PLSR_SSS_Helper.img_path))
doc.drawString(30, 660, "Shape file: " + str(PLSR_SSS_Helper.tran_path))
doc.drawString(30, 640, "Report Save Path: " + str(PLSR_SSS_Helper.reportpath))
doc.drawString(30, 620, "Regression image saved to: " + str(PLSR_SSS_Helper.prediction_map))
# if modelsavepath != '/':
doc.drawString(30, 600, "Model saved to: " + str(PLSR_SSS_Helper.modelsavepath))
# else:
# doc.drawString(30, 600, "Model saved to: Model not saved")
doc.line(20, 580, 570, 580)
mask = np.copy(img[:, :, 0])
mask[mask > 0.0] = 1.0 # all actual pixels have a value of 1.0
# plt.imshow(mask)
prediction_ = PLSR_SSS_Helper.prediction * mask
if img.shape[0] > img.shape[1]:
fig = plt.figure(figsize=(5, 4))
plt.subplot(121)
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
roi_positions = np.where(PLSR_SSS_Helper.train_data > 0)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('first RS band and sample points', fontsize=8)
plt.subplot(122)
plt.imshow(prediction_, cmap=plt.cm.Spectral, vmin=PLSR_SSS_Helper.y.min(), vmax=PLSR_SSS_Helper.y.max())
plt.title('Prediction: ' + PLSR_SSS_Helper.attribute, fontsize=8)
plt.colorbar()
imgdata = BytesIO()
plt.savefig(path + "RS image-first_band.png", dpi=300)
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 160)
fig.clf()
plt.close(fig)
doc.showPage()
if img.shape[0] <= img.shape[1]:
fig = plt.figure(figsize=(5, 4))
plt.subplot(121)
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
roi_positions = np.where(PLSR_SSS_Helper.train_data > 0)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('first RS band and sample points')
plt.subplot(122)
plt.imshow(prediction_, cmap=plt.cm.Spectral, vmin=PLSR_SSS_Helper.y.min(), vmax=PLSR_SSS_Helper.y.max())
plt.title('Prediction')
plt.colorbar()
imgdata = BytesIO()
plt.savefig(path + "RS image-first_band.png", dpi=300)
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 100, 160)
fig.clf()
plt.close(fig)
doc.showPage()
n_samples = (PLSR_SSS_Helper.train_data > 0).sum()
# doc.drawString(30, 720, 'We have {n} training samples'.format(n=n_samples))
# Subset the image dataset with the training image = X
# Mask the classes on the training dataset = y
# These will have n_samples rows
features = pd.DataFrame(PLSR_SSS_Helper.X)
band_names = []
for i in range(PLSR_SSS_Helper.X.shape[1]):
# for i in range(0,2500):
nband = "Band_" + str(i + 1)
band_names.append(nband)
features.columns = band_names
print(features.shape)
doc.line(20, 810, 570, 810)
doc.setLineWidth(.3)
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 785, 'Section : General Information and Training')
doc.line(20, 770, 570, 770)
doc.setFont('Helvetica', 10)
doc.drawString(30, 700,
'The Image Extent: ' + str(img.shape[0]) + " x " + str(img.shape[1]) + " (Rows x Columns)")
doc.drawString(30, 680, 'The number of bands is: ' + str(features.shape[1]))
# doc.drawString(30, 680,'The shape of our features is: '+str(features.shape))
doc.drawString(30, 660, 'Selected Attribute: ' + str(PLSR_SSS_Helper.attribute))
doc.drawString(30, 640, 'The number of Sample is: ' + str(features.shape[0]))
doc.drawString(30, 620, '---------------------------------------------------------')
#
features['value'] = PLSR_SSS_Helper.y
features.head()
# Labels are the values we want to predict
labels = np.array(features['value'])
# Remove the labels from the features
# axis 1 refers to the columns
features = features.drop('value', axis=1)
# Saving feature names for later use
feature_list = list(features.columns)
# Convert to numpy array
features = np.array(features)
# doc.drawString(30, 620, 'Training Features Shape: ' + str(features.shape))
# doc.drawString(30, 600, 'Training Labels Shape: ' + str(labels.shape))
# return [doc, labels, features]
#
# doc.showPage()
suggested_comp = PLSR_SSS_Helper.msemin + 1
print("Suggested number of components: ", suggested_comp)
doc.drawString(30, 560, "Selected number of PLS components: " + str(suggested_comp))
fig = plt.figure(figsize=(5, 4))
with plt.style.context(('ggplot')):
plt.plot(PLSR_SSS_Helper.component, np.array(PLSR_SSS_Helper.mse), '-v', color='blue', mfc='blue')
plt.plot(PLSR_SSS_Helper.component[PLSR_SSS_Helper.msemin], np.array(PLSR_SSS_Helper.mse)[PLSR_SSS_Helper.msemin], 'P',
ms=10, mfc='red')
plt.xlabel('Number of PLS components')
plt.ylabel('MSE')
plt.title('PLSR MSE vs. Components')
plt.xlim(left=-1)
plt.savefig(path + "PLSR MSE vs. Components.png", dpi=300)
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 150)
fig.clf()
plt.close(fig)
doc.showPage()
score_c = r2_score(PLSR_SSS_Helper.y, PLSR_SSS_Helper.y_c)
score_cv = r2_score(PLSR_SSS_Helper.y, PLSR_SSS_Helper.y_cv)
# Calculate mean squared error for calibration and cross validation
mse_c = mean_squared_error(PLSR_SSS_Helper.y, PLSR_SSS_Helper.y_c)
mse_cv = mean_squared_error(PLSR_SSS_Helper.y, PLSR_SSS_Helper.y_cv)
# print("2 HERE!!!!!! mse_cv"+str(mse_cv))
print('R2 calib: %5.3f' % score_c)
doc.drawString(30, 730, 'R2 of the training: %5.3f' % score_c)
print('R2 LOOCV: %5.3f' % score_cv)
doc.drawString(30, 710, 'R2 LOOCV: %5.3f' % score_cv)
print('MSE calib: %5.3f' % mse_c)
doc.drawString(30, 690, 'MSE of the training: %5.3f' % mse_c)
print('MSE LOOCV: %5.3f' % mse_cv)
doc.drawString(30, 670, 'MSE LOOCV: %5.3f' % mse_cv)
imp = {}
for i in range(features.shape[1]):
print('Band {}: {}'.format(i + 1, round(PLSR_SSS_Helper.importance[i], 2)))
imp['Band{}'.format(i + 1)] = round(PLSR_SSS_Helper.importance[i], 2)
data = [(k, v) for k, v in imp.items()]
# dummy = [('Band',1.33),('Band',5),('Band',4),('Band',2),('Band',6),('Band',8),('Band',43),('Band',3),('Band',113),('Band',233),('Band',13),('Band',133)]
# data.extend(dummy)
data2 = data[:]
data2.sort(key=lambda x: x[1], reverse=True)
data.insert(0, ("Band", "Importance"))
data2.insert(0, ("Band", "Importance"))
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 510, "Band importance (left: ordered by band number | right: ordered by importance):")
doc.setFont('Helvetica', 10)
if (len(data) > 21):
chunks = (self.chunks(data, 20))
ordered_chunks = (self.chunks(data2, 20))
iterationNumer = 0
for unorder, ordered in zip(chunks, ordered_chunks):
if iterationNumer != 0:
unorder.insert(0, ("Band", "Importance"))
ordered.insert(0, ("Band", "Importance"))
unordered_table = Table(unorder)
ordered_table = Table(ordered)
unordered_table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
ordered_table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
if iterationNumer == 0:
unordered_table.wrapOn(doc, 60, 100)
unordered_table.drawOn(doc, 60, 100)
ordered_table.wrapOn(doc, 280, 100)
ordered_table.drawOn(doc, 280, 100)
else:
unordered_table.wrapOn(doc, 60, 400)
unordered_table.drawOn(doc, 60, 400)
ordered_table.wrapOn(doc, 280, 400)
ordered_table.drawOn(doc, 280, 400)
columns = [""]
ordered_table = Table(columns)
unordered_table = Table(columns)
doc.showPage()
iterationNumer += 1
else:
table = Table(data)
table2 = Table(data2)
table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
table2.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
table.wrapOn(doc, 0, 0)
table.drawOn(doc, 60, 100)
table2.wrapOn(doc, 60, 100)
table2.drawOn(doc, 280, 100)
doc.showPage()
doc.line(20, 810, 570, 810)
doc.setLineWidth(.3)
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 785, 'Section : Validation')
doc.line(20, 770, 570, 770)
z = np.polyfit(PLSR_SSS_Helper.y, PLSR_SSS_Helper.y_c, 1)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(5, 5))
ax.scatter(PLSR_SSS_Helper.y_c, PLSR_SSS_Helper.y, c='red', edgecolors='k')
# Plot the best fit line
ax.plot(np.polyval(z, PLSR_SSS_Helper.y), PLSR_SSS_Helper.y, c='blue', linewidth=1)
# Plot the ideal 1:1 line
ax.plot(PLSR_SSS_Helper.y, PLSR_SSS_Helper.y, color='green', linewidth=1)
plt.title('$R^{2}$ (CV): ' + str(score_cv))
plt.xlabel('Predicted')
plt.ylabel('Measured')
plt.legend(['best fit line', 'ideal 1:1 line', 'samples'])
imgdata = BytesIO()
plt.savefig(path + "R2CV", dpi=300)
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 300)
fig.clf()
plt.close(fig)
# In[16]:
# Calculate the absolute errors
errors = abs(PLSR_SSS_Helper.y - PLSR_SSS_Helper.y_cv)
# Print out the mean absolute error (mae)
doc.setFont('Helvetica', 10)
doc.drawString(30, 270, '----------------------------')
print('-------------')
print('n of the test data: {}'.format(len(labels)))
doc.drawString(30, 250, 'n of the test data: ' + str((len(labels))))
print('Mean of the variable: {:.2f}'.format(np.mean(labels)))
doc.drawString(30, 230, 'Mean of the Samples: ' + str(round((np.mean(labels)), 2)))
print('Standard deviation of the variable: {:.2f}'.format(np.std(labels)))
doc.drawString(30, 210, 'Standard deviation of the Samples: ' + str(round(np.std(labels), 2)))
print('-------------')
doc.drawString(30, 190, '----------------------------')
print('Mean Absolute Error: {:.2f}'.format(np.mean(errors)))
doc.drawString(30, 170, 'MAE: ' + str(round(np.mean(errors), 2)))
print('Mean squared error: {:.2f}'.format(mse_cv))
doc.drawString(30, 150, 'MSE: ' + str(round(mse_cv, 2)))
print('RMSE: {:.2f}'.format(np.sqrt(mse_cv)))
doc.drawString(30, 130, 'RMSE: ' + str(round(np.sqrt(mse_cv), 2)))
RPD = np.std(labels) / np.sqrt(mse_cv)
print('RPD: {:.2f} | how many times the RMSE of prediction fits into the standard deviation of the samples'.format(
RPD))
doc.drawString(30, 110, 'RPD: ' + str(
round(RPD, 2)) + " | how many times the RMSE of prediction fits into the standard deviation of the samples")
mape = 100 * (errors / labels)
# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
print('mean absolute percentage error (MAPE): {:.2f} %'.format(np.mean(mape)))
doc.drawString(30, 90, 'Mean Absolute Percentage Error (MAPE): ' + str(round(np.mean(mape), 2)) + " %")
print('accuracy (100 % - mape): {:.2f} %'.format(accuracy))
doc.drawString(30, 70, 'Accuracy (100 % - MAPE): ' + str(round(accuracy, 2)) + " %")
print('-------------')
doc.drawString(30, 50, '----------------------------')
# The coefficient of determination: 1 is perfect prediction
print('Coefficient of determination r²: {:.2f}'.format(r2_score(PLSR_SSS_Helper.y, PLSR_SSS_Helper.y_cv)))
doc.drawString(30, 30,
'Coefficient of determination r²: ' + str(round(r2_score(PLSR_SSS_Helper.y, PLSR_SSS_Helper.y_cv), 2)))
#
return doc
except ValueError as e:
logging.error("Exception occurred", exc_info=True)
print(e)
def make_plsr_lds_report(self,doc,dir_path,PLSR_LDS_Helper):
os.mkdir(dir_path+"/Graphs")
path = dir_path +"/Graphs/"
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 705, "DIRECTORIES:")
doc.setFont('Helvetica', 10)
doc.drawString(30, 680, "Remote Sensing Image: " + str(PLSR_LDS_Helper.img_path))
doc.drawString(30, 660, "Shape file: " + str(PLSR_LDS_Helper.tran_path))
doc.drawString(30, 640, "Report Save Path: " + str(PLSR_LDS_Helper.reportpath))
doc.drawString(30, 620, "Regression image saved to: " + str(PLSR_LDS_Helper.prediction_map))
# if modelsavepath != '/':
doc.drawString(30, 600, "Model saved to: " + str(PLSR_LDS_Helper.modelsavepath))
doc.line(20, 580, 570, 580)
mask = np.copy(PLSR_LDS_Helper.img[:, :, 0])
mask[mask > 0.0] = 1.0 # all actual pixels have a value of 1.0
prediction_ = PLSR_LDS_Helper.prediction * mask
if PLSR_LDS_Helper.img.shape[0] > PLSR_LDS_Helper.img.shape[1]:
fig = plt.figure(figsize=(5, 4))
plt.subplot(121)
print(PLSR_LDS_Helper.img.shape)
plt.imshow(PLSR_LDS_Helper.img[:, :, 0], cmap=plt.cm.Greys_r)
roi_positions = np.where(PLSR_LDS_Helper.train_data > 0)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('RS image - first band')
# plt.subplot(122)
# plt.imshow(PLSR_LDS_Helper.train_data, cmap=plt.cm.Spectral) # data = roi && cmap = plt.cm.Spectral
# plt.title('Training Image')
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 160)
# else:
# renderPDF.draw(drawing, doc, 50, 250)
fig.clf()
plt.close(fig)
fig = plt.figure(figsize=(5, 4))
plt.imshow(PLSR_LDS_Helper.prediction, cmap=plt.cm.Spectral)
plt.colorbar()
plt.title('Prediction')
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 400, 160)
fig.clf()
plt.close(fig)
doc.showPage()
if PLSR_LDS_Helper.img.shape[0] <= PLSR_LDS_Helper.img.shape[1]:
fig = plt.figure(figsize=(5, 4))
plt.subplot(121)
print(PLSR_LDS_Helper.img.shape)
plt.imshow(PLSR_LDS_Helper.img[:, :, 0], cmap=plt.cm.Greys_r)
roi_positions = np.where(PLSR_LDS_Helper.train_data > 0)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('RS image - first band')
# plt.subplot(122)
# plt.imshow(PLSR_LDS_Helper.train_data, cmap=plt.cm.Spectral) # data = roi && cmap = plt.cm.Spectral
# plt.title('Training Image')
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 160)
# else:
# renderPDF.draw(drawing, doc, 50, 250)
fig.clf()
plt.close(fig)
fig = plt.figure(figsize=(5, 4))
plt.imshow(PLSR_LDS_Helper.prediction, cmap=plt.cm.Spectral)
plt.colorbar()
plt.title('Prediction')
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 100, 160)
fig.clf()
plt.close(fig)
doc.showPage()
roi_positions = np.where(PLSR_LDS_Helper.train_data > 0)
plt.imshow(PLSR_LDS_Helper.img[:, :, 0], cmap=plt.cm.Greys_r)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('RS image - first band')
plt.savefig(path + "RS image-first_band.png", dpi=300)
plt.clf()
# plt.imshow(PLSR_LDS_Helper.train_data, cmap=plt.cm.Spectral) # data = roi && cmap = plt.cm.Spectral
# plt.title('Training Image')
# plt.savefig(path + "Training_Image.png", dpi=300)
# plt.clf()
plt.imshow(PLSR_LDS_Helper.prediction, cmap=plt.cm.Spectral)
plt.colorbar()
plt.title('Prediction')
plt.savefig(path + "Prediction.png", dpi=300)
plt.clf()
n_samples = (PLSR_LDS_Helper.train_data > 0).sum()
print(
'We have {n} training samples'.format(n=n_samples)) # Subset the image dataset with the training image = X
print('The shape of our features is:', PLSR_LDS_Helper.features.shape)
print('The number of Spectra is:', PLSR_LDS_Helper.features.shape[0])
print('The number of bands is:', PLSR_LDS_Helper.features.shape[1])
doc.line(20, 810, 570, 810)
doc.setLineWidth(.3)
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 785, 'Section : General Information and Training')
doc.line(20, 770, 570, 770)
doc.setFont('Helvetica', 10)
doc.drawString(30, 700,
'The Image Extent: ' + str(PLSR_LDS_Helper.img.shape[0]) + " x " + str(PLSR_LDS_Helper.img.shape[1]) + " (Rows x Columns)")
doc.drawString(30, 680, 'The number of bands is: ' + str(PLSR_LDS_Helper.features.shape[1]))
# doc.drawString(30, 680,'The shape of our features is: '+str(features.shape))
doc.drawString(30, 660, 'Selected Attribute: ' + str(PLSR_LDS_Helper.attribute))
doc.drawString(30, 640, 'The number of Samples is: ' + str(PLSR_LDS_Helper.features.shape[0]))
doc.drawString(30, 620, '---------------------------------------------------------')
# from sklearn import preprocessing
#
# min_max_scaler = preprocessing.MinMaxScaler()
#
# xscaled = min_max_scaler.fit_transform(features)
# features_ = pd.DataFrame(xscaled)
#
# features_.transpose().plot(figsize=(20, 7))
# plt.legend(bbox_to_anchor=(0.1, -0.1), loc='upper left', ncol=7)
# plt.title('Reference Spectra')
# plt.plot()
#
#
print('Training Features Shape:', PLSR_LDS_Helper.train_features.shape)
print('Training Labels Shape:', PLSR_LDS_Helper.train_labels.shape)
print('Testing Features Shape:', PLSR_LDS_Helper.test_features.shape)
print('Testing Labels Shape:', PLSR_LDS_Helper.test_labels.shape)
#
msemin = np.argmin(PLSR_LDS_Helper.mse)
suggested_comp = msemin + 1
print("Suggested number of components: ", suggested_comp)
fig = plt.figure(figsize=(5, 4))
with plt.style.context(('ggplot')):
plt.plot(PLSR_LDS_Helper.component, np.array(PLSR_LDS_Helper.mse), '-v', color='blue', mfc='blue')
plt.plot(PLSR_LDS_Helper.component[msemin], np.array(PLSR_LDS_Helper.mse)[msemin], 'P', ms=10, mfc='red')
plt.xlabel('Number of PLS components')
plt.ylabel('MSE')
plt.title('PLSR MSE vs. Components')
plt.xlim(left=-1)
plt.savefig(path + "PLSR MSE vs. Components.png", dpi=300)
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 150)
fig.clf()
plt.close(fig)
doc.showPage()
# print(sorted_imp)
imp = {}
for i in range(PLSR_LDS_Helper.features.shape[1]):
print('Band {}: {}'.format(i + 1, round(PLSR_LDS_Helper.importance[i], 2)))
imp['Band{}'.format(i + 1)] = round(PLSR_LDS_Helper.importance[i], 2)
data = [(k, v) for k, v in imp.items()]
# dummy = [('Band',1.33),('Band',5),('Band',4),('Band',2),('Band',6),('Band',8),('Band',43),('Band',3),('Band',113),('Band',233),('Band',13),('Band',133)]
# data.extend(dummy)
data2 = data[:]
data2.sort(key=lambda x: x[1], reverse=True)
data.insert(0, ("Band", "Importance"))
data2.insert(0, ("Band", "Importance"))
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 710, "Band importance (left: ordered by band number | right: ordered by importance):")
doc.setFont('Helvetica', 10)
if (len(data) > 21):
chunks = (self.chunks(data, 20))
ordered_chunks = (self.chunks(data2, 20))
iterationNumer = 0
for unorder, ordered in zip(chunks, ordered_chunks):
if iterationNumer != 0:
unorder.insert(0, ("Band", "Importance"))
ordered.insert(0, ("Band", "Importance"))
unordered_table = Table(unorder)
ordered_table = Table(ordered)
unordered_table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
ordered_table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
if iterationNumer == 0:
unordered_table.wrapOn(doc, 60, 100)
unordered_table.drawOn(doc, 60, 100)
ordered_table.wrapOn(doc, 280, 100)
ordered_table.drawOn(doc, 280, 100)
else:
unordered_table.wrapOn(doc, 60, 400)
unordered_table.drawOn(doc, 60, 400)
ordered_table.wrapOn(doc, 280, 400)
ordered_table.drawOn(doc, 280, 400)
columns = [""]
ordered_table = Table(columns)
unordered_table = Table(columns)
doc.showPage()
iterationNumer += 1
else:
table = Table(data)
table2 = Table(data2)
table.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
table2.setStyle(TableStyle([("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)]))
table.wrapOn(doc, 0, 0)
table.drawOn(doc, 60, 100)
table2.wrapOn(doc, 60, 100)
table2.drawOn(doc, 280, 100)
doc.showPage()
#
errors = abs(PLSR_LDS_Helper.predictions_test_ds - PLSR_LDS_Helper.test_labels)
# Print out the mean absolute error (mae)
print('-------------')
print('n of the test data: {}'.format(len(PLSR_LDS_Helper.test_labels)))
print('Mean of the variable: {:.2f}'.format(np.mean(PLSR_LDS_Helper.labels)))
print('Standard deviation of the variable: {:.2f}'.format(np.std(PLSR_LDS_Helper.labels)))
print('-------------')
print('Mean Absolute Error: {:.2f}'.format(np.mean(errors)))
mse = mean_squared_error(PLSR_LDS_Helper.test_labels, PLSR_LDS_Helper.predictions_test_ds)
print('Mean squared error: '+ str(round(mse,2)))
print('RMSE: '+str(round(np.sqrt(mse),2)))
#
mape = 100 * (errors / PLSR_LDS_Helper.test_labels)
# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
print('mean absolute percentage error (MAPE) / Accuracy: {:.2f}'.format(accuracy), '%.')
print('-------------')
# The coefficient of determination: 1 is perfect prediction
print('Coefficient of determination r²: {:.2f}'.format(r2_score(PLSR_LDS_Helper.test_labels, PLSR_LDS_Helper.predictions_test_ds)))
# #
doc.drawString(30, 785, 'n of the test data: {}'.format(len(PLSR_LDS_Helper.test_labels)))
doc.drawString(30, 765,'Mean of the variable: {:.2f}'.format(np.mean(PLSR_LDS_Helper.labels)))
doc.drawString(30, 745,'Standard deviation of the variable: {:.2f}'.format(np.std(PLSR_LDS_Helper.labels)))
doc.drawString(30, 735,'------------------------------------------------------')
doc.drawString(30, 715,'Mean Absolute Error: {:.2f}'.format(np.mean(errors)))
mse = mean_squared_error(PLSR_LDS_Helper.test_labels, PLSR_LDS_Helper.predictions_test_ds)
doc.drawString(30, 685,'Mean squared error: {:.2f}'.format(mse))
doc.drawString(30, 670,'RMSE: {:.2f}'.format(np.sqrt(mse)))
mape = 100 * (errors / PLSR_LDS_Helper.test_labels)
# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
doc.drawString(30, 655,"mean absolute percentage error (MAPE) / Accuracy: "+ str(round(accuracy,2))+" %")
doc.drawString(30, 640,'----------------------------------------------------')
fig, ax = plt.subplots(figsize=(5,5))
ax.scatter(PLSR_LDS_Helper.test_labels, PLSR_LDS_Helper.predictions_test_ds)
ax.plot([PLSR_LDS_Helper.test_labels.min(), PLSR_LDS_Helper.test_labels.max()], [PLSR_LDS_Helper.test_labels.min(), PLSR_LDS_Helper.test_labels.max()], 'k--', lw=1)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
imgdata = BytesIO()
plt.savefig(path + "R2CV", dpi=300)
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 240)
fig.clf()
plt.close(fig)
# mask = np.copy(img[:, :, 0])
# mask[mask > 0.0] = 1.0 # all actual pixels have a value of 1.0
# plot mask
# plt.imshow(mask)
# mask classification and plot
# prediction_ = prediction * mask
#
# plt.subplot(121)
# plt.imshow(prediction, cmap=plt.cm.Spectral, vmax=prediction.mean() + prediction.std() * 2,
# vmin=prediction.mean() - prediction.std() * 2)
# plt.title('prediction unmasked')
#
# plt.subplot(122)
# plt.imshow(prediction_, cmap=plt.cm.Spectral, vmax=prediction_.mean() + prediction_.std() * 2,
# vmin=prediction_.mean() - prediction_.std() * 2)
# plt.title('prediction masked')
#
# plt.show()
#
#
# plt.subplot(121)
# plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
# plt.title('RS image - first band')
#
# plt.subplot(122)
# plt.imshow(prediction, cmap=plt.cm.Spectral, vmax=prediction_.mean() + prediction_.std() * 2,
# vmin=prediction_.mean() - prediction_.std() * 2)
# plt.colorbar()
#
# plt.title('Prediction')
#
# plt.show()
#
return doc
def make_rfr_sss_report(self,doc,reportpath,dir_path,train_data,prediction_map,modelsavepath,img_path,TrainingData,img,attributes,prediction, y_c, y_cv, RFR):
try:
os.mkdir(dir_path + "/Graphs")
path = dir_path + "/Graphs/"
width, height = A4
# print("here........."+reportpath)
print(prediction_map)
print(modelsavepath)
print(img_path)
print(train_data)
doc.setFont('Helvetica-Bold', 14)
doc.drawString(30, 700,"DIRECTORIES:")
doc.setFont('Helvetica', 7)
doc.drawString(30, 680, "Remote Sensing Image: " + img_path)
doc.drawString(30, 660, "Shape file: " + train_data)
doc.drawString(30, 640, "Report Save Path: "+ reportpath)
doc.drawString(30, 620, "Regression image saved to: " + prediction_map)
if modelsavepath !='/':
doc.drawString(30, 600, "Model saved to: "+modelsavepath)
else:
doc.drawString(30, 600, "Model saved to: Model not saved")
doc.line(20, 580, 570, 580)
n_samples = (TrainingData > 0).sum()
print('We have {n} training samples'.format(
n=n_samples)) # Subset the image dataset with the training image = X
roi_positions = np.where(TrainingData > 0)
if img.shape[0] > img.shape[1]:
fig = plt.figure(figsize=(5, 4))
plt.subplot(121)
print(img.shape)
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('RS image - first band')
# plt.subplot(122)
# plt.imshow(TrainingData, cmap=plt.cm.Spectral) # data = roi && cmap = plt.cm.Spectral
# plt.title('Training Image')
# #plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 160)
# else:
# renderPDF.draw(drawing, doc, 50, 250)
fig.clf()
plt.close(fig)
fig = plt.figure(figsize=(5, 4))
plt.imshow(prediction, cmap=plt.cm.Spectral)
plt.colorbar()
plt.title('Prediction')
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 400, 160)
fig.clf()
plt.close(fig)
doc.showPage()
if img.shape[0]<= img.shape[1]:
fig = plt.figure(figsize=(6, 4))
plt.subplot(121)
print(img.shape)
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('RS image - first band')
# plt.subplot(122)
# plt.imshow(TrainingData, cmap=plt.cm.Spectral) # data = roi && cmap = plt.cm.Spectral
# plt.title('Training Image')
# #plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 50, 360)
# else:
# renderPDF.draw(drawing, doc, 50, 250)
fig.clf()
plt.close(fig)
fig = plt.figure(figsize=(5, 4))
plt.imshow(prediction, cmap=plt.cm.Spectral)
plt.colorbar()
plt.title('Prediction')
# plt.show()
imgdata = BytesIO()
fig.savefig(imgdata, format='svg', bbox_inches='tight', pad_inches=0)
imgdata.seek(0) # rewind the data
drawing = svg2rlg(imgdata)
renderPDF.draw(drawing, doc, 70, 60)
fig.clf()
plt.close(fig)
doc.showPage()
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
plt.scatter(roi_positions[1], roi_positions[0], marker='x', c='r')
plt.title('RS image - first band')
plt.savefig(path + "RS image-first_band.png", dpi=300)
plt.clf()
plt.imshow(TrainingData, cmap=plt.cm.Spectral) # data = roi && cmap = plt.cm.Spectral
plt.title('Training Image')
plt.savefig(path + "Training_Image.png", dpi=300)
plt.clf()
plt.imshow(prediction, cmap=plt.cm.Spectral)
plt.colorbar()
plt.title('Prediction')
plt.savefig(path + "Prediction.png", dpi=300)
plt.clf()
settings_sns = {'axes.facecolor': 'white',
'axes.edgecolor': '0',
'axes.grid': True,
'axes.axisbelow': True,
'axes.labelcolor': '.15',
'figure.facecolor': 'white',
'grid.color': '.8',
'grid.linestyle': '--',
'text.color': '0',
'xtick.color': '0',
'ytick.color': '0',
'xtick.direction': 'in',
'ytick.direction': 'in',
'lines.solid_capstyle': 'round',
'patch.edgecolor': 'w',
'patch.force_edgecolor': True,
'image.cmap': 'Greys',
'font.family': ['serif'],
'font.sans-serif': ['Arial', 'Liberation Sans', 'DejaVu Sans', 'Bitstream Vera Sans',
'sans-serif'],
'xtick.bottom': True,
'xtick.top': True,
'ytick.left': True,
'ytick.right': True,
'axes.spines.left': True,
'axes.spines.bottom': True,
'axes.spines.right': True,
'axes.spines.top': True}
X = img[TrainingData > 0, :]
y = TrainingData[TrainingData > 0]
features = pd.DataFrame(X)
band_names = []
for i in range(X.shape[1]):
# for i in range(0,2500):
nband = "Band_" + str(i + 1)
band_names.append(nband)
# print("*******************")
# print(band_names)
# print("*******************")
features.columns = band_names
print('The shape of our features is:', features.shape)
print('The number of Spectra is:', features.shape[0])
print('The number of bands is:', features.shape[1])
features['value'] = y
# min_max_scaler = preprocessing.MinMaxScaler()
#
# xscaled = min_max_scaler.fit_transform(features)
# features_ = pd.DataFrame(xscaled)
#
# features_.transpose().plot(figsize=(20, 7))
# plt.legend(bbox_to_anchor=(0.1, -0.1), loc='upper left', ncol=7)
# plt.title('Reference Spectra')
# plt.plot()
#
# # # In[10]:
# Labels are the values we want to predict
labels =
|
np.array(features['value'])
|
numpy.array
|
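The validation block above builds its summary statistics directly from the measured and cross-validated values. A minimal, self-contained sketch of the same metric calculations, using made-up arrays rather than the helper objects from the report code, could look like this:

import numpy as np

measured = np.array([4.2, 5.1, 3.8, 6.0])      # hypothetical reference values
predicted = np.array([4.0, 5.4, 3.5, 6.3])     # hypothetical cross-validated predictions
errors = np.abs(measured - predicted)
mae = errors.mean()                             # mean absolute error
rmse = np.sqrt(np.mean((measured - predicted) ** 2))
rpd = measured.std() / rmse                     # how many times the RMSE fits into the sample spread
mape = 100 * np.mean(errors / measured)         # mean absolute percentage error
print(mae, rmse, rpd, 100 - mape)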
import torch
from torch.utils.data import Dataset
import os
import json
from random import randint
from torch import nn
from torch.nn import functional as F
import pandas as pd
import numpy as np
import librosa
from tqdm import tqdm
def make_dataset(directory, class_to_idx):
instances = []
directory = os.path.expanduser(directory)
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
item = path, class_index
instances.append(item)
return instances
class RAVDESS_LANDMARK(Dataset):
def __init__(self, root, samples=None,min_frames=25,n_mels = 128,audio=False,audio_only=False,audio_separate=False, test=False, zero_start=False, contrastive=False, mixmatch=False, random_aug=False, drop_kp=False):
super(RAVDESS_LANDMARK, self).__init__()
self.root = root
classes, class_to_idx = self._find_classes(self.root)
if samples is None:
samples = make_dataset(self.root, class_to_idx)
if len(samples) == 0:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
raise RuntimeError(msg)
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.min_frames = min_frames
self.test = test
self.zero_start = zero_start
self.contrastive = contrastive
self.mixmatch = mixmatch
self.random_aug = random_aug
self.drop_kp = drop_kp
self.audio = audio
self.audio_only = audio_only
self.n_mels = n_mels
self.audio_separate = audio_separate
self.preprocess_landmark(audio=True)
if self.audio:
self.preprocess_audio(audio=True)
def __len__(self):
return len(self.samples)
def _find_classes(self, dir):
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
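# Illustrative example (folder names invented): a root containing subfolders "angry" and "happy"
# would return (["angry", "happy"], {"angry": 0, "happy": 1}).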
def preprocess_landmark(self,audio=False):
new_samples = []
print("preprocessing landmarks")
for idx in tqdm(range(len(self.samples))):
path_ld = self.samples[idx][0]
if audio:
kx = np.zeros((90,68))
ky = np.zeros((90,68))
else:
data = pd.read_csv(path_ld)
kx = data.iloc[:,297:365].to_numpy()
ky = data.iloc[:,365:433].to_numpy()
kx = (kx - np.min(kx))/np.ptp(kx)
ky = (ky - np.min(ky))/np.ptp(ky)
if self.audio:
new_samples.append(([kx,ky],self.samples[idx][1],self.samples[idx][2]))
else:
new_samples.append(([kx,ky],self.samples[idx][1]))
self.samples = new_samples
def preprocess_audio(self,audio=False):
new_samples = []
print("preprocessing audio")
for idx in tqdm(range(len(self.samples))):
path_audio = self.samples[idx][2]
with open(path_audio, 'rb') as f:
mel_spect = np.load(f)
len_seq = mel_spect.shape[0]
# using the dB conversion inside the graph does not help; performance is significantly worse
if self.audio_separate:
mel_spect = librosa.power_to_db(mel_spect, ref=np.max)
if audio:
new_samples.append(([np.zeros((len_seq,68)),np.zeros((len_seq,68))],self.samples[idx][1],torch.Tensor(mel_spect)))
else:
new_samples.append((self.samples[idx][0],self.samples[idx][1],torch.Tensor(mel_spect)))
self.samples = new_samples
def get_class_sample_count(self):
count =np.zeros(len(self.classes), dtype=int)
for s in self.samples:
count[s[1]] +=1
weight = 1. /count
sample_weight = []
for s in self.samples:
sample_weight.append(weight[s[1]])
return count, torch.Tensor(sample_weight)
def rotate(self, out, origin=(0.5, 0.5), degrees=0):
out_rot = torch.Tensor([])
for p in out:
angle = np.deg2rad(degrees)
R = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
o = np.atleast_2d(origin)
p = np.atleast_2d(p)
p = np.squeeze((R @ (p.T-o.T) + o.T).T)
out_rot = torch.cat((out_rot,torch.Tensor(p).unsqueeze(0)))
return out_rot
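# Quick sanity check of the rotation above (assumed origin (0.5, 0.5)): rotating the point
# (1.0, 0.5) by 90 degrees gives approximately (0.5, 1.0), i.e. R @ (p - o) + o with
# R = [[cos a, -sin a], [sin a, cos a]].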
def __getitem__(self, index: int):
target = self.samples[index][1]
kx,ky = self.samples[index][0][0], self.samples[index][0][1]
noise = torch.normal(0, 0.003, size=(51, 2))
if self.audio:
mel_spect = self.samples[index][2]
if self.audio_only:
ld = np.array([kx,ky]).T
elif not self.audio_separate:
mel_spect = mel_spect.mean(1).numpy()
mel_spect = np.array([np.repeat(mv,68) for mv in mel_spect])
ld = np.array([kx,ky,mel_spect]).T
else:
ld =
|
np.array([kx,ky])
|
numpy.array
|
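The landmark preprocessing above rescales every keypoint sequence to [0, 1] with np.ptp. A stand-alone sketch of that normalisation, on random data instead of the OpenFace CSV columns used above, might be:

import numpy as np

kx = np.random.rand(90, 68) * 200 + 50          # hypothetical x coordinates: 90 frames x 68 keypoints
kx_norm = (kx - np.min(kx)) / np.ptp(kx)        # np.ptp is max - min, so values land in [0, 1]
print(kx_norm.min(), kx_norm.max())             # 0.0 1.0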
import json
import numpy as np
import tkinter
from tkinter import *
from collections import namedtuple
from geometry import Cube, Point
class Scene(object):
def __init__(self):
self.distance = 1000
self.start_point = Point((0, 0, 0))
self.filled = False
self.load_scene()
def load_scene(self):
scene = []
scene.append(Cube((-10, -10, 40), 10, 'red'))
scene.append(Cube((10, -10, 40), 10, 'green'))
scene.append(Cube((10, -10, 60), 10, 'orange'))
scene.append(Cube((-10, -10, 60), 10, 'Turquoise'))
scene.append(Cube((-10, 10, 40), 10, 'Magenta'))
scene.append(Cube((10, 10, 40), 10, 'Lime'))
scene.append(Cube((10, 10, 60), 10, 'white'))
scene.append(Cube((-10, 10, 60), 10, 'yellow'))
self.scene_data = scene
self.render()
def move(self, axis, direction):
transform = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
dtype=float)
if axis == "x":
transform[0, 3] = direction
elif axis == "y":
transform[1, 3] = direction
else:
transform[2, 3] = direction
for poly in self.scene_data:
for point in poly.points:
point.transform(transform)
print(transform)
def zoom(self, close):
if close:
self.distance = self.distance + 50
#print(scene.distance)
else:
self.distance = self.distance - 50
print(self.distance)
def turn(self, axis, direction):
angle = direction * 10 * np.pi / 180.
transform = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
dtype=float)
if axis == 'x':
transform[1:3, 1:3] = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle),
np.cos(angle)]])
elif axis == 'y':
transform[0:3, 0:3] = np.array([[np.cos(angle), 0,
np.sin(angle)], [0, 1, 0],
[-np.sin(angle), 0,
|
np.cos(angle)
|
numpy.cos
|
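The Scene class above drives every move, zoom, and turn through 4x4 homogeneous matrices. A compact sketch of composing a translation with a rotation about the x axis, in plain NumPy with no Tkinter, could be:

import numpy as np

angle = np.deg2rad(10)
translate = np.eye(4)
translate[0, 3] = 5.0                           # shift 5 units along x
rotate_x = np.eye(4)
rotate_x[1:3, 1:3] = [[np.cos(angle), -np.sin(angle)],
                      [np.sin(angle),  np.cos(angle)]]
point = np.array([0.0, 0.0, 40.0, 1.0])         # homogeneous coordinates of a scene point
print(translate @ rotate_x @ point)             # rotation applied first, then translation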
#!/usr/local/sci/bin/python2.7
#*****************************
#
# general utilities & classes for Python gridding.
#
#
#************************************************************************
'''
Author: <NAME>
Created: March 2016
Last update: 12 April 2016
Location: /project/hadobs2/hadisdh/marine/PROGS/Build
-----------------------
CODE PURPOSE AND OUTPUT
-----------------------
A set of class definitions and routines to help with the gridding of HadISDH Marine
-----------------------
LIST OF MODULES
-----------------------
None
-----------------------
DATA
-----------------------
None
-----------------------
HOW TO RUN THE CODE
-----------------------
All routines to be called from external scripts.
-----------------------
OUTPUT
-----------------------
None
-----------------------
VERSION/RELEASE NOTES
-----------------------
Version 2 (26 Sep 2016) <NAME>
---------
Enhancements
This now works with doNOWHOLE which is BC total but no data with whole number flags
Changes
Bug fixes
Version 2 (26 Sep 2016) <NAME>
---------
Enhancements
This can now work with the 3 QC iterations and BC options
This has a ShipOnly option in read_qc_data to pull through only ship data --ShipOnly
Bug fixed to work with ship only bias corrected data - platform_meta[:,2] rather than the QConly platform_meta[:,3]
Changes
Bug fixes
Possible bug fix in set_qc_flag_list
This had an incomplete list of QC flags for the full list and wasn't matching the QC flags up correctly.
This is now based on MDS_RWtools standard list.
Possible number of elements mistake in read_qc_data
This was causing an error where it was trying to treat 'None' as an integer. I think it was miscounting the elements.
This is now based on MDS_RWtools standard list.
Version 1 (release date)
---------
Enhancements
Changes
Bug fixes
-----------------------
OTHER INFORMATION
-----------------------
'''
import os
import datetime as dt
import numpy as np
import sys
import argparse
import matplotlib
import struct
import netCDF4 as ncdf
import pdb
#*********************************************
class MetVar(object):
'''
Bare bones class for meteorological variable
'''
def __init__(self, name, long_name):
self.name = name
self.long_name = long_name
def __str__(self):
return "variable: {}, long_name: {}".format(self.name, self.long_name)
__repr__ = __str__
#*********************************************
class TimeVar(object):
'''
Bare bones class for times
'''
def __init__(self, name, long_name, units, standard_name):
self.name = name
self.long_name = long_name
self.units = units
self.standard_name = standard_name
def __str__(self):
return "time: {}, long_name: {}, units: {}".format(self.name, self.long_name, self.units)
__repr__ = __str__
#*****************************************************
# KATE modified - added BC options
#def set_qc_flag_list(doBC = False, doUncert = False):
def set_qc_flag_list(doBC = False, doBCtotal = False, doBChgt = False, doBCscn = False, doNOWHOLE = False, doUncert = False):
# end
'''
Set the QC flags present in the raw data
:param bool doBC: run for bias corrected data
:param bool doUncert: work on files with uncertainty information (not currently used)
:returns: QC_FLAGS - np string array
'''
# KATE modified - added BC options
# if doBC:
if doBC | doBCtotal | doBChgt | doBCscn | doNOWHOLE:
# end
# reduced number of QC flags.
return np.array(["day","land","trk","date1","date2","pos","blklst","dup",\
"SSTbud","SSTclim","SSTnonorm","SSTfreez","SSTrep",\
"ATbud","ATclim","ATnonorm","ATround","ATrep",\
"DPTbud","DPTclim","DPTssat","DPTround","DPTrep","DPTrepsat"])
else:
# KATE modified - this doesn't seem to be working and I can't quite see how the subset listed below would work without any former subsetting of the read in data
# This now uses the complete list from MDS_RWtools.py standard version
# full list
return np.array(["day","land","trk","date1","date2","pos","blklst","dup","POSblank1",\
"SSTbud","SSTclim","SSTnonorm","SSTfreez","SSTnoval","SSTnbud","SSTbbud","SSTrep","SSTblank",\
"ATbud","ATclim","ATnonorm","ATblank1","ATnoval","ATround","ATbbud","ATrep","ATblank2",\
"DPTbud","DPTclim","DPTnonorm","DPTssat","DPTnoval","DPTround","DPTbbud","DPTrep","DPTrepsat",\
"few","ntrk","POSblank2","POSblank3","POSblank4","POSblank5","POSblank6","POSblank7"]) # set_qc_flag_list
# # full number
# return np.array(["day","land","trk","date1","date2","pos","blklst","dup",\
#"SSTbud","SSTclim","SSTnonorm","SSTfreez","SSTrep",\
#"ATbud","ATclim","ATnonorm","ATnoval","ATround","ATrep",\
#"DPTbud","DPTclim","DPTnonorm","DPTssat","DPTnoval","DPTround","DPTrep","DPTrepsat"]) # set_qc_flag_list
# end
# RD - kept original flag array here just in case MDS_RWtools isn't used before next read
#np.array(["day","land","trk","date1","date2","pos","blklst","dup","POSblank1",\
#"SSTbud","SSTclim","SSTnonorm","SSTfreez","SSTnoval","SSTnbud","SSTbbud","SSTrep","SSTblank",\
#"ATbud","ATclim","ATnonorm","ATblank1","ATnoval","ATnbud","ATbbud","ATrep","ATblank2",\
#"DPTbud","DPTclim","DPTnonorm","DPTssat","DPTnoval","DPTnbud","DPTbbud","DPTrep","DPTrepsat",\
#"few","ntrk","DUMblank1","DUMblank2","DUMblank3","DUMblank4","DUMblank5","DUMblank6"])
#*****************************************************
# KATE modified - added BC options
#def read_qc_data(filename, location, fieldwidths, doBC = False):
def read_qc_data(filename, location, fieldwidths, doBC = False, doBCtotal = False, doBChgt = False, doBCscn = False, doNOWHOLE = False, ShipOnly = False):
# end
"""
Read in the QC'd data and return
Expects fixed field format
http://stackoverflow.com/questions/4914008/efficient-way-of-parsing-fixed-width-files-in-python
:param str filename: filename to read
:param str location: location of file
:param str fieldwidths: fixed field widths to use
:param bool doBC: run on the bias corrected data
# KATE modified - added BC options
:param bool doBCtotal: run on the full bias corrected data
:param bool doBChgt: run on the height only bias corrected data
:param bool doBCscn: run on the screen only bias corrected data
# end
:param bool doNOWHOLE: run on the bias corrected data with no whole number flags set
# KATE modified - added BC options
:param bool ShipOnly: select only ship platform (0:5) data
# end
:returns: data - np.array of string data
"""
fmtstring = ''.join('%ds' % f for f in fieldwidths)
parse = struct.Struct(fmtstring).unpack_from
platform_data = []
platform_meta = []
platform_obs = []
platform_qc = []
# pdb.set_trace()
with open(os.path.join(location, filename), 'r') as infile:
for line in infile:
try:
if doBC:
# some lines might not be the correct length
assert len(line) == 751
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
dummy_obs = [fields[8: 8+18]] # used to help counting the fields
platform_obs += [fields[8+18: 8+18+14]] # the ???tbc fields
dummy_obs = [fields[8+18+14: 8+18+14+14+14+14]] # ditto
platform_meta += [fields[8+18+14+14+14+14: 8+18+14+14+14+14+12]]
platform_qc += [fields[8+18+14+14+14+14+12:]]
# KATE modified - added BC options
elif doBCtotal | doNOWHOLE:
# some lines might not be the correct length
assert len(line) == 751
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
dummy_obs = [fields[8: 8+18]] # used to help counting the fields
platform_obs += [fields[8+18: 8+18+14]] # the ???tbc fields
dummy_obs = [fields[8+18+14: 8+18+14+14+14+14]] # ditto
platform_meta += [fields[8+18+14+14+14+14: 8+18+14+14+14+14+12]] # 3rd element is PT
platform_qc += [fields[8+18+14+14+14+14+12:]]
elif doBChgt:
# some lines might not be the correct length
assert len(line) == 751
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
dummy_obs = [fields[8: 8+18+14]] # used to help counting the fields
platform_obs += [fields[8+18+14: 8+18+14+14]] # the ???tbc fields
dummy_obs = [fields[8+18+14+14: 8+18+14+14+14+14]] # ditto
platform_meta += [fields[8+18+14+14+14+14: 8+18+14+14+14+14+12]] # 3rd element is PT
platform_qc += [fields[8+18+14+14+14+14+12:]]
elif doBCscn:
# some lines might not be the correct length
assert len(line) == 751
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
dummy_obs = [fields[8: 8+18+14+14]] # used to help counting the fields
platform_obs += [fields[8+18+14+14: 8+18+14+14+14]] # the ???tbc fields
dummy_obs = [fields[8+18+14+14+14: 8+18+14+14+14+14]] # ditto
platform_meta += [fields[8+18+14+14+14+14: 8+18+14+14+14+14+12]] # 3rd element is PT
platform_qc += [fields[8+18+14+14+14+14+12:]]
# end
else:
# some lines might not be the correct length
assert len(line) == 410
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
platform_obs += [fields[8: 8+17]]
# KATE modified - this seems to be wrong
platform_meta += [fields[8+17: 8+17+30]] # 4th element is PT
platform_qc += [fields[8+17+30:]]
#platform_meta += [fields[8+17: 8+17+20]]
#platform_qc += [fields[8+17+20:]]
# end
except AssertionError:
print("skipping line in {} - malformed data".format(filename))
print(line)
except OSError:
print("file {} missing".format(filename))
sys.exit()
# convert to arrays
platform_qc = np.array(platform_qc)
platform_obs = np.array(platform_obs)
platform_meta = np.array(platform_meta)
platform_data = np.array(platform_data)
# KATE modified - copied out as no longer needed for I300 run - already removed PT = 14 in make_and_full_qc.py
# SHOULD HAVE BEEN platform_meta[:,3] anyway!!! so we have accidentally removed any SIDs of 14 - there are some!!!
# # filter PT=14
# PT = np.array([int(x) for x in platform_meta[:,2]])
#
# goods, = np.where(PT != 14)
# # should no longer be needed but retained for completeness
# end
# KATE modified - if ShipOnly is set then pull out only ship data
if ShipOnly:
# filter PT=0:5 only
# If it's a BC run then PT is element 2 (3rd), but if it's QC only then PT is element 3 (4th)
if doBC | doBCtotal | doBChgt | doBCscn | doNOWHOLE:
PT = np.array([int(x) for x in platform_meta[:,2]])
else:
PT = np.array([int(x) for x in platform_meta[:,3]])
goods, = np.where(PT <= 5)
print("Pulling out SHIPS only ",len(goods))
return platform_data[goods], \
platform_obs[goods].astype(int), \
platform_meta[goods], \
platform_qc[goods].astype(int) # read_qc_data
else:
return platform_data, \
platform_obs.astype(int), \
platform_meta, \
platform_qc.astype(int) # read_qc_data
# end
# KATE modified - commented out because the loops above make this redundant
# return platform_data[goods], \
# platform_obs[goods].astype(int), \
# platform_meta[goods], \
# platform_qc[goods].astype(int) # read_qc_data
# end
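# Hedged illustration of the fixed-width parsing trick used above (field widths invented):
# fieldwidths = (4, 6, 2)
# parse = struct.Struct(''.join('%ds' % f for f in fieldwidths)).unpack_from
# parse(b'2016JAN01 15') -> (b'2016', b'JAN01 ', b'15')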
#*****************************************************
# UNC NEW for doBCtotal only
def read_unc_data(filename, location, fieldwidths,
doUSLR = False, doUSCN = False, doUHGT = False, doUR = False, doUM = False, doUC = False, doUTOT = False,
ShipOnly = False):
"""
Read in the uncertainty data and return
Expects fixed field format
:param str filename: filename to read
:param str location: location of file
:param str fieldwidths: fixed field widths to use
# UNC NEW
:param bool doUSLR: work on BC and solar adj uncertainty with correlation
:param bool doUSCN: work on BC and instrument adj uncertainty with correlation
:param bool doUHGT: work on BC and height adj uncertainty with correlation
:param bool doUR: work on BC and rounding uncertainty with no correlation
:param bool doUM: work on BC and measurement uncertainty with no correlation
:param bool doUC: work on BC and climatological uncertainty with no correlation
:param bool doUTOT: work on BC and total uncertainty with no correlation
:param bool ShipOnly: select only ship platform (0:5) data
:returns: data - np.array of string data
"""
fmtstring = ''.join('%ds' % f for f in fieldwidths)
parse = struct.Struct(fmtstring).unpack_from
platform_meta = []
uncSLR_data = []
uncSCN_data = []
uncHGT_data = []
uncR_data = []
uncM_data = []
uncC_data = []
uncTOT_data = []
with open(os.path.join(location, filename), 'r') as infile:
for line in infile:
try:
# some lines might not be the correct length
assert len(line) == 1055
fields = parse(line)
# now unpack and process
dummy_obs = [fields[: 8+14]] # skip over these ones
uncTOT_data += [fields[8+14 : 8+14+14]]
uncSLR_data += [fields[8+14+14 : 8+14+14+14]]
uncSCN_data += [fields[8+14+14+14 : 8+14+14+14+14]]
uncHGT_data += [fields[8+14+14+14+14 : 8+14+14+14+14+14]]
uncM_data += [fields[8+14+14+14+14+14 : 8+14+14+14+14+14+14]]
uncR_data += [fields[8+14+14+14+14+14+14 : 8+14+14+14+14+14+14+14]]
uncC_data += [fields[8+14+14+14+14+14+14+14 : 8+14+14+14+14+14+14+14+14]]
platform_meta += [fields[8+14+14+14+14+14+14+14+14 : 8+14+14+14+14+14+14+14+14+3]]
# there are more elements but we do not need them
except AssertionError:
print("skipping line in {} - malformed data".format(filename))
print(line)
except OSError:
print("file {} missing".format(filename))
sys.exit()
# convert to arrays
platform_meta = np.array(platform_meta)
uncSLR_data = np.array(uncSLR_data)
uncSCN_data = np.array(uncSCN_data)
uncHGT_data = np.array(uncHGT_data)
uncR_data = np.array(uncR_data)
uncM_data = np.array(uncM_data)
uncC_data = np.array(uncC_data)
uncTOT_data = np.array(uncTOT_data)
# KATE modified - if ShipOnly is set then pull out only ship data
if ShipOnly:
# filter PT=0:5 only
# If it's a BC (ext or unc) run then PT is element 2 (3rd), but if it's QC only then PT is element 3 (4th)
PT = np.array([int(x) for x in platform_meta[:,2]])
goods, =
|
np.where(PT <= 5)
|
numpy.where
|
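The ship-only filter above simply indexes every per-platform array with the positions returned by np.where. A toy version of that pattern, with fabricated platform codes rather than the real MDS fields, is:

import numpy as np

platform_type = np.array([0, 5, 14, 3, 7])      # hypothetical PT codes; ships are 0..5
obs = np.array([10.1, 11.2, 9.9, 12.0, 8.5])
goods, = np.where(platform_type <= 5)           # the trailing comma unpacks the 1-tuple of index arrays
print(obs[goods])                               # [10.1 11.2 12. ]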
class DelaunayTriangulation:
def __init__(self, convex_covering_space):
self.__convex_covering_space = convex_covering_space
self.__coordinates_to_points = None
self.__coordinates_number = 0
self.__points_to_coordinates = None
self.__points = None
self.__convex_hull = None
def add_points(self, points, restart=False):
import numpy as np
coordinates, c2p, p2c = self.__convex_covering_space.coordinates(points)
self.__points = self.__convex_covering_space.extend(self.__points, points)
if self.__convex_hull is None:
from scipy.spatial import ConvexHull
if self.__convex_covering_space.infinity is not None:
coordinates = np.vstack((self.__convex_covering_space.infinity, coordinates))
self.__coordinates_to_points = np.hstack(([0], c2p + 1)) - 1
self.__points_to_coordinates = p2c + 1
else:
self.__coordinates_to_points = c2p
self.__points_to_coordinates = p2c
self.__convex_hull = ConvexHull(coordinates, incremental=True)
else:
self.__coordinates_to_points = np.hstack((self.__coordinates_to_points, c2p + self.__points_number))
self.__points_to_coordinates =
|
np.hstack((self.__points_to_coordinates, p2c + self.__coordinates_number))
|
numpy.hstack
|
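The triangulation class above leans on SciPy's incremental ConvexHull. A bare-bones sketch of that incremental API, with random 2-D points and none of the covering-space bookkeeping, could be:

import numpy as np
from scipy.spatial import ConvexHull

pts = np.random.rand(20, 2)
hull = ConvexHull(pts, incremental=True)        # keep the hull open so more points can be added
hull.add_points(np.random.rand(10, 2))          # later batches are merged into the same hull
hull.close()
print(hull.vertices)                            # indices of the points on the final hull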
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import scipy.interpolate
import pdb
import scipy.stats.mstats as mstats
import numpy as np
class Cl_class(object):
def __init__(self, block, config):
# Spectra to use
self.shear = config['shear']
self.intrinsic_alignments = (
config['intrinsic_alignments'], config['GI'], config['II'])
self.position = config['position']
self.ggl = config['ggl']
self.magnification = config['magnification']
self.cmb_kappa = config['cmb_kappa']
self.kappa_shear = config['kappa_shear']
self.kappa_pos = config['kappa_pos']
self.noise = config['noise']
self.bias = config['bias'][0]
self.m_per_bin = config['bias'][1]
shear_cat = config['shear_cat']
pos_cat = config['pos_cat']
self.dobinning = config['binning']
self.window = config['window']
# Relevant parameters for the noise
if self.noise:
self.sigma_gamma = config['shape_dispersion']
self.ngal_shear, self.ngal_pos = config['ngal']
# And the configuration of the theory spectra
self.Nzbin_shear = int(block.get_int(shear_cat, 'nzbin', default=0))
self.Nzbin_pos = int(block.get_int(pos_cat, 'nzbin', default=0))
self.Nzbin_cmb = 1.
if self.shear:
self.zbin_edges_shear = [block[shear_cat, 'edge_%d' % i]
for i in range(1, self.Nzbin_shear + 1)]
if self.intrinsic_alignments[0]:
gg_section = 'shear_cl_gg'
else:
gg_section = 'shear_cl'
self.l_shear = block[gg_section, 'ell']
self.Nl_shear = len(self.l_shear)
if self.dobinning:
self.Nlbin_shear = int(config['nlbin_shear'])
else:
self.Nlbin_shear = self.Nl_shear
if self.position:
self.zbin_edges_pos = [block[pos_cat, 'edge_%d' % i]
for i in range(1, self.Nzbin_pos + 1)]
self.l_pos = block['matter_cl', 'ell']
self.Nl_pos = len(self.l_pos)
if self.dobinning:
self.Nlbin_pos = int(config['nlbin_pos'])
else:
self.Nlbin_pos = self.Nl_pos
if self.ggl:
self.l_ggl = block['matter_cl', 'ell']
self.Nl_ggl = len(self.l_ggl)
if self.dobinning:
self.Nlbin_ggl = int(config['nlbin_ggl'])
else:
self.Nlbin_ggl = self.Nl_ggl
if self.cmb_kappa:
self.l_kk = block['cmb_kappa_cl', 'ell']
self.Nl_kk = len(self.l_kk)
if self.dobinning:
self.Nlbin_kk = int(config['nlbin_kk'])
else:
self.Nlbin_kk = self.Nl_kk
if self.kappa_shear:
self.l_ke = block['cmb_kappa_shear_cl', 'ell']
self.Nl_ke = len(self.l_ke)
if self.dobinning:
self.Nlbin_ke = int(config['nlbin_ke'])
else:
self.Nlbin_ke = self.Nl_ke
if self.kappa_pos:
self.l_kn = block['cmb_kappa_matter_cl', 'ell']
self.Nl_kn = len(self.l_kn)
if self.dobinning:
self.Nlbin_kn = int(config['nlbin_kn'])
else:
self.Nlbin_kn = self.Nl_kn
if self.bias:
self.multiplicative_bias = [block[shear_cat, "m%d" % i]
for i in range(1, self.Nzbin_shear + 1)]
# Finally get the desired binning
self.get_l_bins(config)
def load_and_generate_observable_cls(self, block, names):
# Set up somewhere to put the observable spectra
if self.shear:
self.C_ee = np.zeros(
(self.Nzbin_shear, self.Nzbin_shear, self.Nl_shear))
self.C_ee_binned = np.zeros(
(self.Nzbin_shear, self.Nzbin_shear, self.Nlbin_shear))
if self.position:
self.C_nn = np.zeros((self.Nzbin_pos, self.Nzbin_pos, self.Nl_pos))
self.C_nn_binned = np.zeros(
(self.Nzbin_pos, self.Nzbin_pos, self.Nlbin_pos))
if self.ggl:
self.C_ne = np.zeros(
(self.Nzbin_pos, self.Nzbin_shear, self.Nl_pos))
self.C_ne_binned = np.zeros(
(self.Nzbin_pos, self.Nzbin_shear, self.Nlbin_ggl))
# Then cycle through all the redshift bin combinations
if self.shear:
for i in range(1, self.Nzbin_shear + 1):
for j in range(1, self.Nzbin_shear + 1):
bin = "bin_%d_%d" % (i, j)
bin_tr = "bin_%d_%d" % (j, i)
# The C_GG,II,mm,gg spectra are symmetric
# This is just bookkeeping to account for the fact we only have half of them
if (j < i):
a = bin
else:
a = bin_tr
# GG
if self.intrinsic_alignments[0]:
self.C_ee[i - 1][j -
1] += block.get_double_array_1d("shear_cl_gg", a)
if self.intrinsic_alignments[1]:
# GI
self.C_ee[i - 1][j -
1] += block.get_double_array_1d("shear_cl_gi", bin)
# IG
self.C_ee[i - 1][j -
1] += block.get_double_array_1d("shear_cl_gi", bin_tr)
if self.intrinsic_alignments[2]:
# II
self.C_ee[i - 1][j -
1] += block.get_double_array_1d("shear_cl_ii", a)
else:
self.C_ee[i - 1][j -
1] += block.get_double_array_1d("shear_cl", a)
block["galaxy_shape_cl_unbinned", "ell"] = block.get_double_array_1d(
"shear_cl_gg", "ell")
if self.position:
for i in range(1, self.Nzbin_pos + 1):
for j in range(1, self.Nzbin_pos + 1):
bin = "bin_%d_%d" % (i, j)
bin_tr = "bin_%d_%d" % (j, i)
# The C_GG,II,mm,gg spectra are symmetric
# This is just bookkeeping to account for the fact we only have half of them
if (j < i):
a = bin
else:
a = bin_tr
# gg
self.C_nn[i - 1][j -
1] += block.get_double_array_1d('matter_cl', a)
if self.magnification:
# mg
self.C_nn[i - 1][j - 1] += block.get_double_array_1d(
"galaxy_magnification_cl", bin)
self.C_nn[i - 1][j - 1] += block.get_double_array_1d(
"galaxy_magnification_cl", bin_tr) # gm
self.C_nn[i - 1][j - 1] += block.get_double_array_1d(
"magnification_magnification_cl", a) # mm
block["galaxy_position_cl_unbinned",
"ell"] = block.get_double_array_1d("matter_cl", "ell")
if self.ggl:
block["galaxy_position_shape_cross_cl_unbinned",
"ell"] = block.get_double_array_1d("matter_cl", "ell")
for i in range(1, self.Nzbin_pos + 1):
for j in range(1, self.Nzbin_shear + 1):
bin = "bin_%d_%d" % (i, j)
bin_tr = "bin_%d_%d" % (j, i)
# The C_GG,II,mm,gg spectra are symmetric
# This is just bookkeeping to account for the fact we only have half of them
if (j < i):
a = bin
else:
a = bin_tr
if self.ggl:
# gG
self.C_ne[i - 1][j -
1] += block.get_double_array_1d("ggl_cl", bin)
if self.intrinsic_alignments[0]:
# gI
self.C_ne[i - 1][j -
1] += block.get_double_array_1d("gal_IA_cross_cl", bin)
if self.magnification:
self.C_ne[i - 1][j - 1] += block.get_double_array_1d(
"magnification_intrinsic_cl", bin) # mI
if self.magnification:
# mG
self.C_ne[i - 1][j - 1] += block.get_double_array_1d(
"magnification_shear_cl", bin)
if not self.noise:
# Finally resample the spectra in the survey angular frequency bins
if self.shear:
self.C_ee_binned[i - 1][j - 1] = get_binned_cl(self.C_ee[i - 1][j - 1], self.l_shear,
self.lbin_edges_shear, self.dobinning, self.window)
if self.bias:
self.apply_measurement_bias(i, j, 'shear')
block["galaxy_shape_cl_unbinned",
a] = self.C_ee[i - 1][j - 1]
if self.position:
self.C_nn_binned[i - 1][j - 1] = get_binned_cl(self.C_nn[i - 1][j - 1], self.l_pos,
self.lbin_edges_pos, self.dobinning, self.window)
block["galaxy_position_cl_UNBINNED",
a] = self.C_nn[i - 1][j - 1]
if self.ggl:
self.C_ne_binned[i - 1][j - 1] = get_binned_cl(self.C_ne[i - 1][j - 1], self.l_ggl,
self.lbin_edges_pos, self.dobinning, self.window)
if self.bias:
self.apply_measurement_bias(i, j, 'ggl')
block["galaxy_position_shape_cross_cl_unbinned",
a] = self.C_ne[i - 1][j - 1]
if self.noise:
# Add shot noise if required
self.add_noise(block)
# If noise was added earlier, the binning is done here rather than
# immediately on loading
if self.shear:
for i in range(1, self.Nzbin_shear + 1):
for j in range(1, self.Nzbin_shear + 1):
self.C_ee_binned[i - 1][j - 1] = get_binned_cl(self.C_ee[i - 1][j - 1], self.l_shear,
self.lbin_edges_shear, self.dobinning, self.window)
if self.bias:
self.apply_measurement_bias(i, j, 'shear')
block["galaxy_shape_cl_unbinned", "bin_%d_%d" %
(i, j)] = self.C_ee[i - 1][j - 1]
if self.position:
for i in range(1, self.Nzbin_pos + 1):
for j in range(1, self.Nzbin_pos + 1):
self.C_nn_binned[i - 1][j - 1] = get_binned_cl(self.C_nn[i - 1][j - 1], self.l_pos,
self.lbin_edges_pos, self.dobinning, self.window)
block["galaxy_position_cl_UNBINNED", "bin_%d_%d" %
(i, j)] = self.C_nn[i - 1][j - 1]
print(a)
if self.ggl:
for i in range(1, self.Nzbin_pos + 1):
for j in range(1, self.Nzbin_shear + 1):
self.C_ne_binned[i - 1][j - 1] = get_binned_cl(self.C_ne[i - 1][j - 1], self.l_ggl,
self.lbin_edges_pos, self.dobinning, self.window)
if self.bias:
self.apply_measurement_bias(i, j, 'ggl')
block["galaxy_position_shape_cross_cl_unbinned",
"bin_%d_%d" % (i, j)] = self.C_ne[i - 1][j - 1]
def apply_measurement_bias(self, i, j, mode=None):
if not self.m_per_bin:
m0 = self.multiplicative_bias[0]
# Compute scaling parameter for this pair of redshift bins
if self.m_per_bin:
mi = self.multiplicative_bias[i - 1]
mj = self.multiplicative_bias[j - 1]
else:
mi, mj = m0, m0
# Apply scaling
if mode == 'shear':
self.C_ee_binned[i - 1][j - 1] *= (1 + mi) * (1 + mj)
if mode == 'ggl':
self.C_ne_binned[i - 1][j - 1] *= (1 + mj)
def add_noise(self, block):
n_binned_shear = get_binned_number_densities(
self.Nzbin_shear, self.ngal_shear)
n_binned_pos = get_binned_number_densities(
self.Nzbin_pos, self.ngal_pos)
# Create noise matrices with the same shape as the Cls
# These are diagonal in the x,z plane (fixed l) and constant along the y axis (constant redshift)
N_ee_0 = np.identity(self.Nzbin_shear) * \
self.sigma_gamma**2 / (2. * n_binned_shear)
N_nn_0 = np.identity(self.Nzbin_pos) * 1. / n_binned_pos
N_shot_ee = []
N_shot_nn = []
if self.shear:
for i in range(len(self.C_ee[0][0])):
N_shot_ee += [N_ee_0]
N_shot_ee = np.swapaxes(N_shot_ee, 0, 2)
N_shot_ee = np.swapaxes(N_shot_ee, 0, 1)
if self.position:
for i in range(len(self.C_nn[0][0])):
N_shot_nn += [N_nn_0]
N_shot_nn = np.swapaxes(N_shot_nn, 0, 2)
N_shot_nn = np.swapaxes(N_shot_nn, 0, 1)
# Then add the relevant noise to the Cl matrices
if self.shear:
self.C_ee += N_shot_ee
if self.position:
self.C_nn += N_shot_nn
def get_l_bins(self, config):
if self.dobinning:
# Define some l bins for these galaxy samples
lmin, lmax = config['lmin_shear'], config['lmax_shear']
if self.shear:
self.lbin_edges_shear = np.logspace(
np.log10(lmin), np.log10(lmax), self.Nlbin_shear + 1)
self.l_bins_shear = np.zeros_like(self.lbin_edges_shear[:-1])
if self.window == 'tophat':
self.l_bins_shear = np.exp(
(np.log(self.lbin_edges_shear[1:] * self.lbin_edges_shear[:-1])) / 2.0)
elif self.window == 'tophat-arithmetic':
self.l_bins_shear = (
self.lbin_edges_shear[1:] + self.lbin_edges_shear[:-1]) / 2.0
elif self.window == 'delta':
# Just take the mid point of each bin and sample the Cls there
for i in range(len(self.lbin_edges_shear) - 1):
lmin0 = self.lbin_edges_shear[i]
lmax0 = self.lbin_edges_shear[i + 1]
sel = (self.l_shear > lmin0) & (self.l_shear < lmax0)
l_in_window = self.l_shear[sel]
self.l_bins_shear[i] = l_in_window[len(l_in_window) // 2]
if self.position:
lmin, lmax = config['lmin_pos'], config['lmax_pos']
self.lbin_edges_pos = np.logspace(
np.log10(lmin), np.log10(lmax), self.Nlbin_pos + 1)
self.l_bins_pos = np.zeros_like(self.lbin_edges_pos[:-1])
if self.window == 'tophat':
self.l_bins_pos = np.exp(
(np.log(self.lbin_edges_pos[1:] * self.lbin_edges_pos[:-1])) / 2.0)
elif self.window == 'tophat-arithmetic':
self.l_bins_pos = (
self.lbin_edges_pos[1:] + self.lbin_edges_pos[:-1]) / 2.0
elif self.window == 'delta':
for i in range(len(self.lbin_edges_pos) - 1):
lmin0 = self.lbin_edges_pos[i]
lmax0 = self.lbin_edges_pos[i + 1]
sel = (self.l_pos > lmin0) & (self.l_pos < lmax0)
l_in_window = self.l_pos[sel]
self.l_bins_pos[i] = l_in_window[len(l_in_window) // 2]
if self.position and self.shear:
lmin, lmax = config['lmin_ggl'], config['lmax_ggl']
self.lbin_edges_ggl = np.logspace(
np.log10(lmin), np.log10(lmax), self.Nlbin_ggl + 1)
self.l_bins_ggl = np.zeros_like(self.lbin_edges_ggl[:-1])
if self.window == 'tophat':
self.l_bins_ggl = np.exp(
(np.log(self.lbin_edges_ggl[1:] * self.lbin_edges_ggl[:-1])) / 2.0)
elif self.window == 'tophat-arithmetic':
self.l_bins_ggl = (
self.lbin_edges_ggl[1:] + self.lbin_edges_ggl[:-1]) / 2.0
elif self.window == 'delta':
for i in range(len(self.lbin_edges_ggl) - 1):
lmin0 = self.lbin_edges_ggl[i]
lmax0 = self.lbin_edges_ggl[i + 1]
sel = (self.l_ggl > lmin0) & (self.l_ggl < lmax0)
l_in_window = self.l_ggl[sel]
self.l_bins_ggl[i] = l_in_window[len(l_in_window) // 2]
if self.cmb_kappa:
lmin, lmax = config['lmin_cmb_kappa'], config['lmax_cmb_kappa']
self.lbin_edges_kk = np.linspace(lmin, lmax, self.Nlbin_kk + 1)
self.l_bins_kk = np.zeros_like(self.lbin_edges_kk[:-1])
if self.window == 'tophat':
self.l_bins_kk = np.exp(
(np.log(self.lbin_edges_kk[1:] * self.lbin_edges_kk[:-1])) / 2.0)
elif self.window == 'tophat-arithmetic':
self.l_bins_kk = (
self.lbin_edges_kk[1:] + self.lbin_edges_kk[:-1]) / 2.0
elif self.window == 'delta':
for i in range(len(self.lbin_edges_kk) - 1):
lmin0 = self.lbin_edges_kk[i]
lmax0 = self.lbin_edges_kk[i + 1]
sel = (self.l_kk > lmin0) & (self.l_kk < lmax0)
l_in_window = self.l_kk[sel]
self.l_bins_kk[i] = l_in_window[len(l_in_window) // 2]
if self.kappa_shear:
lmin, lmax = config['lmin_kappa_shear'], config['lmax_kappa_shear']
self.lbin_edges_ke = np.linspace(lmin, lmax, self.Nlbin_ke + 1)
self.l_bins_ke = np.zeros_like(self.lbin_edges_ke[:-1])
if self.window == 'tophat':
self.l_bins_ke = np.exp(
(
|
np.log(self.lbin_edges_ke[1:] * self.lbin_edges_ke[:-1])
|
numpy.log
|
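The binning code above repeats the same pattern for every spectrum: log-spaced bin edges plus a geometric mid-point per bin. Stripped of the cosmosis block handling, that pattern is roughly:

import numpy as np

lmin, lmax, nbins = 10.0, 3000.0, 8             # illustrative survey choices
edges = np.logspace(np.log10(lmin), np.log10(lmax), nbins + 1)
l_mid = np.exp(np.log(edges[1:] * edges[:-1]) / 2.0)   # geometric mean of each bin's edges
print(edges.round(1))
print(l_mid.round(1))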
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import contextlib
import numpy as np
from decorator_helper import prog_scope
import inspect
from six.moves import filter
import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places
import paddle.fluid.nets as nets
from paddle.fluid.framework import Program, program_guard, default_main_program
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid import core
from paddle.fluid.initializer import Constant
import paddle.fluid.layers as layers
from test_imperative_base import new_program_scope
from paddle.fluid.dygraph import nn
from paddle.fluid.dygraph import base
from paddle.fluid.dygraph import to_variable
class LayerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.seed = 111
@classmethod
def tearDownClass(cls):
pass
def _get_place(self, force_to_use_cpu=False):
# this option for ops that only have cpu kernel
if force_to_use_cpu:
return core.CPUPlace()
else:
if core.is_compiled_with_cuda():
return core.CUDAPlace(0)
return core.CPUPlace()
@contextlib.contextmanager
def static_graph(self):
with new_program_scope():
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
yield
def get_static_graph_result(self,
feed,
fetch_list,
with_lod=False,
force_to_use_cpu=False):
exe = fluid.Executor(self._get_place(force_to_use_cpu))
exe.run(fluid.default_startup_program())
return exe.run(fluid.default_main_program(),
feed=feed,
fetch_list=fetch_list,
return_numpy=(not with_lod))
@contextlib.contextmanager
def dynamic_graph(self, force_to_use_cpu=False):
with fluid.dygraph.guard(
self._get_place(force_to_use_cpu=force_to_use_cpu)):
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
yield
class TestLayer(LayerTest):
def test_custom_layer_with_kwargs(self):
class CustomLayer(fluid.Layer):
def __init__(self, input_size, linear1_size=4):
super(CustomLayer, self).__init__()
self.linear1 = nn.Linear(
input_size, linear1_size, bias_attr=False)
self.linear2 = nn.Linear(linear1_size, 1, bias_attr=False)
def forward(self, x, do_linear2=False):
ret = self.linear1(x)
if do_linear2:
ret = self.linear2(ret)
return ret
with self.dynamic_graph():
inp = np.ones([3, 3], dtype='float32')
x = base.to_variable(inp)
custom = CustomLayer(input_size=3, linear1_size=2)
ret = custom(x, do_linear2=False)
self.assertTrue(np.array_equal(ret.numpy().shape, [3, 2]))
ret = custom(x, do_linear2=True)
self.assertTrue(np.array_equal(ret.numpy().shape, [3, 1]))
def test_linear(self):
inp = np.ones([3, 32, 32], dtype='float32')
with self.static_graph():
t = layers.data(
name='data',
shape=[3, 32, 32],
dtype='float32',
append_batch_size=False)
linear = nn.Linear(
32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1))
ret = linear(t)
static_ret = self.get_static_graph_result(
feed={'data': inp}, fetch_list=[ret])[0]
with self.dynamic_graph():
t = base.to_variable(inp)
linear = nn.Linear(
32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1))
dy_ret = linear(t)
dy_ret_value = dy_ret.numpy()
self.assertTrue(np.array_equal(static_ret, dy_ret_value))
def test_layer_norm(self):
inp =
|
np.ones([3, 32, 32], dtype='float32')
|
numpy.ones
|
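The test class above keeps checking that a layer produces identical results under the static and dynamic graph paths. The quantity compared in test_linear is just an affine map; a reduced NumPy sketch of what nn.Linear(32, 4) applies to the last axis, with hypothetical weights, is:

import numpy as np

inp = np.ones([3, 32], dtype='float32')          # last axis only, for brevity
weight = np.full([32, 4], 0.5, dtype='float32')  # hypothetical learned weights
bias = np.ones([4], dtype='float32')             # mirrors the ConstantInitializer(value=1) bias above
out = inp @ weight + bias                        # matmul plus broadcast bias, shape (3, 4)
print(out.shape)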
"""Interactive phaseogram."""
import warnings
import copy
import argparse
from stingray.pulse.pulsar import fold_events, pulse_phase, get_TOA
from stingray.utils import assign_value_if_none
from stingray.events import EventList
import numpy as np
from scipy.signal import savgol_filter
from scipy import optimize
from astropy.stats import poisson_conf_interval
from astropy import log
from .io import load_events, filter_energy
try:
from tqdm import tqdm as show_progress
except ImportError:
def show_progress(a):
return a
try:
import pint.toa as toa
# import pint
HAS_PINT = True
except ImportError:
warnings.warn(
"PINT is not installed. "
"Some pulsar functionality will not be available"
)
HAS_PINT = False
from .base import deorbit_events
def _load_and_prepare_TOAs(mjds, errs_us=None, ephem="DE405"):
errs_us = assign_value_if_none(errs_us, np.zeros_like(mjds))
toalist = [None] * len(mjds)
for i, m in enumerate(mjds):
toalist[i] = toa.TOA(
m, error=errs_us[i], obs="Barycenter", scale="tdb"
)
toalist = toa.TOAs(toalist=toalist)
if "tdb" not in toalist.table.colnames:
toalist.compute_TDBs()
if "ssb_obs_pos" not in toalist.table.colnames:
toalist.compute_posvels(ephem, False)
return toalist
def create_template_from_profile_sins(
phase, profile, profile_err, imagefile="template.png", norm=1
):
"""
Parameters
----------
phase: :class:`np.array`
profile: :class:`np.array`
profile_err: :class:`np.array`
Phase, pulse profile, and error bars
imagefile: str
norm: float or :class:`np.array`
Returns
-------
template: :class:`np.array`
The calculated template
additional_phase: float
Examples
--------
>>> phase = np.arange(0.0, 1, 0.001)
>>> profile = np.cos(2 * np.pi * phase)
>>> profile_err = profile * 0
>>> template, additional_phase = create_template_from_profile_sins(
... phase, profile, profile_err)
...
>>> np.allclose(template, profile, atol=0.01)
True
"""
import matplotlib.pyplot as plt
prof = np.concatenate((profile, profile, profile))
proferr = np.concatenate((profile_err, profile_err, profile_err))
fit_pars_save, _, _ = fit_profile_with_sinusoids(
prof, proferr, nperiods=3, baseline=True, debug=False
)
template = std_fold_fit_func(fit_pars_save, phase)
fig = plt.figure()
plt.plot(phase, profile, drawstyle="steps-mid")
plt.plot(phase, template, drawstyle="steps-mid")
plt.savefig(imagefile)
plt.close(fig)
# start template from highest bin!
template *= norm
template_fine = std_fold_fit_func(fit_pars_save, np.arange(0, 1, 0.001))
additional_phase = np.argmax(template_fine) / len(template_fine)
return template, additional_phase
def create_template_from_profile(
phase, profile, profile_err, imagefile="template.png", norm=1
):
"""
Parameters
----------
phase: :class:`np.array`
profile: :class:`np.array`
profile_err: :class:`np.array`
Phase, pulse profile, and error bars
    imagefile: str
        Name of the file where a diagnostic plot of the template is saved
    norm: float or :class:`np.array`
        Normalization factor applied to the template
    Returns
    -------
    template: :class:`np.array`
        The calculated template
    additional_phase: float
        Phase of the template maximum, in pulse-phase units
Examples
--------
>>> phase = np.arange(0.0, 1, 0.01)
>>> profile = np.cos(2 * np.pi * phase)
>>> profile_err = profile * 0
>>> template, additional_phase = create_template_from_profile(
... phase, profile, profile_err)
...
>>> np.allclose(template, profile, atol=0.001)
True
"""
from scipy.interpolate import splrep, splev
import matplotlib.pyplot as plt
ph = np.concatenate((phase - 1, phase, phase + 1))
prof = np.concatenate((profile, profile, profile))
proferr = np.concatenate((profile_err, profile_err, profile_err))
weights = 1 / proferr if np.all(proferr != 0) else None
# template = savgol_filter(profile, 5, 3, mode='wrap')
spl = splrep(ph, prof, w=weights, s=0)
phases_fine = np.arange(0, 1, 0.001)
template_fine = splev(phases_fine, spl)
template = splev(phase, spl)
fig = plt.figure()
plt.plot(phase, profile, drawstyle="steps-mid")
plt.plot(phase, template, drawstyle="steps-mid")
plt.savefig(imagefile)
plt.close(fig)
additional_phase = np.argmax(template_fine) / len(template_fine)
return template, additional_phase
def get_TOAs_from_events(
events, folding_length, *frequency_derivatives, **kwargs
):
"""Get TOAs of pulsation.
Parameters
----------
events : array-like
event arrival times
folding_length : float
length of sub-intervals to fold
*frequency_derivatives : floats
pulse frequency, first derivative, second derivative, etc.
Other parameters
----------------
pepoch : float, default None
Epoch of timing solution, in the same units as ev_times. If none, the
first event time is used.
mjdref : float, default None
Reference MJD
template : array-like, default None
The pulse template
nbin : int, default 16
The number of bins in the profile (overridden by the dimension of the
template)
timfile : str, default 'out.tim'
file to save the TOAs to (if PINT is installed)
gti: [[g0_0, g0_1], [g1_0, g1_1], ...]
Good time intervals. Defaults to None
    quick: bool
        If True, use a quicker fitting algorithm for the TOAs. Defaults to False
position: `astropy.SkyCoord` object
Position of the object
Returns
-------
toas : array-like
list of times of arrival. If ``mjdref`` is specified, they are
expressed as MJDs, otherwise in MET
toa_err : array-like
errorbars on TOAs, in the same units as TOAs.
"""
template = kwargs["template"] if "template" in kwargs else None
mjdref = kwargs["mjdref"] if "mjdref" in kwargs else None
nbin = kwargs["nbin"] if "nbin" in kwargs else 16
pepoch = kwargs["pepoch"] if "pepoch" in kwargs else None
timfile = kwargs["timfile"] if "timfile" in kwargs else "out.tim"
gti = kwargs["gti"] if "gti" in kwargs else None
label = kwargs["label"] if "label" in kwargs else None
quick = kwargs["quick"] if "quick" in kwargs else False
pepoch = assign_value_if_none(pepoch, events[0])
gti = np.asarray(assign_value_if_none(gti, [[events[0], events[-1]]]))
# run exposure correction only if there are less than 1000 pulsations
# in the interval
length = gti.max() - gti.min()
expocorr = folding_length < (1000 / frequency_derivatives[0])
if template is not None:
nbin = len(template)
additional_phase = np.argmax(template) / nbin
else:
phase, profile, profile_err = fold_events(
copy.deepcopy(events),
*frequency_derivatives,
ref_time=pepoch,
gtis=copy.deepcopy(gti),
expocorr=expocorr,
nbin=nbin,
)
template, additional_phase = create_template_from_profile(
phase,
profile,
profile_err,
imagefile=timfile.replace(".tim", "") + ".png",
norm=folding_length / length,
)
starts = np.arange(gti[0, 0], gti[-1, 1], folding_length)
toas = []
toa_errs = []
for start in show_progress(starts):
stop = start + folding_length
good = (events >= start) & (events < stop)
events_tofold = events[good]
if len(events_tofold) < nbin:
continue
gtis_tofold = copy.deepcopy(
gti[(gti[:, 0] < stop) & (gti[:, 1] > start)]
)
gtis_tofold[0, 0] = start
gtis_tofold[-1, 1] = stop
local_f = frequency_derivatives[0]
for i_f, f in enumerate(frequency_derivatives[1:]):
local_f += (
1
/ np.math.factorial(i_f + 1)
* (start - pepoch) ** (i_f + 1)
* f
)
fder = copy.deepcopy(list(frequency_derivatives))
fder[0] = local_f
phase, profile, profile_err = fold_events(
events_tofold,
*fder,
ref_time=start,
gtis=gtis_tofold,
expocorr=expocorr,
nbin=nbin,
)
# BAD!BAD!BAD!
# [[Pay attention to time reference here.
# We are folding wrt pepoch, and calculating TOAs wrt start]]
toa, toaerr = get_TOA(
profile,
1 / frequency_derivatives[0],
start,
template=template,
additional_phase=additional_phase,
quick=quick,
debug=True,
)
toas.append(toa)
toa_errs.append(toaerr)
toas, toa_errs = np.array(toas), np.array(toa_errs)
if mjdref is not None:
toas = toas / 86400 + mjdref
toa_errs = toa_errs * 1e6
if HAS_PINT:
label = assign_value_if_none(label, "hendrics")
toa_list = _load_and_prepare_TOAs(toas, errs_us=toa_errs)
# workaround until PR #368 is accepted in pint
toa_list.table["clkcorr"] = 0
toa_list.write_TOA_file(timfile, name=label, format="Tempo2")
log.info("TOA(MJD) TOAerr(us)")
else:
log.info("TOA(MET) TOAerr(us)")
for t, e in zip(toas, toa_errs):
log.info(f"{t}, {e}")
return toas, toa_errs
def _check_odd(n):
    """Return ``n`` if it is odd, otherwise the next odd number."""
    return n // 2 * 2 + 1
def dbl_cos_fit_func(p, x):
# the frequency is fixed
"""
A double sinus (fundamental + 1st harmonic) used as a fit function
"""
startidx = 0
base = 0
if len(p) % 2 != 0:
base = p[0]
startidx = 1
first_harm = p[startidx] * np.cos(
2 * np.pi * x + 2 * np.pi * p[startidx + 1]
)
second_harm = p[startidx + 2] * np.cos(
4.0 * np.pi * x + 4 * np.pi * p[startidx + 3]
)
return base + first_harm + second_harm
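# Illustrative sketch, not part of the original module: the parameter vector for
# ``dbl_cos_fit_func`` is [amp1, phase1, amp2, phase2], optionally preceded by a
# baseline term. The helper below just evaluates the model on a phase grid for
# hypothetical parameter values.
def _demo_dbl_cos_fit_func():
    x = np.arange(0, 1, 0.01)
    # baseline 2.0, fundamental amplitude 1.0 at phase 0.1, harmonic 0.3 at phase 0.25
    p = [2.0, 1.0, 0.1, 0.3, 0.25]
    return dbl_cos_fit_func(p, x)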
def std_fold_fit_func(p, x):
"""Chooses the fit function used in the fit."""
return dbl_cos_fit_func(p, x)
def std_residuals(p, x, y):
"""The residual function used in the fit."""
return std_fold_fit_func(p, x) - y
def adjust_amp_phase(pars):
"""Give the phases in the interval between 0 and 1.
The calculation is based on the amplitude and phase given as input
pars[0] is the initial amplitude; pars[1] is the initial phase
If amplitude is negative, it makes it positive and changes the phase
accordingly
Examples
--------
>>> np.allclose(adjust_amp_phase([-0.5, 0.2]), [0.5, 0.7])
True
>>> np.allclose(adjust_amp_phase([0.5, -1.2]), [0.5, 0.8])
True
>>> np.allclose(adjust_amp_phase([0.5, 1.2]), [0.5, 0.2])
True
"""
if pars[0] < 0:
pars[0] = -pars[0]
pars[1] += 0.5
pars[1] = pars[1] - np.floor(pars[1])
return pars
def fit_profile_with_sinusoids(
profile, profile_err, debug=False, nperiods=1, baseline=False
):
"""
Fit a folded profile with the std_fold_fit_func.
Tries a number of different initial values for the fit, and returns the
result of the best chi^2 fit
Parameters
----------
profile : array of floats
The folded profile
profile_err : array of floats
the error on the folded profile elements
Other parameters
----------------
    debug : bool, optional
        print debug info
    nperiods : int, optional
        number of periods in the folded profile. Default 1.
    baseline : bool, optional
        if True, also fit a constant baseline level. Default False.
Returns
-------
fit_pars : array-like
the best-fit parameters
success : bool
whether the fit succeeded or not
chisq : float
the best chi^2
"""
x = np.arange(0, len(profile) * nperiods, nperiods) / float(len(profile))
guess_pars = [
max(profile) - np.mean(profile),
x[np.argmax(profile[: len(profile) // nperiods])],
0,
0.25,
]
startidx = 0
if baseline:
guess_pars = [np.mean(profile)] + guess_pars
if debug:
log.debug(guess_pars)
startidx = 1
chisq_save = 1e32
fit_pars_save = guess_pars
success_save = -1
if debug:
import matplotlib.pyplot as plt
fig = plt.figure("Debug profile")
plt.title("Debug profile")
plt.errorbar(x, profile, drawstyle="steps-mid")
plt.plot(x, std_fold_fit_func(guess_pars, x), "r--")
for phase in np.arange(0.0, 1.0, 0.1):
guess_pars[3 + startidx] = phase
if debug:
log.debug(guess_pars)
plt.plot(x, std_fold_fit_func(guess_pars, x), "r--")
fit_pars, success = optimize.leastsq(
std_residuals, guess_pars[:], args=(x, profile)
)
if debug:
plt.plot(x, std_fold_fit_func(fit_pars, x), "g--")
fit_pars[startidx : startidx + 2] = adjust_amp_phase(
fit_pars[startidx : startidx + 2]
)
fit_pars[startidx + 2 : startidx + 4] = adjust_amp_phase(
fit_pars[startidx + 2 : startidx + 4]
)
chisq = np.sum(
(profile - std_fold_fit_func(fit_pars, x)) ** 2 / profile_err ** 2
) / (len(profile) - (startidx + 4))
if debug:
plt.plot(x, std_fold_fit_func(fit_pars, x), "b--")
if chisq < chisq_save:
chisq_save = chisq
fit_pars_save = fit_pars[:]
success_save = success
if debug:
plt.savefig("debug_fit_profile.png")
plt.close(fig)
return fit_pars_save, success_save, chisq_save
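# Hypothetical usage sketch, not from the original source: fit a synthetic noiseless
# profile with the sinusoidal model above and evaluate the best-fit template. The
# profile, error bars, and parameter values here are made up for illustration only.
def _demo_fit_profile_with_sinusoids():
    phase = np.arange(0, 1, 1 / 64)
    profile = 10 + 2 * np.cos(2 * np.pi * (phase - 0.3))
    profile_err = np.ones_like(profile)
    pars, success, chisq = fit_profile_with_sinusoids(
        profile, profile_err, baseline=True
    )
    return std_fold_fit_func(pars, phase), chisq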
def fit_profile(
profile,
profile_err,
debug=False,
nperiods=1,
phaseref="default",
baseline=False,
):
return fit_profile_with_sinusoids(
profile, profile_err, debug=debug, nperiods=nperiods, baseline=baseline
)
def run_folding(
file,
freq,
fdot=0,
fddot=0,
nbin=16,
nebin=16,
tref=None,
test=False,
emin=None,
emax=None,
norm="to1",
smooth_window=None,
deorbit_par=None,
pepoch=None,
**opts,
):
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
file_label = ""
ev = load_events(file)
if deorbit_par is not None:
ev = deorbit_events(ev, deorbit_par)
plot_energy = True
ev, elabel = filter_energy(ev, emin, emax)
times, energy = ev.time, ev.energy
if emin is None:
emin = np.min(energy)
if emax is None:
emax = np.max(energy)
if elabel == "":
plot_energy = False
if tref is not None and pepoch is not None:
raise ValueError("Only specify one between tref and pepoch")
elif pepoch is not None:
tref = (pepoch - ev.mjdref) * 86400
elif tref is None:
tref = times[0]
phases = pulse_phase(times - tref, freq, fdot, fddot, to_1=True)
binx = np.linspace(0, 1, nbin + 1)
if plot_energy:
biny = np.percentile(energy, np.linspace(0, 100, nebin + 1))
biny[0] = emin
biny[-1] = emax
profile, _ = np.histogram(phases, bins=binx)
if smooth_window is None:
smooth_window = np.min([len(profile), 10])
smooth_window = _check_odd(smooth_window)
smoothed_profile = savgol_filter(
profile, window_length=smooth_window, polyorder=3, mode="wrap"
)
profile = np.concatenate((profile, profile))
smooth = np.concatenate((smoothed_profile, smoothed_profile))
if plot_energy:
histen, _ = np.histogram(energy, bins=biny)
hist2d, _, _ = np.histogram2d(
phases.astype(np.float64), energy, bins=(binx, biny)
)
binx = np.concatenate((binx[:-1], binx + 1))
meanbins = (binx[:-1] + binx[1:]) / 2
if plot_energy:
hist2d = np.vstack((hist2d, hist2d))
hist2d_save = np.copy(hist2d)
X, Y = np.meshgrid(binx, biny)
if norm == "ratios":
hist2d /= smooth[:, np.newaxis]
hist2d *= histen[np.newaxis, :]
file_label = "_ratios"
else:
hist2d /= histen[np.newaxis, :]
factor = np.max(hist2d, axis=0)[np.newaxis, :]
hist2d /= factor
file_label = "_to1"
plt.figure(figsize=(8, 8))
if plot_energy:
gs = GridSpec(2, 2, height_ratios=(1.5, 3))
ax0 = plt.subplot(gs[0, 0])
ax1 = plt.subplot(gs[1, 0], sharex=ax0)
ax2 = plt.subplot(gs[1, 1], sharex=ax0)
ax3 = plt.subplot(gs[0, 1])
else:
ax0 = plt.subplot()
# Plot pulse profile
    smooth_max = np.max(smooth)
    smooth_min = np.min(smooth)
ax0.plot(meanbins, profile, drawstyle="steps-mid", color="white", zorder=2)
ax0.plot(
meanbins,
smooth,
drawstyle="steps-mid",
label="Smooth profile "
"(P.F. = {:.1f}%)".format(100 * (max - min) / max),
color="k",
zorder=3,
)
err_low, err_high = poisson_conf_interval(
smooth, interval="frequentist-confidence", sigma=3
)
try:
ax0.fill_between(
meanbins,
err_low,
err_high,
color="grey",
zorder=1,
alpha=0.5,
label="3-sigma confidence",
step="mid",
)
except AttributeError:
# MPL < 2
ax0.fill_between(
meanbins,
err_low,
err_high,
color="grey",
zorder=1,
alpha=0.5,
label="3-sigma confidence",
)
    ax0.axhline(smooth_max, lw=1, color="k")
    ax0.axhline(smooth_min, lw=1, color="k")
mean = np.mean(profile)
ax0.fill_between(
meanbins, mean - np.sqrt(mean), mean +
|
np.sqrt(mean)
|
numpy.sqrt
|
__all__ = ['training_loop', 'ResNIHCM']
import math
import pandas as pd
import os
import pickle
import datetime
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.optim as optim
import torch.nn as nn
from torch.optim import Adam
from torch.distributions import constraints
from torch.utils.data import Dataset, DataLoader
import pyro
import pyro.distributions as dist
from pyro.optim import MultiStepLR, ExponentialLR
from pyro.infer import SVI, Trace_ELBO, TraceMeanField_ELBO
def training_loop(n_epochs, optimizer, model, loss_fn, train_loader, cuda=False, priors=None, prior_network=False, new_data=False, missing_data=False):
if cuda:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if device.type == "cpu":
            print("Cuda is not available. CPU is used.")
else:
device="cpu"
model=model.to(device)
svi = SVI(model.model, model.guide, optimizer, loss=loss_fn)
loss_list=[]
for epoch in range(1, n_epochs + 1):
loss_train = 0.0
for ix, (y_net, t_out,i_heat,i_heat_on,i_heat_off,i_cool,i_cool_on,i_cool_off,i_aux,i_aux_on,i_aux_off,i_heat_df) in enumerate(train_loader):
y_net = y_net.to(device=device) # <1>
t_out = t_out.to(device=device) # <1>
i_heat = i_heat.to(device=device) # <1>
i_heat_on = i_heat_on.to(device=device) # <1>
i_heat_off = i_heat_off.to(device=device) # <1>
i_cool = i_cool.to(device=device) # <1>
i_cool_on = i_cool_on.to(device=device) # <1>
i_cool_off = i_cool_off.to(device=device) # <1>
i_aux = i_aux.to(device=device) # <1>
i_aux_on = i_aux_on.to(device=device) # <1>
i_aux_off = i_aux_off.to(device=device) # <1>
i_heat_df =i_heat_df.to(device=device)
if priors is None:
loss=svi.step(y_net=y_net, t_out=t_out,i_heat=i_heat,i_heat_on=i_heat_on,i_heat_off=i_heat_off,i_cool=i_cool,i_cool_on=i_cool_on,i_cool_off=i_cool_off,i_aux=i_aux,i_aux_on=i_aux_on,i_aux_off=i_aux_off,i_heat_df=i_heat_df,priors=None)
            else:
                # Priors were passed in explicitly, but no update path is implemented for that case.
                raise ValueError("Training with externally provided priors is not supported; check the priors, new_data, and prior_network parameters")
loss_train += loss
loss_list.append(loss_train / len(train_loader))
if epoch == 1 or epoch % 5 == 0:
print('{} Epoch {}, Training loss {}'.format(
datetime.datetime.now(), epoch,
loss_train / len(train_loader)))
        if (epoch == 1 or epoch % 10 == 0) and isinstance(optimizer, pyro.optim.lr_scheduler.PyroLRScheduler):
optimizer.step()
#print(f'learning rate {next(iter(svi.optim.optim_objs.values())).get_last_lr()[0]}')
return loss_list
class ResNIHCM(nn.Module):
def __init__(self):
super().__init__()
# define dimensions
# Use ELU see Murphy p.397
self.elu=nn.ELU()
self.relu=nn.ReLU()
self.softmax=nn.Softmax(dim=1)
self.tanh=nn.Tanh()
self.softplus=nn.Softplus()
def calculate_concentration(self,mu,sigma):
concentration_alpha=((1-mu)/(sigma**2)-1/mu)*(mu**2)
concentration_beta=concentration_alpha*(1/mu-1)
return concentration_alpha, concentration_beta
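    # Hypothetical sanity-check sketch, not part of the original class: the values
    # returned above are the standard Beta moment-matching
    #   alpha = ((1 - mu) / sigma**2 - 1 / mu) * mu**2,  beta = alpha * (1 / mu - 1),
    # so alpha / (alpha + beta) should recover mu, and the implied variance sigma**2.
    def _demo_calculate_concentration(self):
        mu, sigma = torch.tensor([0.5]), torch.tensor([np.sqrt(1 / 12)])
        alpha, beta = self.calculate_concentration(mu, sigma)
        mean_back = alpha / (alpha + beta)
        var_back = alpha * beta / ((alpha + beta) ** 2 * (alpha + beta + 1))
        return mean_back, var_back  # approximately 0.5 and 1/12 for this flat-Beta case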
def model(self, y_net, t_out,
i_heat,i_heat_on,i_heat_off,
i_cool,i_cool_on,i_cool_off,
i_aux,i_aux_on,i_aux_off,i_heat_df,
priors=None):
        # This process is hard to generalize; the inputs may be matrices.
        # Set up the initial network priors.
self.batch_sz=t_out.shape[0]
device=t_out.device
if priors is None:
add_noise=0 # no noise addition for priors
noise_scale=0.01
noise_mean=0
priors={
"mu_misc":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_misc~logN(-3,2.5) [0.0004,0.05,6.783]
"sigma_misc":np.array([1.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_beta0_heat":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
"sigma_beta0_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
"mu_beta1_heat":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
"sigma_beta1_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_heat~logN(-1.2,0.6) [0.093,0.30,1.0]
"mu_heat_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_on~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
"sigma_heat_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_heat_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_off~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
"sigma_heat_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_beta0_cool":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
"sigma_beta0_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
"mu_beta1_cool":np.array([-4.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
"sigma_beta1_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_cool~logN(-1.2,0.6) [0.093,0.30,1.0]
"mu_cool_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_on~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
"sigma_cool_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_cool_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_cool_off~Beta(mu_cool_on=0.5,sigma_cool_on=1/12) # 0~1 flat
"sigma_cool_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux":np.array([-3.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
"sigma_aux":np.array([1.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_heat_df":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_aux~logN(-0.4,0.4) [0.3,0.67,1.43]
"sigma_heat_df":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_on~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
"sigma_aux_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_aux_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_aux_off~Beta(mu_aux_on=0.5,sigma_aux_on=1/12) # 0~1 flat
"sigma_aux_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_phi_df":np.array([-1/3])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # phi~N(-1/3,1/6) give [-2/3,-1/3,0] which is [-10,0,10] in real scale
# "sigma_phi_df":np.array([1/6])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
# "mu_psi":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # psi~Beta(mu_psi=0.5,sigma_psi=1/12) # 0~1 flat
# "sigma_psi":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"mu_sigma_net":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_sigma_net":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise #
}
mu_misc=torch.tensor(priors['mu_misc'],dtype=torch.float32).to(device)
sigma_misc=torch.tensor(priors['sigma_misc'],dtype=torch.float32).to(device)
mu_beta0_heat=torch.tensor(priors['mu_beta0_heat'],dtype=torch.float32).to(device)
sigma_beta0_heat=torch.tensor(priors['sigma_beta0_heat'],dtype=torch.float32).to(device)
mu_beta1_heat=torch.tensor(priors['mu_beta1_heat'],dtype=torch.float32).to(device)
sigma_beta1_heat=torch.tensor(priors['sigma_beta1_heat'],dtype=torch.float32).to(device)
sigma_heat=torch.tensor(priors['sigma_heat'],dtype=torch.float32).to(device)
mu_heat_on=torch.tensor(priors['mu_heat_on'],dtype=torch.float32).to(device)
sigma_heat_on=torch.tensor(priors['sigma_heat_on'],dtype=torch.float32).to(device)
mu_heat_off=torch.tensor(priors['mu_heat_off'],dtype=torch.float32).to(device)
sigma_heat_off=torch.tensor(priors['sigma_heat_off'],dtype=torch.float32).to(device)
mu_beta0_cool=torch.tensor(priors['mu_beta0_cool'],dtype=torch.float32).to(device)
sigma_beta0_cool=torch.tensor(priors['sigma_beta0_cool'],dtype=torch.float32).to(device)
mu_beta1_cool=torch.tensor(priors['mu_beta1_cool'],dtype=torch.float32).to(device)
sigma_beta1_cool=torch.tensor(priors['sigma_beta1_cool'],dtype=torch.float32).to(device)
sigma_cool=torch.tensor(priors['sigma_cool'],dtype=torch.float32).to(device)
mu_cool_on=torch.tensor(priors['mu_cool_on'],dtype=torch.float32).to(device)
sigma_cool_on=torch.tensor(priors['sigma_cool_on'],dtype=torch.float32).to(device)
mu_cool_off=torch.tensor(priors['mu_cool_off'],dtype=torch.float32).to(device)
sigma_cool_off=torch.tensor(priors['sigma_cool_off'],dtype=torch.float32).to(device)
mu_aux=torch.tensor(priors['mu_aux'],dtype=torch.float32).to(device)
sigma_aux=torch.tensor(priors['sigma_aux'],dtype=torch.float32).to(device)
mu_aux_on=torch.tensor(priors['mu_aux_on'],dtype=torch.float32).to(device)
sigma_aux_on=torch.tensor(priors['sigma_aux_on'],dtype=torch.float32).to(device)
mu_aux_off=torch.tensor(priors['mu_aux_off'],dtype=torch.float32).to(device)
sigma_aux_off=torch.tensor(priors['sigma_aux_off'],dtype=torch.float32).to(device)
mu_heat_df=torch.tensor(priors['mu_heat_df'],dtype=torch.float32).to(device)
sigma_heat_df=torch.tensor(priors['sigma_heat_df'],dtype=torch.float32).to(device)
#mu_phi_df=torch.tensor(priors['mu_phi_df'],dtype=torch.float32).to(device)
#sigma_phi_df=torch.tensor(priors['sigma_phi_df'],dtype=torch.float32).to(device)
#mu_psi=torch.tensor(priors['mu_psi'],dtype=torch.float32).to(device)
#sigma_psi=torch.tensor(priors['sigma_psi'],dtype=torch.float32).to(device)
mu_sigma_net=torch.tensor(priors['mu_sigma_net'],dtype=torch.float32).to(device)
sigma_sigma_net=torch.tensor(priors['sigma_sigma_net'],dtype=torch.float32).to(device)
E_misc=self.softplus(pyro.sample("E_misc",dist.Normal(mu_misc,sigma_misc).to_event(1)))
# here mu_heat is not real scale. E_heat~LogNormal(mu_heat,sigma_heat)
beta0_heat=pyro.sample("beta0_heat",dist.Normal(mu_beta0_heat,sigma_beta0_heat).to_event(1))
beta1_heat=self.softplus(pyro.sample("beta1_heat",dist.Normal(mu_beta1_heat,sigma_beta1_heat).to_event(1)))
mu_heat=pyro.deterministic("mu_heat",beta0_heat+beta1_heat*t_out)
E_heat=self.softplus(pyro.sample("E_heat",dist.Normal(mu_heat,sigma_heat).to_event(1)))
#print(f"E_heat shape is {E_heat.shape}")
beta0_cool=pyro.sample("beta0_cool",dist.Normal(mu_beta0_cool,sigma_beta0_cool).to_event(1))
beta1_cool=self.softplus(pyro.sample("beta1_cool",dist.Normal(mu_beta1_cool,sigma_beta1_cool).to_event(1)))
mu_cool=pyro.deterministic("mu_cool",beta0_cool+beta1_cool*t_out)
E_cool=self.softplus(pyro.sample("E_cool",dist.Normal(mu_cool,sigma_cool).to_event(1)))
E_aux=self.softplus(pyro.sample("E_aux",dist.Normal(mu_aux,sigma_aux).to_event(1)))
# phi_df=pyro.sample("phi_df",dist.Normal(mu_phi_df,sigma_phi_df).to_event(1))
# mu_psi_alpha,mu_psi_beta=self.calculate_concentration(mu=mu_psi,sigma=sigma_psi)
# psi=pyro.sample("psi",dist.Beta(concentration1=mu_psi_alpha ,concentration0=mu_psi_beta).to_event(1))
eta_heat=i_heat.clone()
mu_heat_on_alpha,mu_heat_on_beta=self.calculate_concentration(mu=mu_heat_on,sigma=sigma_heat_on)
mu_heat_off_alpha,mu_heat_off_beta=self.calculate_concentration(mu=mu_heat_off,sigma=sigma_heat_off)
# print(f'mu_heat_on_alpha is {mu_heat_on_alpha}')
# print(f'mu_heat_on_beta is {mu_heat_on_beta}')
eta_heat_on=pyro.sample("eta_heat_on",dist.Beta(concentration1=mu_heat_on_alpha ,concentration0=mu_heat_on_beta).to_event(1))
eta_heat_off=pyro.sample("eta_heat_off",dist.Beta(concentration1=mu_heat_off_alpha ,concentration0=mu_heat_off_beta).to_event(1))
eta_heat[i_heat_on==1]=eta_heat_on#[i_heat_on==1]
eta_heat[i_heat_off==1]=eta_heat_off#[i_heat_off==1]
eta_cool=i_cool.clone()
mu_cool_on_alpha,mu_cool_on_beta=self.calculate_concentration(mu=mu_cool_on,sigma=sigma_cool_on)
mu_cool_off_alpha,mu_cool_off_beta=self.calculate_concentration(mu=mu_cool_off,sigma=sigma_cool_off)
eta_cool_on=pyro.sample("eta_cool_on",dist.Beta(concentration1=mu_cool_on_alpha ,concentration0=mu_cool_on_beta).to_event(1))
eta_cool_off=pyro.sample("eta_cool_off",dist.Beta(concentration1=mu_cool_off_alpha ,concentration0=mu_cool_off_beta).to_event(1))
eta_cool[i_cool_on==1]=eta_cool_on#[i_cool_on==1]
eta_cool[i_cool_off==1]=eta_cool_off#[i_cool_off==1]
eta_aux=i_aux.clone()
mu_aux_on_alpha,mu_aux_on_beta=self.calculate_concentration(mu=mu_aux_on,sigma=sigma_aux_on)
mu_aux_off_alpha,mu_aux_off_beta=self.calculate_concentration(mu=mu_aux_off,sigma=sigma_aux_off)
eta_aux_on=pyro.sample("eta_aux_on",dist.Beta(concentration1=mu_aux_on_alpha ,concentration0=mu_aux_on_beta).to_event(1))
eta_aux_off=pyro.sample("eta_aux_off",dist.Beta(concentration1=mu_aux_off_alpha ,concentration0=mu_aux_off_beta).to_event(1))
eta_aux[i_aux_on==1]=eta_aux_on#[i_aux_on==1]
eta_aux[i_aux_off==1]=eta_aux_off#[i_aux_off==1]
E_heat_df=self.softplus(pyro.sample("E_heat_df",dist.Normal(mu_heat_df,sigma_heat_df).to_event(1)))
#i_df=torch.zeros_like(i_heat).to(device)
# https://pytorch.org/docs/stable/distributions.html#torch.distributions.beta.Beta.concentration1
# concentration1 (float or Tensor) – 1st concentration parameter of the distribution (often referred to as alpha)
# concentration0 (float or Tensor) – 2nd concentration parameter of the distribution (often referred to as beta)
#with pyro.plate("Emisc", size=t_out.shape[0]):
#i_df_on=pyro.sample("i_df_on",dist.Binomial(total_count=1,probs=psi))
#i_df=torch.where((i_heat==torch.tensor(1,dtype=torch.float32))&(t_out<phi_df),i_df_on,i_df)
y_nan=torch.any(torch.cat([torch.isnan(i_heat)[:,None],
torch.isnan(i_cool)[:,None],
torch.isnan(i_aux)[:,None],
torch.isnan(t_out)[:,None],
torch.isnan(i_heat_df)[:,None],
torch.isnan(y_net)[:,None]
],dim=1),axis=1)
#print(f'y_nan is {y_nan}')
# print(f'eta_heat is {eta_heat}')
# print(f'i_heat is {i_heat}')
mu_net_=eta_heat*i_heat*E_heat+eta_cool*i_cool*E_cool+(eta_aux*i_aux)*E_aux+(i_heat_df)*E_heat_df+E_misc
mu_net=pyro.deterministic("mu_net",mu_net_[~y_nan])
sigma_net = self.softplus(pyro.sample("sigma_t_unit", dist.Normal(mu_sigma_net,sigma_sigma_net).to_event(1)))
#print(f"sigma_net is {sigma_net}")
y_net_=y_net.flatten()[~y_nan]
with pyro.plate("data", size=mu_net.shape[0]):
obs_net=pyro.sample("obs_net", dist.Normal(mu_net, sigma_net).to_event(1), obs=y_net_.flatten()) # .to_event(1)
return mu_net,priors
def guide(self, y_net, t_out,
i_heat,i_heat_on,i_heat_off,
i_cool,i_cool_on,i_cool_off,
i_aux,i_aux_on,i_aux_off,i_heat_df,
priors=None):
self.batch_sz=t_out.shape[0]
device=t_out.device
if priors is None:
# add noise for priors
add_noise=1
noise_scale=0.001
noise_mean=0
priors={
"mu_misc":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_misc~logN(-3,2.5) [0.0004,0.05,6.783]
"sigma_misc":np.array([1.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_beta0_heat":np.array([-3.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
"sigma_beta0_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
"mu_beta1_heat":np.array([-4.])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
"sigma_beta1_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_heat":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # E_heat~logN(-1.2,0.6) [0.093,0.30,1.0]
"mu_heat_on":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_on~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
"sigma_heat_on":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_heat_off":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # eta_heat_off~Beta(mu_heat_on=0.5,sigma_heat_on=1/12) # 0~1 flat
"sigma_heat_off":np.sqrt(np.array([1/12]))+np.random.normal(noise_mean,noise_scale,1)*add_noise,
"mu_beta0_cool":np.array([-2.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta0~N(-2,1.0) [-4.0~0.0]
"sigma_beta0_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # exp(beta0) [0.018~1]
"mu_beta1_cool":np.array([-4.0])+np.random.normal(noise_mean,noise_scale,1)*add_noise, # beta1~logN(-1.5,0.8) [0.04~1.0]
"sigma_beta1_cool":np.array([0.5])+np.random.normal(noise_mean,noise_scale,1)*add_noise, #
"sigma_cool":np.array([0.5])+
|
np.random.normal(noise_mean,noise_scale,1)
|
numpy.random.normal
|
from __future__ import division
import numpy as np
import scipy.stats.kde as kde
def calc_min_interval(x, alpha):
"""Internal method to determine the minimum interval of a given width
Assumes that x is sorted numpy array.
"""
n = len(x)
cred_mass = 1.0-alpha
interval_idx_inc = int(np.floor(cred_mass*n))
n_intervals = n - interval_idx_inc
interval_width = x[interval_idx_inc:] - x[:n_intervals]
if len(interval_width) == 0:
raise ValueError('Too few elements for interval calculation')
min_idx = np.argmin(interval_width)
hdi_min = x[min_idx]
hdi_max = x[min_idx+interval_idx_inc]
return hdi_min, hdi_max
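# Hypothetical usage sketch, not part of the original module: for sorted samples from
# a standard normal, the 95% HPD interval found by calc_min_interval should be close
# to the central interval (-1.96, 1.96).
def _demo_calc_min_interval():
    samples = np.sort(np.random.normal(size=100000))
    return calc_min_interval(samples, alpha=0.05)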
def hdi(x, alpha=0.05):
"""Calculate highest posterior density (HPD) of array for given alpha.
The HPD is the minimum width Bayesian credible interval (BCI).
:Arguments:
x : Numpy array
An array containing MCMC samples
alpha : float
Desired probability of type I error (defaults to 0.05)
"""
# Make a copy of trace
x = x.copy()
# For multivariate node
if x.ndim > 1:
# Transpose first, then sort
tx = np.transpose(x, list(range(x.ndim))[1:]+[0])
dims =
|
np.shape(tx)
|
numpy.shape
|
from time import process_time
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pytest
from skan.csr import Skeleton, summarize
from skan._testdata import (
tinycycle, tinyline, skeleton0, skeleton1, skeleton2, skeleton3d,
topograph1d, skeleton4, junction_first
)
def test_tiny_cycle():
skeleton = Skeleton(tinycycle, junction_mode='centroid')
assert skeleton.paths.shape == (1, 5)
def test_skeleton1_topo():
skeleton = Skeleton(skeleton1, junction_mode='centroid')
assert skeleton.paths.shape == (4, 21)
paths_list = skeleton.paths_list()
reference_paths = [[8, 6, 1, 2, 3, 4, 5, 7, 11, 10, 13], [8, 9, 13],
[8, 12, 14], [13, 15, 16, 17]]
d0 = 1 + np.sqrt(2)
reference_distances = [5 * d0, d0, d0, 1 + d0]
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
assert_allclose(
sorted(skeleton.path_lengths()), sorted(reference_distances)
)
def test_skeleton1_float():
image = np.zeros(skeleton1.shape, dtype=float)
image[skeleton1] = 1 + np.random.random(np.sum(skeleton1))
skeleton = Skeleton(image, junction_mode='centroid')
path, data = skeleton.path_with_data(0)
assert 1.0 < np.mean(data) < 2.0
def test_skeleton_coordinates():
skeleton = Skeleton(skeleton1, junction_mode='centroid')
last_path_coordinates = skeleton.path_coordinates(3)
assert_allclose(last_path_coordinates, [[3, 3], [4, 4], [4, 5], [4, 6]])
def test_path_length_caching():
skeleton = Skeleton(skeleton3d, junction_mode='centroid')
t0 = process_time()
distances = skeleton.path_lengths()
t1 = process_time()
distances2 = skeleton.path_lengths()
t2 = process_time()
assert t2 - t1 < t1 - t0
assert np.all((distances > 0.99 + np.sqrt(2))
& (distances < 5.01 + 5 * np.sqrt(2)))
def test_tip_junction_edges():
skeleton = Skeleton(skeleton4, junction_mode='centroid')
reference_paths = [[1, 2], [2, 4, 5], [2, 7]]
paths_list = skeleton.paths_list()
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
def test_path_stdev():
image = np.zeros(skeleton1.shape, dtype=float)
image[skeleton1] = 1 + np.random.random(np.sum(skeleton1))
skeleton = Skeleton(image, junction_mode='centroid')
# longest_path should be 0, but could change.
longest_path = np.argmax(skeleton.path_lengths())
dev = skeleton.path_stdev()[longest_path]
assert 0.09 < dev < 0.44 # chance is < 1/10K that this will fail
# second check: first principles.
skeleton2 = Skeleton(image**2, junction_mode='centroid')
# (Var = StDev**2 = E(X**2) - (E(X))**2)
assert_allclose(
skeleton.path_stdev()**2,
skeleton2.path_means() - skeleton.path_means()**2
)
def test_junction_first():
"""Ensure no self-edges exist in multi-pixel junctions.
Before commit 64047622, the skeleton class would include self-edges
within junctions in its paths list, but only when the junction was visited
before any of its adjacent branches. This turns out to be tricky to achieve
but not impossible in 2D.
"""
assert [1, 1] not in Skeleton(
junction_first, junction_mode='centroid'
).paths_list()
def test_skeleton_summarize():
image = np.zeros(skeleton2.shape, dtype=float)
image[skeleton2] = 1 + np.random.random(np.sum(skeleton2))
skeleton = Skeleton(image, junction_mode='centroid')
summary = summarize(skeleton)
assert set(summary['skeleton-id']) == {1, 2}
assert (
|
np.all(summary['mean-pixel-value'] < 2)
|
numpy.all
|
import pandas as pd
import numpy as np
import imp
from chefboost.commons import functions, evaluate
from chefboost.training import Preprocess, Training
from chefboost import Chefboost as cb
from tqdm import tqdm
import gc
def findPrediction(row):
epoch = row['Epoch']
row = row.drop(labels=['Epoch'])
columns = row.shape[0]
params = []
for j in range(0, columns-1):
params.append(row[j])
moduleName = "outputs/rules/rules%s" % (epoch-1)
fp, pathname, description = imp.find_module(moduleName)
myrules = imp.load_module(moduleName, fp, pathname, description)
#prediction = int(myrules.findDecision(params))
prediction = myrules.findDecision(params)
return prediction
def regressor(df, config, header, dataset_features, validation_df = None, process_id = None):
models = []
#we will update decisions in every epoch, this will be used to restore
base_actuals = df.Decision.values
algorithm = config['algorithm']
enableRandomForest = config['enableRandomForest']
num_of_trees = config['num_of_trees']
enableMultitasking = config['enableMultitasking']
enableGBM = config['enableGBM']
epochs = config['epochs']
learning_rate = config['learning_rate']
enableAdaboost = config['enableAdaboost']
#------------------------------
boosted_from = 0; boosted_to = 0
#------------------------------
base_df = df.copy()
#gbm will manipulate actuals. store its raw version.
target_values = base_df['Decision'].values
num_of_instances = target_values.shape[0]
root = 1
file = "outputs/rules/rules0.py"; json_file = "outputs/rules/rules0.json"
functions.createFile(file, header)
functions.createFile(json_file, "[\n")
Training.buildDecisionTree(df,root,file, config, dataset_features
, parent_level = 0, leaf_id = 0, parents = 'root') #generate rules0
#functions.storeRule(json_file," {}]")
df = base_df.copy()
base_df['Boosted_Prediction'] = 0
#------------------------------
best_epoch_idx = 0; best_epoch_loss = 1000000
pbar = tqdm(range(1, epochs+1), desc='Boosting')
#for index in range(1,epochs+1):
#for index in tqdm(range(1,epochs+1), desc='Boosting'):
for index in pbar:
#print("epoch ",index," - ",end='')
loss = 0
#run data(i-1) and rules(i-1), save data1
#dynamic import
moduleName = "outputs/rules/rules%s" % (index-1)
fp, pathname, description = imp.find_module(moduleName)
myrules = imp.load_module(moduleName, fp, pathname, description) #rules0
models.append(myrules)
new_data_set = "outputs/data/data%s.csv" % (index)
f = open(new_data_set, "w")
#put header in the following file
columns = df.shape[1]
mae = 0
#----------------------------------------
df['Epoch'] = index
df['Prediction'] = df.apply(findPrediction, axis=1)
base_df['Boosted_Prediction'] += df['Prediction']
loss = (base_df['Boosted_Prediction'] - base_df['Decision']).pow(2).sum()
current_loss = loss / num_of_instances #mse
if index == 1:
boosted_from = current_loss * 1
elif index == epochs:
boosted_to = current_loss * 1
if current_loss < best_epoch_loss:
best_epoch_loss = current_loss * 1
best_epoch_idx = index * 1
df['Decision'] = int(learning_rate)*(df['Decision'] - df['Prediction'])
df = df.drop(columns = ['Epoch', 'Prediction'])
#---------------------------------
df.to_csv(new_data_set, index=False)
#data(i) created
#---------------------------------
file = "outputs/rules/rules"+str(index)+".py"
json_file = "outputs/rules/rules"+str(index)+".json"
functions.createFile(file, header)
functions.createFile(json_file, "[\n")
current_df = df.copy()
Training.buildDecisionTree(df,root,file, config, dataset_features
, parent_level = 0, leaf_id = 0, parents = 'root', main_process_id = process_id)
#functions.storeRule(json_file," {}]")
df = current_df.copy() #numeric features require this restoration to apply findDecision function
#rules(i) created
loss = loss / num_of_instances
#print("epoch ",index," - loss: ",loss)
#print("loss: ",loss)
pbar.set_description("Epoch %d. Loss: %d. Process: " % (index, loss))
gc.collect()
#---------------------------------
print("The best epoch is ", best_epoch_idx," with ", best_epoch_loss," loss value")
models = models[0:best_epoch_idx]
config["epochs"] = best_epoch_idx
print("MSE of ",num_of_instances," instances are boosted from ",boosted_from," to ",best_epoch_loss," in ",epochs," epochs")
return models
def classifier(df, config, header, dataset_features, validation_df = None, process_id = None):
models = []
print("gradient boosting for classification")
epochs = config['epochs']
enableParallelism = config['enableParallelism']
temp_df = df.copy()
original_dataset = df.copy()
worksheet = df.copy()
classes = df['Decision'].unique()
boosted_predictions = np.zeros([df.shape[0], len(classes)])
pbar = tqdm(range(0, epochs), desc='Boosting')
#store actual set, we will use this to calculate loss
actual_set = pd.DataFrame(np.zeros([df.shape[0], len(classes)]), columns=classes)
for i in range(0, len(classes)):
current_class = classes[i]
actual_set[current_class] = np.where(df['Decision'] == current_class, 1, 0)
actual_set = actual_set.values #transform it to numpy array
best_accuracy_idx = 0; best_accuracy_value = 0
accuracies = []
#for epoch in range(0, epochs):
for epoch in pbar:
for i in range(0, len(classes)):
current_class = classes[i]
if epoch == 0:
temp_df['Decision'] = np.where(df['Decision'] == current_class, 1, 0)
worksheet['Y_'+str(i)] = temp_df['Decision']
else:
temp_df['Decision'] = worksheet['Y-P_'+str(i)]
predictions = []
#change data type for decision column
temp_df[['Decision']].astype('int64')
root = 1
file_base = "outputs/rules/rules-for-"+current_class+"-round-"+str(epoch)
file = file_base+".py"
functions.createFile(file, header)
if enableParallelism == True:
json_file = file_base+".json"
functions.createFile(json_file, "[\n")
Training.buildDecisionTree(temp_df, root, file, config, dataset_features
, parent_level = 0, leaf_id = 0, parents = 'root', main_process_id = process_id)
#decision rules created
#----------------------------
#dynamic import
moduleName = "outputs/rules/rules-for-"+current_class+"-round-"+str(epoch)
fp, pathname, description = imp.find_module(moduleName)
myrules = imp.load_module(moduleName, fp, pathname, description) #rules0
models.append(myrules)
num_of_columns = df.shape[1]
for row, instance in df.iterrows():
features = []
for j in range(0, num_of_columns-1): #iterate on features
features.append(instance[j])
actual = temp_df.loc[row]['Decision']
prediction = myrules.findDecision(features)
predictions.append(prediction)
#----------------------------
if epoch == 0:
worksheet['F_'+str(i)] = 0
else:
worksheet['F_'+str(i)] = pd.Series(predictions).values
boosted_predictions[:,i] = boosted_predictions[:,i] + worksheet['F_'+str(i)].values.astype(np.float32)
#print(boosted_predictions[0:5,:])
worksheet['P_'+str(i)] = 0
#----------------------------
temp_df = df.copy() #restoration
for row, instance in worksheet.iterrows():
f_scores = []
for i in range(0, len(classes)):
f_scores.append(instance['F_'+str(i)])
probabilities = functions.softmax(f_scores)
for j in range(0, len(probabilities)):
instance['P_'+str(j)] = probabilities[j]
worksheet.loc[row] = instance
for i in range(0, len(classes)):
worksheet['Y-P_'+str(i)] = worksheet['Y_'+str(i)] - worksheet['P_'+str(i)]
prediction_set = np.zeros([df.shape[0], len(classes)])
for i in range(0, boosted_predictions.shape[0]):
predicted_index =
|
np.argmax(boosted_predictions[i])
|
numpy.argmax
|
import numpy as np
from statsmodels.genmod.bayes_mixed_glm import (BinomialBayesMixedGLM,
PoissonBayesMixedGLM)
import pandas as pd
from scipy import sparse
from numpy.testing import assert_allclose, assert_equal
from scipy.optimize import approx_fprime
def gen_simple_logit(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[1, -1]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
    ident = np.zeros(nc, dtype=int)
return y, exog_fe, exog_vc, ident
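# Illustrative sketch, not part of the original test module: the simulated design has
# nc*cs rows, two fixed-effect columns, and one variance-component column per cluster.
def _demo_gen_simple_logit_shapes(nc=10, cs=10):
    y, exog_fe, exog_vc, ident = gen_simple_logit(nc, cs, 2)
    assert y.shape == (nc * cs,)
    assert exog_fe.shape == (nc * cs, 2)
    assert exog_vc.shape == (nc * cs, nc)
    assert ident.shape == (nc,)
    return y, exog_fe, exog_vc, ident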
def gen_simple_poisson(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[0.1, -0.1]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
    ident = np.zeros(nc, dtype=int)
return y, exog_fe, exog_vc, ident
def gen_crossed_logit(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
    ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_poisson(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
    ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_logit_pandas(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.arange(nc), np.ones(cs))
b = np.kron(np.ones(cs), np.arange(nc))
fe = np.ones(nc * cs)
vc = np.zeros(nc * cs)
for i in np.unique(a):
ii = np.flatnonzero(a == i)
vc[ii] += s1 * np.random.normal()
for i in np.unique(b):
ii = np.flatnonzero(b == i)
vc[ii] += s2 * np.random.normal()
lp = -0.5 * fe + vc
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
    ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
df = pd.DataFrame({"fe": fe, "a": a, "b": b, "y": y})
return df
def test_simple_logit_map():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-3)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt.predict(linear=linear, exog=exog)
pr2 = glmm.predict(rslt.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr1.max() <= 1, True)
def test_simple_poisson_map():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 0.2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
assert_allclose(
glmm1.logposterior_grad(rslt1.params),
np.zeros_like(rslt1.params),
atol=1e-3)
# This should give the same answer as above
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_map()
assert_allclose(rslt1.params, rslt2.params, atol=1e-4)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt1.predict(linear=linear, exog=exog)
pr2 = rslt2.predict(linear=linear, exog=exog)
pr3 = glmm1.predict(rslt1.params, linear=linear, exog=exog)
pr4 = glmm2.predict(rslt2.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2, rtol=1e-5)
assert_allclose(pr2, pr3, rtol=1e-5)
assert_allclose(pr3, pr4, rtol=1e-5)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr2.min() >= 0, True)
assert_equal(pr3.min() >= 0, True)
# Check dimensions and PSD status of cov_params
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_logit_map():
y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_poisson_map():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_logit_map_crossed_formula():
data = gen_crossed_logit_pandas(10, 10, 1, 0.5)
fml = "y ~ fe"
fml_vc = {"a": "0 + C(a)", "b": "0 + C(b)"}
glmm = BinomialBayesMixedGLM.from_formula(fml, fml_vc, data, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
|
np.zeros_like(rslt.params)
|
numpy.zeros_like
|
import os
import shutil
import numpy as np
import tensorflow as tf
from scipy.stats import gamma
from matplotlib import pyplot as plt
from neural_inverse_cdf_utils import InvertibleNeuralNetworkLayer, train, result_plot
class GammaCDF(object):
def __init__(self, theta_max=15, base_dir=os.getcwd()):
# define theta range
self.theta_min = 0
self.theta_max = theta_max
# log directory
self.log_dir = os.path.join(base_dir, 'InverseCDF', 'Gamma', 'logdir')
if os.path.exists(self.log_dir):
shutil.rmtree(self.log_dir)
# checkpoint directory
self.mdl_dir = os.path.join(base_dir, 'InverseCDF', 'Gamma', 'checkpoint', 'gamma')
if os.path.exists(self.mdl_dir):
shutil.rmtree(self.mdl_dir)
def sample_training_points(self, thetas_per_batch, samples_per_theta):
# sample thetas
thetas = np.random.random(thetas_per_batch) * 2 * self.theta_max - self.theta_max
thetas[thetas < 0] = np.exp(thetas[thetas < 0])
# loop over theta samples
z = []
u = []
theta = []
for i in range(len(thetas)):
# sample z
z.append(np.random.gamma(shape=thetas[i], size=samples_per_theta))
# compute target u
u.append(gamma.cdf(x=z[-1], a=thetas[i]))
# up-sample theta
theta.append(thetas[i] * np.ones(samples_per_theta))
# convert to arrays
z = np.concatenate(z)
u = np.concatenate(u)
theta = np.concatenate(theta)
return z, u, theta
def sample_test_points(self, theta, num_points=100):
# compute target theta quantile
theta = theta * np.ones(num_points)
# compute evaluation points
# z = np.linspace(0, self.z_max, num_points)
z = np.random.gamma(shape=theta, size=num_points)
z = np.sort(z)
# compute target
u = gamma.cdf(z, theta)
return z, u, theta
@staticmethod
def u_clamp(u):
# return clamped value--THIS CLAMP MUST BE INVERTIBLE!
return tf.nn.sigmoid(u)
@staticmethod
def z_clamp(z):
# return clamped value--THIS CLAMP MUST BE INVERTIBLE!
return tf.nn.elu(z) + 1
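# Hypothetical usage sketch, not part of the original script: draw a small batch of
# (z, u, theta) training triplets from the Gamma target. A temporary base_dir is used
# so the constructor's directory cleanup cannot touch any real checkpoints.
def _demo_gamma_cdf_training_points():
    import tempfile
    target = GammaCDF(theta_max=15, base_dir=tempfile.mkdtemp())
    z, u, theta = target.sample_training_points(thetas_per_batch=4, samples_per_theta=3)
    return z.shape, u.shape, theta.shape  # each is (12,)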
class NeuralInverseCDF(object):
"""
Neural CDF Forward: F(z; theta) --> u
Neural CDF Reverse: F_inv(u; theta) --> z
"""
def __init__(self, target, fwd_direction='cdf', trainable=True):
# save target object
self.target = target
# check and save learning direction
assert fwd_direction == 'cdf' or fwd_direction == 'inv_cdf'
self.fwd_direction = fwd_direction
# configure dimensions
self.inn_dim = 2
self.inn_layers = 8
# declare the Invertible Neural Network Blocks
self.inn = []
for i in range(self.inn_layers):
self.inn.append(InvertibleNeuralNetworkLayer(self.inn_dim, 'inn{:d}'.format(i+1), trainable=trainable))
# training placeholders
self.z_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='z')
self.u_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='u')
self.theta_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='theta')
# training outputs (None by default--will be overwritten by self.loss(*))
self.u_hat = None
self.z_hat = None
# configure training
self.thetas_per_batch = 100
self.samples_per_theta = 100
self.learning_rate = 5e-4
self.num_epochs = 1000
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
def _forward_eval(self, x, theta):
# run the forward direction
for i in range(self.inn_layers):
x = self.inn[i].forward_evaluation(x, theta)
return x
def _inverse_eval(self, x, theta):
# run the inverse direction
for i in range(self.inn_layers):
x = self.inn[self.inn_layers - 1 - i].inverse_evaluation(x, theta)
return x
def _load_feed_dict(self, z, u, theta):
# expand dimensions if needed
if len(np.shape(z)) < 2:
z = np.expand_dims(z, axis=-1)
if len(np.shape(u)) < 2:
u = np.expand_dims(u, axis=-1)
if len(np.shape(theta)) < 2:
theta = np.expand_dims(theta, axis=-1)
# initialize feed dictionary
feed_dict = dict()
# load dictionary
feed_dict.update({self.z_ph: z, self.u_ph: u, self.theta_ph: theta})
return feed_dict
def loss(self):
# build the forward model (double input to achieve even dimensions)
if self.fwd_direction == 'cdf':
x_fwd = self._forward_eval(tf.concat((self.z_ph, self.z_ph), axis=1), self.theta_ph)
else:
x_fwd = self._forward_eval(tf.concat((self.u_ph, self.u_ph), axis=1), self.theta_ph)
# apply forward model clamps and build backward model
if self.fwd_direction == 'cdf':
self.u_hat = self.target.u_clamp(x_fwd)
self.z_hat = self._inverse_eval(x_fwd, self.theta_ph)
else:
self.z_hat = self.target.z_clamp(x_fwd)
self.u_hat = self._inverse_eval(x_fwd, self.theta_ph)
# compute both losses
u_loss = tf.reduce_mean(tf.abs(self.u_ph - self.u_hat))
z_loss = tf.reduce_mean(tf.abs(self.z_ph - self.z_hat))
# if cdf direction, loss is w.r.t. u
if self.fwd_direction == 'cdf':
loss = u_loss
# otherwise it is w.r.t. z
else:
loss = z_loss
return loss, u_loss, z_loss
def load_feed_dict_train(self):
# sample training points
z, u, theta = self.target.sample_training_points(self.thetas_per_batch, self.samples_per_theta)
return self._load_feed_dict(z, u, theta)
def sample_operation(self, u, theta):
# expand dimensions if needed
if len(u.get_shape().as_list()) < 2:
u = tf.expand_dims(u, axis=-1)
if len(theta.get_shape().as_list()) < 2:
theta = tf.expand_dims(theta, axis=-1)
# cdf is the forward direction
if self.fwd_direction == 'cdf':
# run the inverse direction (double input to achieve even dimensions)
z = self._inverse_eval(tf.concat((u, u), axis=1), theta)
# inverse cdf is the forward direction
else:
# run the forward direction (double input to achieve even dimensions)
z = self._forward_eval(tf.concat((u, u), axis=1), theta)
# apply the z clamp and take the mean of the two dimensions
return tf.reduce_mean(self.target.z_clamp(z), axis=-1)
def restore(self, sess, var_list=None):
        # variable list not provided
if var_list is None:
# restore all checkpoint variables
tf_saver = tf.train.Saver()
tf_saver.restore(sess, self.target.mdl_dir)
# variable list provided
else:
            # restore only the specified checkpoint variables
tf_saver = tf.train.Saver(var_list=var_list)
tf_saver.restore(sess, self.target.mdl_dir)
if __name__ == '__main__':
# set random seeds
|
np.random.seed(123)
|
numpy.random.seed
|
import numpy as np
class MCTSNode:
"""
Code for this algorithm inspired by the tutorial given on:
https://ai-boson.github.io/mcts/
(Link also listed as a reference in our written report.)
"""
def __init__(self, state, player_number, origin, parent=None, parent_action=None):
self.state = state
self.want_to_win = origin
self.player_number = player_number
self.parent = parent
self.parent_action = parent_action
self.children = []
self.actions = self.state.get_legal_actions()
self.wins = 0
self.losses = 0
self.num_sims = 0
def expand(self):
action = self.actions.pop()
next_state = self.state.move_MTCS(self.player_number, action)
player = 0
if self.player_number == 1:
player = 2
else:
player = 1
child = MCTSNode(
next_state, player, self.want_to_win, parent=self, parent_action=action
)
self.children.append(child)
return child
def rollout(self):
while not self.state.is_game_over():
possible_moves = self.state.get_legal_actions()
action = self.choose_move(possible_moves)
self.state = self.state.move_MTCS(self.player_number, action)
return self.state.game_result(self.player_number)
def backpropagate(self, result):
self.num_sims += 1.0
if result == 1:
self.wins += 1
elif result == -1:
self.losses += 1
if self.parent:
self.parent.backpropagate(result)
def UCT(self, N, c, good):
        UCT = good * ((self.wins - self.losses) / self.num_sims) + c * np.sqrt(
            (2 * np.log(N) / self.num_sims)
        )
return UCT
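    # Illustrative sketch, not part of the original class: the score above is a
    # UCB1-style tradeoff between exploitation, (wins - losses) / num_sims, and an
    # exploration bonus c * sqrt(2 * ln(N) / num_sims), flipped by ``good`` when the
    # node belongs to the opponent.
    @staticmethod
    def _demo_uct_score(wins, losses, num_sims, N, c=1.4, good=1):
        exploit = good * (wins - losses) / num_sims
        explore = c * np.sqrt(2 * np.log(N) / num_sims)
        return exploit + explore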
def best_child(self, c=1.4):
good = 1 if self.player_number == self.want_to_win else -1
# multiply weights by -1 if it isn't the player we want to win
weights = [child.UCT(self.num_sims, c, good) for child in self.children]
return self.children[
|
np.argmax(weights)
|
numpy.argmax
|
import numpy as np
from scipy.spatial.distance import correlation as _correlation
np.seterr(divide='ignore', invalid='ignore')
def minkowski(x1, x2, power):
"""Minkowski Distance Metric
Parameters
----------
x1: numpy.ndarray
Vector one
x2: numpy.ndarray
Vector two
power: int
L_{power} norm order
Returns
-------
distance: float
Minkowski distance between `x1` and `x2`
"""
return np.linalg.norm(x1 - x2, power)
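# Illustrative sketch, not part of the original module: with power=2 the Minkowski
# metric reduces to the Euclidean distance, so (3, 4) measured from the origin gives 5.
def _demo_minkowski():
    x1 = np.array([0.0, 0.0])
    x2 = np.array([3.0, 4.0])
    return minkowski(x1, x2, 2)  # 5.0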
def cosine(x1, x2):
"""Cosine Distance Metric
Parameters
----------
x1: numpy.ndarray
Vector one
x2: numpy.ndarray
Vector two
Returns
-------
distance: float
Cosine distance between `x1` and `x2`
"""
return np.dot(x1.T, x2) / (np.linalg.norm(x1, 2) *
|
np.linalg.norm(x2, 2)
|
numpy.linalg.norm
|
import numpy as np
class PyHMC():
def __init__(self, log_prob, grad_log_prob, invmetric_diag=None):
self.log_prob, self.grad_log_prob = log_prob, grad_log_prob
self.V = lambda x : self.log_prob(x)*-1.
#self.V_g = lambda x : self.grad_log_prob(x)*-1.
self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0
if invmetric_diag is None: self.invmetric_diag = 1.
else: self.invmetric_diag = invmetric_diag
self.metricstd = self.invmetric_diag**-0.5
self.KE = lambda p: 0.5*(p**2 * self.invmetric_diag).sum()
self.KE_g = lambda p: p * self.invmetric_diag
def V_g(self, x):
self.Vgcount += 1
return self.grad_log_prob(x)*-1.
def unit_norm_KE(self, p):
return 0.5 * (p**2).sum()
def unit_norm_KE_g(self, p):
return p
def H(self, q,p):
self.Hcount += 1
return self.V(q) + self.KE(p)
def leapfrog(self, q, p, N, step_size):
self.leapcount += 1
q0, p0 = q, p
try:
p = p - 0.5*step_size * self.V_g(q)
for i in range(N-1):
q = q + step_size * self.KE_g(p)
p = p - step_size * self.V_g(q)
q = q + step_size * self.KE_g(p)
p = p - 0.5*step_size * self.V_g(q)
return q, p
except Exception as e:
print(e)
return q0, p0
def metropolis(self, qp0, qp1):
q0, p0 = qp0
q1, p1 = qp1
H0 = self.H(q0, p0)
H1 = self.H(q1, p1)
prob = np.exp(H0 - H1)
#prob = min(1., np.exp(H0 - H1))
if np.isnan(prob) or np.isinf(prob) or (q0-q1).sum()==0:
return q0, p0, 2., [H0, H1]
elif np.random.uniform(0., 1., size=1) > min(1., prob):
return q0, p0, 0., [H0, H1]
else: return q1, p1, 1., [H0, H1]
def hmc_step(self, q, N, step_size):
self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0
p = np.random.normal(size=q.size).reshape(q.shape) * self.metricstd
q1, p1 = self.leapfrog(q, p, N, step_size)
q, p, accepted, prob = self.metropolis([q, p], [q1, p1])
return q, p, accepted, prob, [self.Hcount, self.Vgcount, self.leapcount]
##
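# Hypothetical usage sketch, not part of the original module: sample a 1-D standard
# normal with PyHMC. The log_prob and grad_log_prob callables and the step settings
# below are illustrative choices only.
def _demo_pyhmc_standard_normal(n_steps=200):
    log_prob = lambda x: -0.5 * np.sum(x ** 2)
    grad_log_prob = lambda x: -x
    sampler = PyHMC(log_prob, grad_log_prob)
    q = np.zeros(1)
    samples = []
    for _ in range(n_steps):
        q, _, accepted, _, counts = sampler.hmc_step(q, N=10, step_size=0.2)
        samples.append(q.copy())
    return np.array(samples)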
class PyHMC_2step():
def __init__(self, log_prob, grad_log_prob, KE=None, KE_g=None):
self.log_prob, self.grad_log_prob = log_prob, grad_log_prob
self.V = lambda x : self.log_prob(x)*-1.
self.V_g = lambda x : self.grad_log_prob(x)*-1.
if KE is None or KE_g is None:
self.KE = self.unit_norm_KE
self.KE_g = self.unit_norm_KE_g
else: self.KE, self.KE_g = KE, KE_g
def unit_norm_KE(self, p):
return 0.5 * (p**2).sum()
def unit_norm_KE_g(self, p):
return p
def H(self, q,p):
return self.V(q) + self.KE(p)
def leapfrog(self, q, p, N, step_size):
q0, p0 = q, p
try:
p = p - 0.5*step_size * self.V_g(q)
for i in range(N-1):
q = q + step_size * self.KE_g(p)
p = p - step_size * self.V_g(q)
q = q + step_size * self.KE_g(p)
p = p - 0.5*step_size * self.V_g(q)
return q, p
except Exception as e:
print(e)
return q0, p0
def hmc_step(self, q, N, step_size, two_factor):
p = np.random.normal(size=q.size).reshape(q.shape)
q1, p1 = self.leapfrog(q, p, N, step_size)
accepted = False
H0 = self.H(q, p)
H1 = self.H(q1, p1)
prob1 = np.exp(H0 - H1)
if np.isnan(prob1) or np.isinf(prob1) or (q-q1).sum()==0:
prob1 = 0. ##since prob1 = 1. if q == q1
accepted = False
elif np.random.uniform(0., 1., size=1) > min(1., prob1):
accepted = False
else:
accepted = True
#
if accepted:
return q1, p1, 1., [H0, H1, H0, H1]
else:
N2 = int(N*two_factor)
s2 = step_size/two_factor
q2, p2 = self.leapfrog(q, p, N2, s2)
H2 = self.H(q2, p2)
prob2 = np.exp(H0 - H2)
if np.isnan(prob2) or np.isinf(prob2) or (q-q2).sum()==0:
return q, p, -1., [H0, H1, H2, H1]
else:
q21, p21 = self.leapfrog(q2, -p2, N, step_size)
H21 = self.H(q21, p21)
prob21 = np.exp(H2 - H21)
#if np.isnan(prob1): prob1 = 0.
#if np.isnan(prob21) or np.isinf(prob21):
if prob1 == 1:
import sys
print("prob1 should not be 1")
#sys.exit()
prob = prob2 * (1.-prob21)/(1.-prob1)
if np.isnan(prob) :
return q, p, -1, [H0, H1, H2, H21]
elif np.random.uniform(size=1) > min(1., prob):
return q, p, 0., [H0, H1, H2, H21]
else:
return q2, p2, 2., [H0, H1, H2, H21]
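# Note on the class above: when the first leapfrog proposal (N steps of size
# step_size) is rejected, a second, finer proposal is attempted with
# int(N*two_factor) steps of size step_size/two_factor, and it is accepted with
# the delayed-rejection ratio prob2 * (1 - prob21) / (1 - prob1), where prob21
# is the acceptance probability of the reversed move started from (q2, -p2).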
class PyHMC_multistep_tries():
def __init__(self, log_prob, grad_log_prob, invmetric_diag=None):
self.log_prob, self.grad_log_prob = log_prob, grad_log_prob
self.V = lambda x : self.log_prob(x)*-1.
#self.V_g = lambda x : self.grad_log_prob(x)*-1.
self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0
if invmetric_diag is None: self.invmetric_diag = 1.
else: self.invmetric_diag = invmetric_diag
self.metricstd = self.invmetric_diag**-0.5
self.KE = lambda p: 0.5*(p**2 * self.invmetric_diag).sum()
self.KE_g = lambda p: p * self.invmetric_diag
def V_g(self, x):
self.Vgcount += 1
return self.grad_log_prob(x)*-1.
def unit_norm_KE(self, p):
return 0.5 * (p**2).sum()
def unit_norm_KE_g(self, p):
return p
def H(self, q, p):
self.Hcount +=1
return self.V(q) + self.KE(p)
def leapfrog(self, q, p, N, step_size):
self.leapcount += 1
q0, p0 = q, p
try:
p = p - 0.5*step_size * self.V_g(q)
for i in range(N-1):
q = q + step_size * self.KE_g(p)
p = p - step_size * self.V_g(q)
q = q + step_size * self.KE_g(p)
p = p - 0.5*step_size * self.V_g(q)
return q, p
except Exception as e:
#print(e)
return q0, p0
def get_num(self, m, q, p, N, ss, fsub):
avec = np.zeros(m)
ptries = np.ones(m)
H0 = self.H(q, p)
for j in range(m):
fac = fsub**(j)
qj, pj = self.leapfrog(q, p, int(N*fac), ss/fac)
Hj = self.H(qj, pj)
pfac = np.exp(H0 - Hj)
if np.isnan(pfac) or np.isinf(pfac): ptries[j] = 1
else: ptries[j] = 1 - min(1. , pfac)
if (q - qj).sum()==0:
pfac = 0.
if j:
den = np.prod(1-avec[:j]) * np.prod(ptries[:j])
num = self.get_num(j, qj, -pj, N, ss, fsub)
prob = pfac*num/den
else:
prob = pfac
if np.isnan(prob) or np.isinf(prob):
return 0. #np.nan
else: avec[j] = min(1., prob)
if np.prod(1-avec): pass
else:
return np.prod(1-avec)
return np.prod(1-avec) * np.prod(ptries)
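# Note: get_num above is recursive. For each finer sub-proposal j (int(N*fsub**j)
# leapfrog steps of size ss/fsub**j) it forms an acceptance term
# avec[j] = min(1, pfac * num / den), where num comes from a recursive call on
# the reversed proposal (qj, -pj) and den is the product of the earlier
# (1 - avec) and ptries terms; it returns prod(1 - avec) * prod(ptries).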
def multi_step(self, m, q0, N, ss, fsub):
self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0
p0 = np.random.normal(size=q0.size).reshape(q0.shape) * self.metricstd
avec = np.zeros(m)
ptries = np.ones(m)
H0 = self.H(q0, p0)
q1, p1 = self.leapfrog(q0, p0, N, ss)
H1 = self.H(q1, p1)
pfac = np.exp(H0 - H1)
prob = pfac
if np.isnan(prob) or np.isinf(prob) or (q0 - q1).sum()==0:
prob = 0.
prob = min(1., prob)
avec[0] = prob
if np.isnan(pfac) or np.isinf(pfac): ptries[0] = 1
else: ptries[0] = 1 - min(1. , pfac)
acc = np.random.uniform()
if acc <= avec[0]:
return q1, p1, 0, avec, [self.Hcount, self.Vgcount, self.leapcount], ptries
else:
for j in range(1, m):
if
|
np.random.uniform()
|
numpy.random.uniform
|
from functools import partial
from itertools import product
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal as aaae
from estimagic.differentiation.derivatives import first_derivative
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.parameter_conversion import get_reparametrize_functions
from estimagic.parameters.process_constraints import process_constraints
from estimagic.parameters.reparametrize import _multiply_from_left
from estimagic.parameters.reparametrize import _multiply_from_right
from estimagic.parameters.reparametrize import post_replace
from estimagic.parameters.reparametrize import post_replace_jacobian
from estimagic.parameters.reparametrize import pre_replace
from estimagic.parameters.reparametrize import pre_replace_jacobian
from estimagic.parameters.reparametrize import reparametrize_from_internal
from estimagic.parameters.reparametrize import reparametrize_to_internal
to_test = list(
product(
[
"basic_probability",
"uncorrelated_covariance",
"basic_covariance",
"basic_fixed",
"basic_increasing",
"basic_equality",
"query_equality",
"basic_sdcorr",
"normalized_covariance",
],
[0, 1, 2],
)
)
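# `to_test` expands to 27 (case, number) pairs: the 9 constraint cases above
# crossed with the value columns 0, 1 and 2; they parametrize the tests below.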
def reduce_params(params, constraints):
all_locs = []
for constr in constraints:
if "query" in constr:
all_locs = ["i", "j1", "j2"]
elif isinstance(constr["loc"], tuple):
all_locs.append(constr["loc"][0])
elif isinstance(constr["loc"], list):
all_locs.append(constr["loc"][0][0])
else:
all_locs.append(constr["loc"])
all_locs = sorted(set(all_locs))
return params.loc[all_locs].copy()
@pytest.mark.parametrize("case, number", to_test)
def test_reparametrize_to_internal(example_params, all_constraints, case, number):
constraints = all_constraints[case]
params = reduce_params(example_params, constraints)
params["value"] = params[f"value{number}"]
keep = params[f"internal_value{number}"].notnull()
expected_internal_values = params[f"internal_value{number}"][keep]
expected_internal_lower = params["internal_lower"]
expected_internal_upper = params["internal_upper"]
to_internal, _ = get_reparametrize_functions(
params=params,
constraints=constraints,
scaling_factor=None,
scaling_offset=None,
)
_, pp = process_constraints(constraints, params)
calculated_internal_values_np = to_internal(pp["value"].to_numpy())
calculated_internal_values_pd = to_internal(pp)
calculated_internal_lower = pp["_internal_lower"]
calculated_internal_upper = pp["_internal_upper"]
aaae(calculated_internal_values_np, calculated_internal_values_pd)
aaae(calculated_internal_values_np, expected_internal_values)
aaae(calculated_internal_lower, expected_internal_lower)
aaae(calculated_internal_upper, expected_internal_upper)
@pytest.mark.parametrize("case, number", to_test)
def test_reparametrize_from_internal(example_params, all_constraints, case, number):
constraints = all_constraints[case]
params = reduce_params(example_params, constraints)
params["value"] = params[f"value{number}"]
keep = params[f"internal_value{number}"].notnull()
_, from_internal = get_reparametrize_functions(
params=params,
constraints=constraints,
scaling_factor=None,
scaling_offset=None,
)
internal_p = params[f"internal_value{number}"][keep].to_numpy()
calculated_external_value = from_internal(internal_p)
expected_external_value = params["value"].to_numpy()
aaae(calculated_external_value, expected_external_value)
def test_scaling_cancels_itself():
params = pd.DataFrame()
params["value"] = np.arange(10) + 10
params["lower_bound"] = np.arange(10)
params["upper_bound"] = 25
to_internal, from_internal = get_reparametrize_functions(
params=params,
constraints=[],
scaling_factor=np.arange(10) + 1,
scaling_offset=np.ones(10),
)
internal = to_internal(params["value"].to_numpy())
external = from_internal(internal)
aaae(external, params["value"].to_numpy())
@pytest.mark.parametrize("case, number", to_test)
def test_reparametrize_from_internal_jacobian(
example_params, all_constraints, case, number
):
constraints = all_constraints[case]
params = reduce_params(example_params, constraints)
params["value"] = params[f"value{number}"]
_, pp = process_constraints(constraints, params)
n_free = int(pp._internal_free.sum())
scaling_factor = np.ones(n_free) * 2 # np.arange(n_free) + 1
scaling_offset = np.arange(n_free) - 1
params_to_internal, params_from_internal = get_reparametrize_functions(
params=params,
constraints=constraints,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
internal_p = params_to_internal(params["value"].to_numpy())
numerical_jacobian = first_derivative(params_from_internal, internal_p)
derivative_to_internal = get_derivative_conversion_function(
params=params,
constraints=constraints,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
# calling convert_external_derivative with identity matrix as external derivative
# is just a trick to get out the jacobian of reparametrize_from_internal
jacobian = derivative_to_internal(
external_derivative=np.eye(len(params)),
internal_values=internal_p,
)
aaae(jacobian, numerical_jacobian["derivative"])
@pytest.mark.parametrize("case, number", to_test)
def test_pre_replace_jacobian(example_params, all_constraints, case, number):
constraints = all_constraints[case]
params = reduce_params(example_params, constraints)
params["value"] = params[f"value{number}"]
keep = params[f"internal_value{number}"].notnull()
pc, pp = process_constraints(constraints, params)
internal_p = params[f"internal_value{number}"][keep].to_numpy()
fixed_val = pp["_internal_fixed_value"].to_numpy()
pre_repl = pp["_pre_replacements"].to_numpy()
func = partial(
pre_replace, **{"fixed_values": fixed_val, "pre_replacements": pre_repl}
)
numerical_deriv = first_derivative(func, internal_p)["derivative"]
numerical_deriv[np.isnan(numerical_deriv)] = 0
deriv = pre_replace_jacobian(pre_repl, len(internal_p))
aaae(deriv, numerical_deriv)
@pytest.mark.parametrize("case, number", to_test)
def test_post_replace_jacobian(example_params, all_constraints, case, number):
constraints = all_constraints[case]
params = reduce_params(example_params, constraints)
params["value"] = params[f"value{number}"]
keep = params[f"internal_value{number}"].notnull()
pc, pp = process_constraints(constraints, params)
internal_p = params[f"internal_value{number}"][keep].to_numpy()
fixed_val = pp["_internal_fixed_value"].to_numpy()
pre_repl = pp["_pre_replacements"].to_numpy()
post_repl = pp["_post_replacements"].to_numpy()
external = pre_replace(internal_p, fixed_val, pre_repl)
external[np.isnan(external)] = 0 # if not set to zero the numerical differentiation
# fails due to potential np.nan.
func = partial(post_replace, **{"post_replacements": post_repl})
numerical_deriv = first_derivative(func, external)
deriv = post_replace_jacobian(post_repl)
|
aaae(deriv, numerical_deriv["derivative"])
|
numpy.testing.assert_array_almost_equal
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 03:59:43 2020
All the functions are directly taken from https://github.com/sebp/scikit-survival
"""
import numbers
import warnings
import numpy
from scipy.linalg import solve
from sklearn.base import BaseEstimator
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.validation import check_array, check_is_fitted
from sklearn.utils import check_consistent_length
from utilities import check_arrays_survival, prepare_data3
class StepFunction(object):
"""Callable step function.
.. math::
f(z) = a * y_i + b,
x_i \\leq z < x_{i + 1}
Parameters
----------
x : ndarray, shape = [n_points,]
Values on the x axis in ascending order.
y : ndarray, shape = [n_points,]
Corresponding values on the y axis.
a : float, optional
Constant to multiply the y values by.
b : float, optional
Constant offset term.
"""
def __init__(self, x, y, a=1., b=0.):
check_consistent_length(x, y)
self.x = x
self.y = y
self.a = a
self.b = b
def __call__(self, x):
if not numpy.isfinite(x):
raise ValueError("x must be finite")
if x < self.x[0] or x > self.x[-1]:
raise ValueError(
"x must be within [%f; %f], but was %f" % (self.x[0], self.x[-1], x))
i = numpy.searchsorted(self.x, x, side='left')
if self.x[i] != x:
i -= 1
return self.a * self.y[i] + self.b
def __repr__(self):
return "StepFunction(x=%r, y=%r)" % (self.x, self.y)
def _compute_counts(event, time, order=None):
"""Count right censored and uncensored samples at each unique time point.
Parameters
----------
event : array
Boolean event indicator.
time : array
Survival time or time of censoring.
order : array or None
Indices to order time in ascending order.
If None, order will be computed.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples that are censored or have an event at each time point.
"""
n_samples = event.shape[0]
if order is None:
order = numpy.argsort(time, kind="mergesort")
uniq_times = numpy.empty(n_samples, dtype=time.dtype)
uniq_events = numpy.empty(n_samples, dtype=numpy.int_)
uniq_counts = numpy.empty(n_samples, dtype=numpy.int_)
i = 0
prev_val = time[order[0]]
j = 0
while True:
count_event = 0
count = 0
while i < n_samples and prev_val == time[order[i]]:
if event[order[i]]:
count_event += 1
count += 1
i += 1
uniq_times[j] = prev_val
uniq_events[j] = count_event
uniq_counts[j] = count
j += 1
if i == n_samples:
break
prev_val = time[order[i]]
times =
|
numpy.resize(uniq_times, j)
|
numpy.resize
|
import matplotlib
# from astropy.timeseries import LombScargle
import matplotlib.gridspec as gridspec
import importlib
from hydroDL import kPath, utils
from hydroDL.app import waterQuality
from hydroDL.master import basins
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.master import slurm
from hydroDL.post import axplot, figplot
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import json
import scipy
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
dictSite = json.load(f)
code = '00300'
siteNoLst = dictSite[code]
nSite = len(siteNoLst)
dataName = 'rbWN5'
# load all sequence
dictLSTMLst = list()
# LSTM
label = 'QTFP_C'
dictLSTM = dict()
trainSet = '{}-B10'.format('comb')
outName = '{}-{}-{}-{}'.format(dataName, 'comb', label, trainSet)
for k, siteNo in enumerate(siteNoLst):
print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r')
df = basins.loadSeq(outName, siteNo)
dictLSTM[siteNo] = df
# WRTDS
dictWRTDS = dict()
dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'Linear-W', 'B10Q', 'output')
for k, siteNo in enumerate(siteNoLst):
print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r')
saveFile = os.path.join(dirWRTDS, siteNo)
df = pd.read_csv(saveFile, index_col=None).set_index('date')
# df = utils.time.datePdf(df)
dictWRTDS[siteNo] = df
# Observation
dictObs = dict()
for k, siteNo in enumerate(siteNoLst):
print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r')
df = waterQuality.readSiteTS(siteNo, varLst=[code], freq='W')
dictObs[siteNo] = df
# code = '00010'
# # calculate correlation
tt = np.datetime64('2010-01-01')
ind1 = np.where(df.index.values < tt)[0]
ind2 =
|
np.where(df.index.values >= tt)
|
numpy.where
|
from design_bench.disk_resource import DiskResource
import numpy as np
import abc
def default_uniform_distribution(ranks):
"""Accepts the rank of a set of designs as input and returns an
un-normalized uniform probability distribution
Arguments:
ranks: np.ndarray
a numpy array representing the rank order of a set of designs given
by their y values in a model-based optimization dataset
Returns:
probabilities: np.ndarray
an un-normalized probability distribution that is passed to
np.random.choice to subsample a model-based optimization dataset
"""
return np.ones(ranks.shape, dtype=np.float32)
def default_linear_distribution(ranks):
"""Accepts the rank of a set of designs as input and returns an
un-normalized linear probability distribution
Arguments:
ranks: np.ndarray
a numpy array representing the rank order of a set of designs given
by their y values in a model-based optimization dataset
Returns:
probabilities: np.ndarray
an un-normalized probability distribution that is passed to
np.random.choice to subsample a model-based optimization dataset
"""
ranks = ranks.astype(np.float32)
ranks = ranks / ranks.max()
return 1.0 - ranks
def default_quadratic_distribution(ranks):
"""Accepts the rank of a set of designs as input and returns an
un-normalized quadratic probability distribution
Arguments:
ranks: np.ndarray
a numpy array representing the rank order of a set of designs given
by their y values in a model-based optimization dataset
Returns:
probabilities: np.ndarray
an un-normalized probability distribution that is passed to
np.random.choice to subsample a model-based optimization dataset
"""
ranks = ranks.astype(np.float32)
ranks = ranks / ranks.max()
return (1.0 - ranks)**2
def default_circular_distribution(ranks):
"""Accepts the rank of a set of designs as input and returns an
un-normalized circular probability distribution
Arguments:
ranks: np.ndarray
a numpy array representing the rank order of a set of designs given
by their y values in a model-based optimization dataset
Returns:
probabilities: np.ndarray
an un-normalized probability distribution that is passed to
np.random.choice to subsample a model-based optimization dataset
"""
ranks = ranks.astype(np.float32)
ranks = ranks / ranks.max()
return 1.0 - np.sqrt(1.0 - (ranks - 1.0)**2)
def default_exponential_distribution(ranks, c=3.0):
"""Accepts the rank of a set of designs as input and returns an
un-normalized exponential probability distribution
Arguments:
ranks: np.ndarray
a numpy array representing the rank order of a set of designs given
by their y values in a model-based optimization dataset
c: float
a positive constant controlling how quickly the exponential sampling
weights decay as the rank increases (default 3.0)
Returns:
probabilities: np.ndarray
an un-normalized probability distribution that is passed to
np.random.choice to subsample a model-based optimization dataset
"""
ranks = ranks.astype(np.float32)
ranks = ranks / ranks.max()
return np.exp(-c * ranks)
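# Illustrative sketch (the names below are an example only, not part of the
# API): converting y values to ranks and then to un-normalized sampling
# weights, as DatasetBuilder.subsample does further down,
#   y = np.array([0.2, 1.5, 0.7, 0.9])
#   ranks = y.argsort().argsort()               # -> array([0, 3, 1, 2])
#   probs = default_exponential_distribution(ranks)
#   probs = probs / probs.sum()                 # normalize before sampling
#   idx = np.random.choice(len(y), size=2, replace=False, p=probs)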
class DatasetBuilder(abc.ABC):
"""An abstract base class that defines a common set of functions
and attributes for a model-based optimization dataset, where the
goal is to find a design 'x' that maximizes a prediction 'y':
max_x { y = f(x) }
Public Attributes:
name: str
An attribute that specifies the name of a model-based optimization
dataset, which might be used when labelling plots in a diagram of
performance in a research paper using design-bench
x_name: str
An attribute that specifies the name of designs in a model-based
optimization dataset, which might be used when labelling plots
in a visualization of performance in a research paper
y_name: str
An attribute that specifies the name of predictions in a model-based
optimization dataset, which might be used when labelling plots
in a visualization of performance in a research paper
x: np.ndarray
the design values 'x' for a model-based optimization problem
represented as a numpy array of arbitrary type
input_shape: Tuple[int]
the shape of a single design values 'x', represented as a list of
integers similar to calling np.ndarray.shape
input_size: int
the total number of components in the design values 'x', represented
as a single integer, the product of its shape entries
input_dtype: np.dtype
the data type of the design values 'x', which is typically either
floating point or integer (np.float32 or np.int32)
y: np.ndarray
the prediction values 'y' for a model-based optimization problem
represented by a scalar floating point value per 'x'
output_shape: Tuple[int]
the shape of a single prediction value 'y', represented as a list of
integers similar to calling np.ndarray.shape
output_size: int
the total number of components in the prediction values 'y',
represented as a single integer, the product of its shape entries
output_dtype: np.dtype
the data type of the prediction values 'y', which is typically a
type of floating point (np.float32 or np.float16)
dataset_size: int
the total number of paired design values 'x' and prediction values
'y' in the dataset, represented as a single integer
dataset_distribution: Callable[np.ndarray, np.ndarray]
the target distribution of the model-based optimization dataset
marginal p(y) used for controlling the sampling distribution
dataset_max_percentile: float
the percentile between 0 and 100 of prediction values 'y' above
which are hidden from access by members outside the class
dataset_min_percentile: float
the percentile between 0 and 100 of prediction values 'y' below
which are hidden from access by members outside the class
dataset_max_output: float
the specific cutoff threshold for prediction values 'y' above
which are hidden from access by members outside the class
dataset_min_output: float
the specific cutoff threshold for prediction values 'y' below
which are hidden from access by members outside the class
internal_batch_size: int
the integer number of samples per batch that is used internally
when processing the dataset and generating samples
freeze_statistics: bool
a boolean indicator that when set to true prevents methods from
changing the normalization and sub sampling statistics
is_normalized_x: bool
a boolean indicator that specifies whether the design values
in the dataset are being normalized
x_mean: np.ndarray
a numpy array that is automatically calculated to be the mean
of visible design values in the dataset
x_standard_dev: np.ndarray
a numpy array that is automatically calculated to be the standard
deviation of visible design values in the dataset
is_normalized_y: bool
a boolean indicator that specifies whether the prediction values
in the dataset are being normalized
y_mean: np.ndarray
a numpy array that is automatically calculated to be the mean
of visible prediction values in the dataset
y_standard_dev: np.ndarray
a numpy array that is automatically calculated to be the standard
deviation of visible prediction values in the dataset
Public Methods:
iterate_batches(batch_size: int, return_x: bool,
return_y: bool, drop_remainder: bool)
-> Iterable[Tuple[np.ndarray, np.ndarray]]:
Returns an object that supports iterations, which yields tuples of
design values 'x' and prediction values 'y' from a model-based
optimization data set for training a model
iterate_samples(return_x: bool, return_y: bool):
-> Iterable[Tuple[np.ndarray, np.ndarray]]:
Returns an object that supports iterations, which yields tuples of
design values 'x' and prediction values 'y' from a model-based
optimization data set for training a model
subsample(max_samples: int,
distribution: Callable[np.ndarray, np.ndarray],
max_percentile: float,
min_percentile: float):
a function that exposes a subsampled version of a much larger
model-based optimization dataset containing design values 'x'
whose prediction values 'y' are skewed
relabel(relabel_function:
Callable[[np.ndarray, np.ndarray], np.ndarray]):
a function that accepts a function that maps from a dataset of
design values 'x' and prediction values y to a new set of
prediction values 'y' and relabels the model-based optimization dataset
clone(subset: set, shard_size: int,
to_disk: bool, disk_target: str, is_absolute: bool):
Generate a cloned copy of a model-based optimization dataset
using the provided name and shard generation settings; useful
when relabelling a dataset buffer from the disk
split(fraction: float, subset: set, shard_size: int,
to_disk: bool, disk_target: str, is_absolute: bool):
split a model-based optimization data set into a training set and
a validation set allocating 'fraction' of the data set to the
validation set and the rest to the training set
normalize_x(new_x: np.ndarray) -> np.ndarray:
a helper function that accepts floating point design values 'x'
as input and standardizes them so that they have zero
empirical mean and unit empirical variance
denormalize_x(new_x: np.ndarray) -> np.ndarray:
a helper function that accepts floating point design values 'x'
as input and undoes standardization so that they have their
original empirical mean and variance
normalize_y(new_x: np.ndarray) -> np.ndarray:
a helper function that accepts floating point prediction values 'y'
as input and standardizes them so that they have zero
empirical mean and unit empirical variance
denormalize_y(new_x: np.ndarray) -> np.ndarray:
a helper function that accepts floating point prediction values 'y'
as input and undoes standardization so that they have their
original empirical mean and variance
map_normalize_x():
a destructive function that standardizes the design values 'x'
in the class dataset in-place so that they have zero empirical
mean and unit variance
map_denormalize_x():
a destructive function that undoes standardization of the
design values 'x' in the class dataset in-place which are expected
to have zero empirical mean and unit variance
map_normalize_y():
a destructive function that standardizes the prediction values 'y'
in the class dataset in-place so that they have zero empirical
mean and unit variance
map_denormalize_y():
a destructive function that undoes standardization of the
prediction values 'y' in the class dataset in-place which are
expected to have zero empirical mean and unit variance
"""
@property
@abc.abstractmethod
def name(self):
"""Attribute that specifies the name of a model-based optimization
dataset, which might be used when labelling plots in a diagram of
performance in a research paper using design-bench
"""
raise NotImplementedError
@property
@abc.abstractmethod
def x_name(self):
"""Attribute that specifies the name of designs in a model-based
optimization dataset, which might be used when labelling plots
in a visualization of performance in a research paper
"""
raise NotImplementedError
@property
@abc.abstractmethod
def y_name(self):
"""Attribute that specifies the name of predictions in a model-based
optimization dataset, which might be used when labelling plots
in a visualization of performance in a research paper
"""
raise NotImplementedError
@property
@abc.abstractmethod
def subclass_kwargs(self):
"""Generate a dictionary containing class initialization keyword
arguments that are specific to sub classes; for example, may contain
the number of classes in a discrete dataset
"""
raise NotImplementedError
@property
@abc.abstractmethod
def subclass(self):
"""Specifies the primary subclass of an instance of DatasetBuilder
that can be instantiated on its own using self.rebuild_dataset
and typically either DiscreteDataset or ContinuousDataset
"""
raise NotImplementedError
def __init__(self, x_shards, y_shards, internal_batch_size=32,
is_normalized_x=False, is_normalized_y=False,
max_samples=None, distribution=None,
max_percentile=100.0, min_percentile=0.0):
"""Initialize a model-based optimization dataset and prepare
that dataset by loading that dataset from disk and modifying
its distribution of designs and predictions
Arguments:
x_shards: Union[ np.ndarray, RemoteResource,
Iterable[np.ndarray], Iterable[RemoteResource]]
a single shard or a list of shards representing the design values
in a model-based optimization dataset; shards are loaded lazily
if RemoteResource otherwise loaded in memory immediately
y_shards: Union[ np.ndarray, RemoteResource,
Iterable[np.ndarray], Iterable[RemoteResource]]
a single shard or a list of shards representing prediction values
in a model-based optimization dataset; shards are loaded lazily
if RemoteResource otherwise loaded in memory immediately
internal_batch_size: int
the number of samples per batch to use when computing
normalization statistics of the data set and while relabeling
the prediction values of the data set
is_normalized_x: bool
a boolean indicator that specifies whether the designs
in the dataset are being normalized
is_normalized_y: bool
a boolean indicator that specifies whether the predictions
in the dataset are being normalized
max_samples: int
the maximum number of samples to include in the visible dataset;
if more than this number of samples would be present, samples
are randomly removed from the visible dataset
distribution: Callable[np.ndarray, np.ndarray]
a function that accepts an array of the ranks of designs as
input and returns the probability to sample each according to
a distribution---for example, a geometric distribution
max_percentile: float
the percentile between 0 and 100 of prediction values 'y' above
which are hidden from access by members outside the class
min_percentile: float
the percentile between 0 and 100 of prediction values 'y' below
which are hidden from access by members outside the class
"""
# save the provided dataset shards to be loaded into batches
self.x_shards = (x_shards,) if \
isinstance(x_shards, np.ndarray) or \
isinstance(x_shards, DiskResource) else x_shards
self.y_shards = (y_shards,) if \
isinstance(y_shards, np.ndarray) or \
isinstance(y_shards, DiskResource) else y_shards
# download the remote resources if they are given
self.num_shards = 0
for x_shard, y_shard in zip(self.x_shards, self.y_shards):
self.num_shards += 1
if isinstance(x_shard, DiskResource) \
and not x_shard.is_downloaded:
x_shard.download()
if isinstance(y_shard, DiskResource) \
and not y_shard.is_downloaded:
y_shard.download()
# update variables that describe the data set
self.dataset_min_percentile = 0.0
self.dataset_max_percentile = 100.0
self.dataset_min_output = np.NINF
self.dataset_max_output = np.PINF
self.dataset_distribution = None
# initialize the normalization state to False
self.internal_batch_size = internal_batch_size
self.is_normalized_x = False
self.is_normalized_y = False
# special flags that control when the dataset is mutable
self.freeze_statistics = False
self._disable_transform = False
self._disable_subsample = False
# initialize statistics for data set normalization
self.x_mean = None
self.y_mean = None
self.x_standard_dev = None
self.y_standard_dev = None
# assign variables that describe the design values 'x'
self._disable_transform = True
self._disable_subsample = True
for x in self.iterate_samples(return_y=False):
self.input_shape = x.shape
self.input_size = int(np.prod(x.shape))
self.input_dtype = x.dtype
break # only sample a single design from the data set
# assign variables that describe the prediction values 'y'
self.output_shape = [1]
self.output_size = 1
self.output_dtype = np.float32
# check the output format and count the number of samples
self.dataset_size = 0
for i, y in enumerate(self.iterate_samples(return_x=False)):
self.dataset_size += 1 # assume the data set is large
if i == 0 and len(y.shape) != 1 or y.shape[0] != 1:
raise ValueError(f"predictions must have shape [N, 1]")
# initialize a default set of visible designs
self._disable_transform = False
self._disable_subsample = False
self.dataset_visible_mask = np.full(
[self.dataset_size], True, dtype=np.bool)
# handle requests to normalize and subsample the dataset
if is_normalized_x:
self.map_normalize_x()
if is_normalized_y:
self.map_normalize_y()
self.subsample(max_samples=max_samples,
distribution=distribution,
min_percentile=min_percentile,
max_percentile=max_percentile)
def get_num_shards(self):
"""A helper function that returns the number of shards in a
model-based optimization data set, which is useful when the data set
is too large to be loaded into memory all at once
Returns:
num_shards: int
an integer representing the number of shards in a model-based
optimization data set that can be loaded
"""
return self.num_shards
def get_shard_x(self, shard_id):
"""A helper function used for retrieving the data associated with a
particular shard specified by shard_id containing design values
in a model-based optimization data set
Arguments:
shard_id: int
an integer representing the particular identifier of the shard
to be loaded from a model-based optimization data set
Returns:
shard_data: np.ndarray
a numpy array that represents the data encoded in the shard
specified by the integer identifier shard_id
"""
# check the shard id is in bounds
if 0 < shard_id >= self.get_num_shards():
raise ValueError(f"shard id={shard_id} out of bounds")
# if that shard entry is a numpy array
if isinstance(self.x_shards[shard_id], np.ndarray):
return self.x_shards[shard_id]
# if that shard entry is stored on the disk
elif isinstance(self.x_shards[shard_id], DiskResource):
return np.load(self.x_shards[shard_id].disk_target)
def get_shard_y(self, shard_id):
"""A helper function used for retrieving the data associated with a
particular shard specified by shard_id containing prediction values
in a model-based optimization data set
Arguments:
shard_id: int
an integer representing the particular identifier of the shard
to be loaded from a model-based optimization data set
Returns:
shard_data: np.ndarray
a numpy array that represents the data encoded in the shard
specified by the integer identifier shard_id
"""
# check the shard id is in bounds
if 0 < shard_id >= self.get_num_shards():
raise ValueError(f"shard id={shard_id} out of bounds")
# if that shard entry is a numpy array
if isinstance(self.y_shards[shard_id], np.ndarray):
return self.y_shards[shard_id]
# if that shard entry is stored on the disk
elif isinstance(self.y_shards[shard_id], DiskResource):
return np.load(self.y_shards[shard_id].disk_target)
def set_shard_x(self, shard_id, shard_data,
to_disk=None, disk_target=None, is_absolute=None):
"""A helper function used for assigning the data associated with a
particular shard specified by shard_id containing design values
in a model-based optimization data set
Arguments:
shard_id: int
an integer representing the particular identifier of the shard
to be loaded from a model-based optimization data set
shard_data: np.ndarray
a numpy array that represents the data to be encoded in the
shard specified by the integer identifier shard_id
to_disk: boolean
a boolean that indicates whether to store the data set
in memory as numpy arrays or to the disk
disk_target: str
a string that determines the name and sub folder of the saved
data set if to_disk is set to be true
is_absolute: boolean
a boolean that indicates whether the disk_target path is taken
relative to the benchmark data folder
"""
# check that all arguments are set when saving to disk
if to_disk is not None and to_disk and \
(disk_target is None or is_absolute is None):
raise ValueError("must specify location when saving to disk")
# check the shard id is in bounds
if 0 < shard_id >= self.get_num_shards():
raise ValueError(f"shard id={shard_id} out of bounds")
# store shard in memory as a numpy array
if (to_disk is not None and not to_disk) or \
(to_disk is None and isinstance(
self.x_shards[shard_id], np.ndarray)):
self.x_shards[shard_id] = shard_data
# write shard to a new resource file given by "disk_target"
if to_disk is not None and to_disk:
disk_target = f"{disk_target}-x-{shard_id}.npy"
self.x_shards[shard_id] = DiskResource(disk_target,
is_absolute=is_absolute)
# possibly write shard to an existing file on disk
if isinstance(self.x_shards[shard_id], DiskResource):
np.save(self.x_shards[shard_id].disk_target, shard_data)
def set_shard_y(self, shard_id, shard_data,
to_disk=None, disk_target=None, is_absolute=None):
"""A helper function used for assigning the data associated with a
particular shard specified by shard_id containing prediction values
in a model-based optimization data set
Arguments:
shard_id: int
an integer representing the particular identifier of the shard
to be loaded from a model-based optimization data set
shard_data: np.ndarray
a numpy array that represents the data to be encoded in the
shard specified by the integer identifier shard_id
to_disk: boolean
a boolean that indicates whether to store the data set
in memory as numpy arrays or to the disk
disk_target: str
a string that determines the name and sub folder of the saved
data set if to_disk is set to be true
is_absolute: boolean
a boolean that indicates whether the disk_target path is taken
relative to the benchmark data folder
"""
# check that all arguments are set when saving to disk
if to_disk is not None and to_disk and \
(disk_target is None or is_absolute is None):
raise ValueError("must specify location when saving to disk")
# check the shard id is in bounds
if 0 < shard_id >= self.get_num_shards():
raise ValueError(f"shard id={shard_id} out of bounds")
# store shard in memory as a numpy array
if (to_disk is not None and not to_disk) or \
(to_disk is None and isinstance(
self.y_shards[shard_id], np.ndarray)):
self.y_shards[shard_id] = shard_data
# write shard to a new resource file given by "disk_target"
if to_disk is not None and to_disk:
disk_target = f"{disk_target}-y-{shard_id}.npy"
self.y_shards[shard_id] = DiskResource(disk_target,
is_absolute=is_absolute)
# possibly write shard to an existing file on disk
if isinstance(self.y_shards[shard_id], DiskResource):
np.save(self.y_shards[shard_id].disk_target, shard_data)
def batch_transform(self, x_batch, y_batch,
return_x=True, return_y=True):
"""Apply a transformation to batches of samples from a model-based
optimization data set, including sub sampling and normalization
and potentially other user-defined transformations
Arguments:
x_batch: np.ndarray
a numpy array representing a batch of design values sampled
from a model-based optimization data set
y_batch: np.ndarray
a numpy array representing a batch of prediction values sampled
from a model-based optimization data set
return_x: bool
a boolean indicator that specifies whether the generator yields
design values at every iteration; note that at least one of
return_x and return_y must be set to True
return_y: bool
a boolean indicator that specifies whether the generator yields
prediction values at every iteration; note that at least one
of return_x and return_y must be set to True
Returns:
x_batch: np.ndarray
a numpy array representing a batch of design values sampled
from a model-based optimization data set
y_batch: np.ndarray
a numpy array representing a batch of prediction values sampled
from a model-based optimization data set
"""
# normalize the design values in the batch
if self.is_normalized_x and return_x:
x_batch = self.normalize_x(x_batch)
# normalize the prediction values in the batch
if self.is_normalized_y and return_y:
y_batch = self.normalize_y(y_batch)
# return processed batches of designs and predictions
return (x_batch if return_x else None,
y_batch if return_y else None)
def iterate_batches(self, batch_size, return_x=True,
return_y=True, drop_remainder=False):
"""Returns an object that supports iterations, which yields tuples of
design values 'x' and prediction values 'y' from a model-based
optimization data set for training a model
Arguments:
batch_size: int
a positive integer that specifies the batch size of samples
taken from a model-based optimization data set; batches
with batch_size elements are yielded
return_x: bool
a boolean indicator that specifies whether the generator yields
design values at every iteration; note that at least one of
return_x and return_y must be set to True
return_y: bool
a boolean indicator that specifies whether the generator yields
prediction values at every iteration; note that at least one
of return_x and return_y must be set to True
drop_remainder: bool
a boolean indicator representing whether the last batch
should be dropped in the case it has fewer than batch_size
elements; the default behavior is not to drop the smaller batch.
Returns:
generator: Iterator
a python iterable that yields samples from a model-based
optimization data set and returns once finished
"""
# check whether the generator arguments are valid
if batch_size < 1 or (not return_x and not return_y):
raise ValueError("invalid arguments passed to batch generator")
# track a list of incomplete batches between shards
y_batch_size = 0
x_batch = [] if return_x else None
y_batch = [] if return_y else None
# iterate through every registered shard
sample_id = 0
for shard_id in range(self.get_num_shards()):
x_shard_data = self.get_shard_x(shard_id) if return_x else None
y_shard_data = self.get_shard_y(shard_id)
# loop once per batch contained in the shard
shard_position = 0
while shard_position < y_shard_data.shape[0]:
# how many samples will be attempted to read
target_size = batch_size - y_batch_size
# slice out a component of the current shard
x_sliced = x_shard_data[shard_position:(
shard_position + target_size)] if return_x else None
y_sliced = y_shard_data[shard_position:(
shard_position + target_size)]
# store the batch_size of samples read
samples_read = y_sliced.shape[0]
# take a subset of the sliced arrays using a pre-defined
# transformation that sub-samples
if not self._disable_subsample:
# compute which samples are exposed in the dataset
indices = np.where(self.dataset_visible_mask[
sample_id:sample_id + y_sliced.shape[0]])[0]
# sub sample the design and prediction values
x_sliced = x_sliced[indices] if return_x else None
y_sliced = y_sliced[indices] if return_y else None
# take a subset of the sliced arrays using a pre-defined
# transformation that normalizes
if not self._disable_transform:
# apply a transformation to the dataset
x_sliced, y_sliced = self.batch_transform(
x_sliced, y_sliced,
return_x=return_x, return_y=return_y)
# update the read position in the shard tensor
shard_position += target_size
sample_id += samples_read
# update the current batch to be yielded
y_batch_size += (y_sliced if
return_y else x_sliced).shape[0]
x_batch.append(x_sliced) if return_x else None
y_batch.append(y_sliced) if return_y else None
# yield the current batch when enough samples are loaded
if y_batch_size >= batch_size \
or (shard_position >= y_shard_data.shape[0]
and shard_id + 1 == self.get_num_shards()
and not drop_remainder):
try:
# determine which tensors to yield
if return_x and return_y:
yield np.concatenate(x_batch, axis=0), \
np.concatenate(y_batch, axis=0)
elif return_x:
yield np.concatenate(x_batch, axis=0)
elif return_y:
yield np.concatenate(y_batch, axis=0)
# reset the buffer for incomplete batches
y_batch_size = 0
x_batch = [] if return_x else None
y_batch = [] if return_y else None
except GeneratorExit:
# handle cleanup when break is called
return
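# Illustrative usage of iterate_batches (`dataset` and `train_step` are
# placeholder names, not defined in this file):
#   for x_batch, y_batch in dataset.iterate_batches(batch_size=128):
#       train_step(x_batch, y_batch)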
def iterate_samples(self, return_x=True, return_y=True):
"""Returns an object that supports iterations, which yields tuples of
design values 'x' and prediction values 'y' from a model-based
optimization data set for training a model
Arguments:
return_x: bool
a boolean indicator that specifies whether the generator yields
design values at every iteration; note that at least one of
return_x and return_y must be set to True
return_y: bool
a boolean indicator that specifies whether the generator yields
prediction values at every iteration; note that at least one
of return_x and return_y must be set to True
Returns:
generator: Iterator
a python iterable that yields samples from a model-based
optimization data set and returns once finished
"""
# generator that only returns single samples
for batch in self.iterate_batches(
self.internal_batch_size,
return_x=return_x, return_y=return_y):
# yield a tuple if both x and y are returned
if return_x and return_y:
for i in range(batch[0].shape[0]):
yield batch[0][i], batch[1][i]
# yield a single array if only x or only y is returned
elif return_x or return_y:
for i in range(batch.shape[0]):
yield batch[i]
def __iter__(self):
"""Returns an object that supports iterations, which yields tuples of
design values 'x' and prediction values 'y' from a model-based
optimization data set for training a model
Returns:
generator: Iterator
a python iterable that yields samples from a model-based
optimization data set and returns once finished
"""
# generator that returns batches of designs and predictions
for x_batch, y_batch in \
self.iterate_batches(self.internal_batch_size):
yield x_batch, y_batch
def update_x_statistics(self):
"""A helpful function that calculates the mean and standard deviation
of the design values 'x' in a model-based optimization dataset
either iteratively or all at once using numpy
"""
# check that statistics are not frozen for this dataset
if self.freeze_statistics:
raise ValueError("cannot update dataset when it is frozen")
# make sure the statistics are calculated from original samples
original_is_normalized_x = self.is_normalized_x
self.is_normalized_x = False
# iterate through the entire dataset a first time
samples = x_mean = 0
for x_batch in self.iterate_batches(
self.internal_batch_size, return_y=False):
# calculate how many samples are actually in the current batch
batch_size = np.array(x_batch.shape[0], dtype=np.float32)
# update the running mean using dynamic programming
x_mean = x_mean * (samples / (samples + batch_size)) + \
np.sum(x_batch,
axis=0, keepdims=True) / (samples + batch_size)
# update the number of samples used in the calculation
samples += batch_size
# iterate through the entire dataset a second time
samples = x_variance = 0
for x_batch in self.iterate_batches(
self.internal_batch_size, return_y=False):
# calculate how many samples are actually in the current batch
batch_size = np.array(x_batch.shape[0], dtype=np.float32)
# update the running variance using dynamic programming
x_variance = x_variance * (samples / (samples + batch_size)) + \
np.sum(np.square(x_batch - x_mean),
axis=0, keepdims=True) / (samples + batch_size)
# update the number of samples used in the calculation
samples += batch_size
# expose the calculated mean and standard deviation
self.x_mean = x_mean
self.x_standard_dev = np.sqrt(x_variance)
# remove zero standard deviations to prevent singularities
self.x_standard_dev = np.where(
self.x_standard_dev == 0.0, 1.0, self.x_standard_dev)
# reset the normalized state to what it originally was
self.is_normalized_x = original_is_normalized_x
def update_y_statistics(self):
"""A helpful function that calculates the mean and standard deviation
of the prediction values 'y' in a model-based optimization dataset
either iteratively or all at once using numpy
"""
# check that statistics are not frozen for this dataset
if self.freeze_statistics:
raise ValueError("cannot update dataset when it is frozen")
# make sure the statistics are calculated from original samples
original_is_normalized_y = self.is_normalized_y
self.is_normalized_y = False
# iterate through the entire dataset a first time
samples = y_mean = 0
for y_batch in self.iterate_batches(
self.internal_batch_size, return_x=False):
# calculate how many samples are actually in the current batch
batch_size = np.array(y_batch.shape[0], dtype=np.float32)
# update the running mean using dynamic programming
y_mean = y_mean * (samples / (samples + batch_size)) + \
np.sum(y_batch,
axis=0, keepdims=True) / (samples + batch_size)
# update the number of samples used in the calculation
samples += batch_size
# iterate through the entire dataset a second time
samples = y_variance = 0
for y_batch in self.iterate_batches(
self.internal_batch_size, return_x=False):
# calculate how many samples are actually in the current batch
batch_size = np.array(y_batch.shape[0], dtype=np.float32)
# update the running variance using dynamic programming
y_variance = y_variance * (samples / (samples + batch_size)) + \
np.sum(np.square(y_batch - y_mean),
axis=0, keepdims=True) / (samples + batch_size)
# update the number of samples used in the calculation
samples += batch_size
# expose the calculated mean and standard deviation
self.y_mean = y_mean
self.y_standard_dev = np.sqrt(y_variance)
# remove zero standard deviations to prevent singularities
self.y_standard_dev = np.where(
self.y_standard_dev == 0.0, 1.0, self.y_standard_dev)
# reset the normalized state to what it originally was
self.is_normalized_y = original_is_normalized_y
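# The two update_*_statistics methods above compute a streaming mean and
# variance over batches: with n samples seen so far and a batch of size b,
#   mean_new = mean_old * n / (n + b) + sum(batch) / (n + b)
#   var_new  = var_old  * n / (n + b) + sum((batch - mean)**2) / (n + b)
# so the result matches np.mean / np.var over the full visible dataset without
# loading it into memory all at once.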
def subsample(self, max_samples=None, distribution=None,
max_percentile=100.0, min_percentile=0.0):
"""a function that exposes a subsampled version of a much larger
model-based optimization dataset containing design values 'x'
whose prediction values 'y' are skewed
Arguments:
max_samples: int
the maximum number of samples to include in the visible dataset;
if more than this number of samples would be present, samples
are randomly removed from the visible dataset
distribution: Callable[np.ndarray, np.ndarray]
a function that accepts an array of the ranks of designs as
input and returns the probability to sample each according to
a distribution---for example, a geometric distribution
max_percentile: float
the percentile between 0 and 100 of prediction values 'y' above
which are hidden from access by members outside the class
min_percentile: float
the percentile between 0 and 100 of prediction values 'y' below
which are hidden from access by members outside the class
"""
# check that statistics are not frozen for this dataset
if self.freeze_statistics:
raise ValueError("cannot update dataset when it is frozen")
# raise an error if the arguments are invalid
if max_samples is not None and max_samples <= 0:
raise ValueError("dataset cannot be made empty")
# raise an error if the arguments are invalid
if min_percentile > max_percentile:
raise ValueError("invalid arguments provided")
# convert the original prediction generator to a numpy tensor
self._disable_subsample = True
self._disable_transform = True
y = np.concatenate(list(self.iterate_batches(
self.internal_batch_size, return_x=False)), axis=0)
self._disable_subsample = False
self._disable_transform = False
# calculate the min threshold for predictions in the dataset
min_output = np.percentile(y[:, 0], min_percentile) \
if min_percentile > 0.0 else np.NINF
self.dataset_min_percentile = min_percentile
self.dataset_min_output = min_output
# calculate the max threshold for predictions in the dataset
max_output = np.percentile(y[:, 0], max_percentile) \
if max_percentile < 100.0 else np.PINF
self.dataset_max_percentile = max_percentile
self.dataset_max_output = max_output
# calculate indices of samples that are within range
indices = np.arange(y.shape[0])[np.where(
np.logical_and(y <= max_output, y >= min_output))[0]]
max_samples = indices.size \
if max_samples is None else min(indices.size, max_samples)
# replace default distributions with their implementations
if distribution in {None, "uniform"}:
distribution = default_uniform_distribution
elif distribution == "linear":
distribution = default_linear_distribution
elif distribution == "quadratic":
distribution = default_quadratic_distribution
elif distribution == "exponential":
distribution = default_exponential_distribution
elif distribution == "circular":
distribution = default_circular_distribution
# calculate the probability to subsample individual designs
probs = distribution(y[indices, 0].argsort().argsort())
probs = np.asarray(probs, dtype=np.float32)
probs = np.broadcast_to(probs, (indices.size,))
indices = indices[np.random.choice(
indices.size, max_samples, replace=False, p=probs / probs.sum())]
# binary mask that determines which samples are visible
visible_mask = np.full([y.shape[0]], False, dtype=np.bool)
visible_mask[indices] = True
self.dataset_visible_mask = visible_mask
self.dataset_size = indices.size
self.dataset_distribution = distribution
# update normalization statistics for design values
if self.is_normalized_x:
self.update_x_statistics()
# update normalization statistics for prediction values
if self.is_normalized_y:
self.update_y_statistics()
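# Illustrative call (the argument values are an example only): keep the designs
# whose predictions fall in the bottom half and draw at most 10000 of them with
# exponentially decaying weights favoring the lowest-ranked y values,
#   dataset.subsample(max_samples=10000, distribution="exponential",
#                     max_percentile=50.0)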
@property
def x(self) -> np.ndarray:
"""A helpful function for loading the design values from disk in case
the dataset is set to load all at once rather than lazily and is
overridden with a numpy array once loaded
Returns:
x: np.ndarray
processed design values 'x' for a model-based optimization problem
represented as a numpy array of arbitrary type
"""
return np.concatenate([x for x in self.iterate_batches(
self.internal_batch_size, return_y=False)], axis=0)
@property
def y(self) -> np.ndarray:
"""A helpful function for loading prediction values from disk in case
the dataset is set to load all at once rather than lazily and is
overridden with a numpy array once loaded
Returns:
y: np.ndarray
processed prediction values 'y' for a model-based optimization
problem represented as a numpy array of arbitrary type
"""
return np.concatenate([y for y in self.iterate_batches(
self.internal_batch_size, return_x=False)], axis=0)
def relabel(self, relabel_function,
to_disk=None, disk_target=None, is_absolute=None):
"""a function that accepts a function that maps from a dataset of
design values 'x' and prediction values y to a new set of
prediction values 'y' and relabels a model-based optimization dataset
Arguments:
relabel_function: Callable[[np.ndarray, np.ndarray], np.ndarray]
a function capable of mapping from a numpy array of design
values 'x' and prediction values 'y' to new predictions 'y'
using batching to prevent memory overflow
to_disk: boolean
a boolean that indicates whether to store the data set
in memory as numpy arrays or to the disk
disk_target: str
a string that determines the name and sub folder of the saved
data set if to_disk is set to be true
is_absolute: boolean
a boolean that indicates whether the disk_target path is taken
relative to the benchmark data folder
"""
# check that statistics are not frozen for this dataset
if self.freeze_statistics:
raise ValueError("cannot update dataset when it is frozen")
# check that all arguments are set when saving to disk
if to_disk is not None and to_disk and \
(disk_target is None or is_absolute is None):
raise ValueError("must specify location when saving to disk")
# prevent the data set from being sub-sampled or normalized
self._disable_subsample = True
examples = self.y.shape[0]
examples_processed = 0
# track a list of incomplete batches between shards
y_shard = []
y_shard_size = 0
# calculate the appropriate size of the first shard
shard_id = 0
shard = self.get_shard_y(shard_id)
shard_size = shard.shape[0]
# relabel the prediction values of the internal data set
for x_batch, y_batch in \
self.iterate_batches(self.internal_batch_size):
# calculate the new prediction values to be stored as shards
y_batch = relabel_function(x_batch, y_batch)
read_position = 0
# remove potential normalization on the predictions
if self.is_normalized_y:
y_batch = self.denormalize_y(y_batch)
# loop once per batch contained in the shard
while read_position < y_batch.shape[0]:
# calculate the intended number of samples to serialize
target_size = shard_size - y_shard_size
# slice out a component of the current shard
y_slice = y_batch[read_position:read_position + target_size]
samples_read = y_slice.shape[0]
# increment the read position in the prediction tensor
# and update the number of shards and examples processed
read_position += target_size
examples_processed += samples_read
# update the current shard to be serialized
y_shard.append(y_slice)
y_shard_size += samples_read
# serialize the current shard when enough samples are loaded
if y_shard_size >= shard_size \
or examples_processed >= examples:
# serialize the value of the new shard data
self.set_shard_y(shard_id, np.concatenate(y_shard, axis=0),
to_disk=to_disk, disk_target=disk_target,
is_absolute=is_absolute)
# reset the buffer for incomplete batches
y_shard = []
y_shard_size = 0
# calculate the appropriate size for the next shard
if not examples_processed >= examples:
shard_id += 1
shard = self.get_shard_y(shard_id)
shard_size = shard.shape[0]
# re-sample the data set and recalculate statistics
self._disable_subsample = False
self.subsample(max_samples=self.dataset_size,
distribution=self.dataset_distribution,
max_percentile=self.dataset_max_percentile,
min_percentile=self.dataset_min_percentile)
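# Illustrative call: relabel_function receives (x_batch, y_batch) and returns
# new predictions, so negating every prediction in place is simply
#   dataset.relabel(lambda x, y: -y)
# with each shard written back to whatever storage (memory or disk) it already
# uses.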
def rebuild_dataset(self, x_shards, y_shards, visible_mask):
"""Initialize a model-based optimization dataset and prepare
that dataset by loading that dataset from disk and modifying
its distribution of designs and predictions
Arguments:
x_shards: Union[ np.ndarray, RemoteResource,
Iterable[np.ndarray], Iterable[RemoteResource]]
a single shard or a list of shards representing the design values
in a model-based optimization dataset; shards are loaded lazily
if RemoteResource otherwise loaded in memory immediately
y_shards: Union[ np.ndarray, RemoteResource,
Iterable[np.ndarray], Iterable[RemoteResource]]
a single shard or a list of shards representing prediction values
in a model-based optimization dataset; shards are loaded lazily
if RemoteResource otherwise loaded in memory immediately
visible_mask: np.ndarray
a numpy array of shape [dataset_size] containing boolean entries
specifying which samples are visible in the provided Iterable
Returns:
dataset: DatasetBuilder
an instance of a data set builder subclass containing a copy
of all statistics associated with this dataset
"""
# new dataset that shares statistics with this one
kwargs = dict(internal_batch_size=self.internal_batch_size)
kwargs.update(self.subclass_kwargs)
dataset = self.subclass(x_shards, y_shards, **kwargs)
# carry over the names of the parent
dataset.name = self.name
dataset.x_name = self.x_name
dataset.y_name = self.y_name
# carry over the normalize statistics of the parent
dataset.is_normalized_x = self.is_normalized_x
dataset.x_mean = self.x_mean
dataset.x_standard_dev = self.x_standard_dev
# carry over the normalize statistics of the parent
dataset.is_normalized_y = self.is_normalized_y
dataset.y_mean = self.y_mean
dataset.y_standard_dev = self.y_standard_dev
# carry over the sub sampling statistics of the parent
dataset.dataset_min_percentile = self.dataset_min_percentile
dataset.dataset_max_percentile = self.dataset_max_percentile
dataset.dataset_min_output = self.dataset_min_output
dataset.dataset_max_output = self.dataset_max_output
dataset.dataset_distribution = self.dataset_distribution
# calculate indices of samples that are visible
dataset.dataset_visible_mask = visible_mask
dataset.dataset_size = dataset.y.shape[0]
return dataset
def clone(self, subset=None, shard_size=5000,
to_disk=False, disk_target="dataset", is_absolute=True):
"""Generate a cloned copy of a model-based optimization dataset
using the provided name and shard generation settings; useful
when relabelling a dataset buffer from the disk
Arguments:
subset: set
a python set of integers representing the ids of the samples
to be included in the generated shards
shard_size: int
an integer representing the number of samples from a model-based
optimization data set to save per shard
to_disk: boolean
a boolean that indicates whether to store the split data set
in memory as numpy arrays or to the disk
disk_target: str
a string that determines the name and sub folder of the saved
data set if to_disk is set to be true
is_absolute: boolean
a boolean that indicates whether the disk_target path is taken
relative to the benchmark data folder
Returns:
dataset: DatasetBuilder
an instance of a data set builder subclass containing a copy
of all data originally associated with this dataset
"""
# check if the subset is empty
if subset is not None and len(subset) == 0:
raise ValueError("cannot pass an empty subset")
# disable transformations and check the size of the data set
self._disable_subsample = True
self._disable_transform = True
visible_mask = []
# create lists to store shards and numpy arrays
partial_shard_x, partial_shard_y = [], []
x_shards, y_shards = [], []
# iterate once through the entire data set
for sample_id, (x, y) in enumerate(self.iterate_samples()):
# add the sampled x and y to the dataset
if subset is None or sample_id in subset:
partial_shard_x.append(x)
partial_shard_y.append(y)
# record whether this sample was already visible
visible_mask.append(self.dataset_visible_mask[sample_id])
# if the validation shard is large enough then write it
if (sample_id + 1 == self.dataset_visible_mask.size and
len(partial_shard_x) > 0) or \
len(partial_shard_x) >= shard_size:
# stack the sampled x and y values into a shard
shard_x = np.stack(partial_shard_x, axis=0)
shard_y = np.stack(partial_shard_y, axis=0)
if to_disk:
# write the design values shard first to a new file
x_resource = DiskResource(
f"{disk_target}-x-{len(x_shards)}.npy",
is_absolute=is_absolute,
download_method=None, download_target=None)
|
np.save(x_resource.disk_target, shard_x)
|
numpy.save
|
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
from scipy import stats
from scipy.stats import kurtosis, skew
def pdf(costh, P_mu):
# define our probability density function
return 0.5 * (1.0 - 1.0 / 3.0 * P_mu * costh)
def inv_cdf_pos(r, P_mu):
# inverse of the cumulative density function
# since we have positive and negative solutions for x(r), we have to consider both
return 3./P_mu*(1.+np.sqrt(1.+2./3.*P_mu*(P_mu/6.+1.-2.*r)))
def inv_cdf_neg(r, P_mu):
return 3./P_mu*(1.-np.sqrt(1.+2./3.*P_mu*(P_mu/6.+1.-2.*r)))
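# --- hedged derivation note (illustrative, not part of the original script) ---
# The CDF of pdf(costh, P_mu) on [-1, 1] is F(x) = 0.5*(x + 1) - P_mu/12*(x**2 - 1),
# and inv_cdf_pos / inv_cdf_neg are the two roots of F(x) = r. For the physical
# range 0 < P_mu <= 1 the '-' branch is the root that lands inside [-1, 1];
# a quick round-trip check makes that explicit:
#   P_mu, r = 1.0, 0.5
#   x = inv_cdf_neg(r, P_mu)
#   assert -1.0 <= x <= 1.0
#   assert abs(0.5*(x + 1.0) - P_mu/12.0*(x**2 - 1.0) - r) < 1e-12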
def inv_trans(N_measurements, P_mu):
r =
|
np.random.uniform(0.0, 1.0, size=N_measurements)
|
numpy.random.uniform
|
import numpy as np
import unittest
from numpy.testing import *
from src.tabular.policies import TabularQPolicy, Qdict2array
from src.tabular.TD import QLearning
from src.envs.dummy_envs import *
class TestQLearning(unittest.TestCase):
def setUp(self):
self.env = ChainEnv(6)
self.env_multi = GridEnv(n=3, multi_discrete_action=True,
goal_reward=1)
self.model = QLearning(TabularQPolicy, self.env)
self.model_multi = QLearning(TabularQPolicy, self.env_multi)
def test_Q_learning(self):
self.model.learn(2000)
targetQ = np.array([[.95, .96],
[.95, .97],
[.96, .98],
[.97, .99],
[.98, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(targetQ, Qdict2array(self.model.get_parameters()['Q']))
def test_predict(self):
# Check that predicts does not use softmax but greedy policy by default
self.model.learn(2000)
targetQ = np.array([[.95, .96],
[.95, .97],
[.96, .98],
[.97, .99],
[.98, 1.0],
[0.0, 0.0]])
for i in range(100):
assert_equal(self.model.predict(0)[0], 1)
def test_Q_learning_multi(self):
"""
Test Q learning with multi-discrete action and state space.
"""
self.model_multi.learn(8000)
targetQ = np.array([[.97, .97, .96, .96],
[.98, .98, .96, .97],
[.98, .99, .97, .98],
[.98, .98, .97, .96],
[.99, .99, .97, .97],
[.99, 1.0, .98, .98],
[.99, .98, .98, .97],
[1.0, .99, .98, .98],
[0.0, 0.0, 0.0, 0.0]])
assert_array_almost_equal(targetQ, Qdict2array(self.model_multi.get_parameters()['Q']), decimal=4)
class TestTabularQPolicy(unittest.TestCase):
def setUp(self):
self.env = GridEnv(n=3, multi_discrete_action=True,
goal_reward=1)
self.policy = TabularQPolicy(self.env.observation_space, self.env.action_space)
def test(self):
# Update the Q values
s = [(0, 0), (0, 0)]
a = [(0, 1), (0, 0)]
vals = [1, 2]
self.policy.update_Q(s, a, vals)
# Check the probability distribution and the sampling are right
denominator = 2 * np.exp(0) + np.exp(1) + np.exp(2)
target = [
|
np.exp(2)
|
numpy.exp
|
from __future__ import absolute_import
from __future__ import division
import base64
import sys
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion, convolve
import unittest
import pytest
import centrosome.filter as F
from six.moves import range
"""Perform line-integration per-column of the image"""
VERTICAL = "vertical"
"""Perform line-integration per-row of the image"""
HORIZONTAL = "horizontal"
"""Perform line-integration along diagonals from top left to bottom right"""
DIAGONAL = "diagonal"
"""Perform line-integration along diagonals from top right to bottom left"""
ANTI_DIAGONAL = "anti-diagonal"
class TestStretch(unittest.TestCase):
def test_00_00_empty(self):
result = F.stretch(np.zeros((0,)))
self.assertEqual(len(result), 0)
def test_00_01_empty_plus_mask(self):
result = F.stretch(np.zeros((0,)), np.zeros((0,), bool))
self.assertEqual(len(result), 0)
def test_00_02_zeros(self):
result = F.stretch(np.zeros((10, 10)))
self.assertTrue(np.all(result == 0))
def test_00_03_zeros_plus_mask(self):
result = F.stretch(np.zeros((10, 10)), np.ones((10, 10), bool))
self.assertTrue(np.all(result == 0))
def test_00_04_half(self):
result = F.stretch(np.ones((10, 10)) * 0.5)
self.assertTrue(np.all(result == 0.5))
def test_00_05_half_plus_mask(self):
result = F.stretch(np.ones((10, 10)) * 0.5, np.ones((10, 10), bool))
self.assertTrue(np.all(result == 0.5))
def test_01_01_rescale(self):
np.random.seed(0)
image = np.random.uniform(-2, 2, size=(10, 10))
image[0, 0] = -2
image[9, 9] = 2
expected = (image + 2.0) / 4.0
result = F.stretch(image)
self.assertTrue(np.all(result == expected))
def test_01_02_rescale_plus_mask(self):
np.random.seed(0)
image = np.random.uniform(-2, 2, size=(10, 10))
mask = np.zeros((10, 10), bool)
mask[1:9, 1:9] = True
image[0, 0] = -4
image[9, 9] = 4
image[1, 1] = -2
image[8, 8] = 2
expected = (image[1:9, 1:9] + 2.0) / 4.0
result = F.stretch(image, mask)
self.assertTrue(np.all(result[1:9, 1:9] == expected))
class TestMedianFilter(unittest.TestCase):
def test_00_00_zeros(self):
"""The median filter on an array of all zeros should be zero"""
result = F.median_filter(np.zeros((10, 10)), np.ones((10, 10), bool), 3)
self.assertTrue(np.all(result == 0))
def test_00_01_all_masked(self):
"""Test a completely masked image
Regression test of IMG-1029"""
result = F.median_filter(np.zeros((10, 10)), np.zeros((10, 10), bool), 3)
self.assertTrue(np.all(result == 0))
def test_00_02_all_but_one_masked(self):
mask = np.zeros((10, 10), bool)
mask[5, 5] = True
result = F.median_filter(np.zeros((10, 10)), mask, 3)
def test_01_01_mask(self):
"""The median filter, masking a single value"""
img = np.zeros((10, 10))
img[5, 5] = 1
mask = np.ones((10, 10), bool)
mask[5, 5] = False
result = F.median_filter(img, mask, 3)
self.assertTrue(np.all(result[mask] == 0))
def test_02_01_median(self):
"""A median filter larger than the image = median of image"""
np.random.seed(0)
img = np.random.uniform(size=(9, 9))
result = F.median_filter(img, np.ones((9, 9), bool), 20)
self.assertEqual(result[0, 0], np.median(img))
self.assertTrue(np.all(result == np.median(img)))
def test_02_02_median_bigger(self):
"""Use an image of more than 255 values to test approximation"""
np.random.seed(0)
img = np.random.uniform(size=(20, 20))
result = F.median_filter(img, np.ones((20, 20), bool), 40)
sorted = np.ravel(img)
sorted.sort()
min_acceptable = sorted[198]
max_acceptable = sorted[202]
self.assertTrue(np.all(result >= min_acceptable))
self.assertTrue(np.all(result <= max_acceptable))
def test_03_01_shape(self):
"""Make sure the median filter is the expected octagonal shape"""
radius = 5
a_2 = int(radius / 2.414213)
i, j = np.mgrid[-10:11, -10:11]
octagon = np.ones((21, 21), bool)
#
# constrain the octagon mask to be the points that are on
# the correct side of the 8 edges
#
octagon[i < -radius] = False
octagon[i > radius] = False
octagon[j < -radius] = False
octagon[j > radius] = False
octagon[i + j < -radius - a_2] = False
octagon[j - i > radius + a_2] = False
octagon[i + j > radius + a_2] = False
octagon[i - j > radius + a_2] = False
np.random.seed(0)
img = np.random.uniform(size=(21, 21))
result = F.median_filter(img, np.ones((21, 21), bool), radius)
sorted = img[octagon]
sorted.sort()
min_acceptable = sorted[len(sorted) // 2 - 1]
max_acceptable = sorted[len(sorted) // 2 + 1]
self.assertTrue(result[10, 10] >= min_acceptable)
self.assertTrue(result[10, 10] <= max_acceptable)
def test_04_01_half_masked(self):
"""Make sure that the median filter can handle large masked areas."""
img = np.ones((20, 20))
mask = np.ones((20, 20), bool)
mask[10:, :] = False
img[~mask] = 2
img[1, 1] = 0 # to prevent short circuit for uniform data.
result = F.median_filter(img, mask, 5)
# in partial coverage areas, the result should be only from the masked pixels
self.assertTrue(np.all(result[:14, :] == 1))
# in zero coverage areas, the result should be the lowest value in the valid area
self.assertTrue(np.all(result[15:, :] == np.min(img[mask])))
@pytest.mark.skipif(sys.version_info > (3, 0), reason="requires Python 2.7")
class TestBilateralFilter(unittest.TestCase):
def test_00_00_zeros(self):
"""Test the bilateral filter of an array of all zeros"""
result = F.bilateral_filter(
np.zeros((10, 10)), np.ones((10, 10), bool), 5.0, 0.1
)
self.assertTrue(np.all(result == 0))
def test_00_01_all_masked(self):
"""Test the bilateral filter of a completely masked array"""
np.random.seed(0)
image = np.random.uniform(size=(10, 10))
result = F.bilateral_filter(image, np.zeros((10, 10), bool), 5.0, 0.1)
self.assertTrue(np.all(result == image))
class TestLaplacianOfGaussian(unittest.TestCase):
def test_00_00_zeros(self):
result = F.laplacian_of_gaussian(np.zeros((10, 10)), None, 9, 3)
self.assertTrue(np.all(result == 0))
def test_00_01_zeros_mask(self):
result = F.laplacian_of_gaussian(
np.zeros((10, 10)), np.zeros((10, 10), bool), 9, 3
)
self.assertTrue(np.all(result == 0))
def test_01_01_ring(self):
"""The LoG should have its lowest value in the center of the ring"""
i, j = np.mgrid[-20:21, -20:21].astype(float)
# A ring of radius 3, more or less
image = (np.abs(i ** 2 + j ** 2 - 3) < 2).astype(float)
result = F.laplacian_of_gaussian(image, None, 9, 3)
self.assertTrue(
(np.argmin(result) % 41, int(np.argmin(result) / 41)) == (20, 20)
)
class TestCanny(unittest.TestCase):
def test_00_00_zeros(self):
"""Test that the Canny filter finds no points for a blank field"""
result = F.canny(np.zeros((20, 20)),
|
np.ones((20, 20), bool)
|
numpy.ones
|
import os # noqa: autoimport
import shutil
from math import floor, log2
from subprocess import DEVNULL, Popen
from typing import Tuple
import essentia.standard as esst
import fastdtw as fdtw
import librosa
import numpy as np
import pretty_midi
import scipy
from dtw import dtw
from .. import cdist, utils
from .dlnco.DLNCO import dlnco
def check_executables():
"""
Just check that `fluidsynth` is available in the path and raise a
RuntimeError if not
"""
if shutil.which('fluidsynth') is None:
raise RuntimeError(
"Please, install fluidsynth command and make it available in your PATH environment variable"
)
def download_soundfont():
"""
Just download MuseScore 3 soundfont to `./soundfont.sf2`
"""
sf2_path = "soundfont.sf2"
if not os.path.exists(sf2_path):
import urllib.request # noqa: autoimport
url = "https://ftp.osuosl.org/pub/musescore/soundfont/MuseScore_General/MuseScore_General.sf2"
print("downloading...")
urllib.request.urlretrieve(url, sf2_path)
# hack to let fastdtw accept float32
def _my_prep_inputs(x, y, dist):
return x, y
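# (Hedged note, based on an assumption about fastdtw internals: the library's own
# __prep_inputs casts inputs to float64 and validates their shapes; swapping in this
# no-op, as done below via `fdtw._fastdtw.__prep_inputs = _my_prep_inputs`, keeps the
# float32 feature matrices untouched.)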
def fdtw_dist(sample1, sample2):
"""
This function computes the Euclidean distance on the first `12` features and the
cosine distance on the remaining ones, then returns the sum of the two distances.
"""
return cdist.euclidean(sample1[:12], sample2[:12]) + cdist.cosine(
sample1[12:], sample2[12:])
def multiple_audio_alignment(audio1,
sr1,
audio2,
sr2,
hopsize,
n_fft=4096,
merge_dlnco=True,
fastdtw=False):
"""
Aligns two audio files and returns a list of lists containing the map
between the audio frames.
Parameters
----------
audio1 : np.array
Numpy array representing the signal.
sr1 : int
Sampling rate of **audio1**
audio2 : np.array
Numpy array representing the signal.
sr2 : int
Sampling rate of **audio2**
hopsize : int
The hopsize for the FFT. Consider using something like `n_fft/4`
n_fft : int
The window size. Consider using something like `4*hopsize`
merge_dlnco : bool
Unknown
Returns
-------
numpy.ndarray
A 2d array, mapping frames from :attr: `audio1` to frames in
:attr: `audio2`. `[[frame in audio 1, frame in audio 2]]`
"""
# chroma and DLNCO features
# output shape is (features, frames)
# transposed -> (frames, features)
audio1_chroma = librosa.feature.chroma_stft(y=audio1,
sr=sr1,
tuning=0,
norm=2,
hop_length=hopsize,
n_fft=n_fft).T
audio1_dlnco = dlnco(audio1, sr1, n_fft, hopsize).T
audio2_chroma = librosa.feature.chroma_stft(y=audio2,
sr=sr2,
tuning=0,
norm=2,
hop_length=hopsize,
n_fft=n_fft).T
audio2_dlnco = dlnco(audio2, sr2, n_fft, hopsize).T
L = min(audio1_dlnco.shape[0], audio2_dlnco.shape[0])
if not fastdtw:
dlnco_mat = scipy.spatial.distance.cdist(audio1_dlnco[:L, :],
audio2_dlnco[:L, :],
'euclidean')
chroma_mat = scipy.spatial.distance.cdist(audio1_chroma[:L, :],
audio2_chroma[:L, :],
'cosine')
# print("Starting DTW")
res = dtw(dlnco_mat + chroma_mat)
wp = np.stack([res.index1, res.index2], axis=1)
return wp
else:
# shape of features is still (frames, features)
features1 = np.concatenate([audio1_chroma, audio1_dlnco], axis=1)
features2 = np.concatenate([audio2_chroma, audio2_dlnco], axis=1)
fdtw._fastdtw.__prep_inputs = _my_prep_inputs
_D, path = fdtw.fastdtw(features1.astype(np.float32),
features2.astype(np.float32),
dist=fdtw_dist,
radius=98)
return
|
np.asarray(path)
|
numpy.asarray
|
import numpy as np
import scipy
import scipy.stats
import scipy.sparse
from sklearn.decomposition import PCA,TruncatedSVD
from sklearn.neighbors import NearestNeighbors
import time
import os
import json
from datetime import datetime
import matplotlib.pyplot as plt
import scanpy as sc
import pandas as pd
########## LOADING DATA
def load_genes(filename, delimiter='\t', column=0, skip_rows=0):
''' Load gene list from a file
Arguments
- filename : str
Name of file containing gene names
- delimiter : str, optional (default: "\t")
Column delimiter
- column : int, optional (default: 0)
Column containing gene names
- skip_rows : int, optional (default: 0)
Number of rows to skip at beginning of file
Returns
- gene_list : list, length n_genes
List of gene names
'''
gene_list = []
with open(filename) as f:
for iL,l in enumerate(f):
if iL >= skip_rows:
gene_list.append(l.strip('\n').split(delimiter)[column])
return gene_list
def make_genes_unique(orig_gene_list):
''' Make gene names unique by adding "__1", "__2", etc. to end of duplicate gene names
Arguments
- orig_gene_list : list, length n_genes
List of gene names possibly containing duplicates
Returns
- gene_list : list, length n_genes
List of unique gene names
'''
gene_list = []
gene_dict = {}
for gene in orig_gene_list:
if gene in gene_dict:
gene_dict[gene] += 1
gene_list.append(gene + '__' + str(gene_dict[gene]))
if gene_dict[gene] == 2:
i = gene_list.index(gene)
gene_list[i] = gene + '__1'
else:
gene_dict[gene] = 1
gene_list.append(gene)
return gene_list
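# Illustrative example (not part of the original module):
#   make_genes_unique(['ACTB', 'GAPDH', 'ACTB', 'ACTB'])
#   -> ['ACTB__1', 'GAPDH', 'ACTB__2', 'ACTB__3']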
def load_pickle(filename):
'''Load data from pickle file
Attempts to load pickle file using pickle library, falling back on pandas read_pickle if there
are version issues.
Arguments
- filename : str
Pickle file name
Returns
- dat : object
Object loaded from filename
'''
try:
import pickle
dat = pickle.load(open(filename, 'rb'))
except:
import pandas as pd
dat = pd.read_pickle(filename)
return dat
### loading counts
def file_opener(filename):
'''Open file and return a file object, automatically decompressing zip and gzip
Arguments
- filename : str
Name of input file
Returns
- outData : file object
(Decompressed) file data
'''
if filename.endswith('.gz'):
fileData = open(filename, 'rb')
import gzip
outData = gzip.GzipFile(fileobj = fileData, mode = 'rb')
elif filename.endswith('.zip'):
fileData = open(filename, 'rb')
import zipfile
zipData = zipfile.ZipFile(fileData, 'r')
fnClean = filename.strip('/').split('/')[-1][:-4]
outData = zipData.open(fnClean)
else:
outData = open(filename, 'r')
return outData
def load_mtx(file_data):
''' Load mtx file as scipy.sparse.csc_matrix
Arguments
- file_data : str or file object
Name of input file or a file object
Returns
- scipy.sparse.csc_matrix
'''
return scipy.io.mmread(file_data).tocsc()
def load_npz(file_data):
''' Load scipy.sparse npz file as scipy.sparse.csc_matrix
Arguments
- file_data : str or file object
Name of input file or a file object
Returns
- scipy.sparse.csc_matrix
'''
return scipy.sparse.load_npz(file_data).tocsc()
def load_npy(file_data):
''' Load npy file, converting to scipy.sparse.csc_matrix
Arguments
- file_data : str or file object
Name of input file or a file object
Returns
- scipy.sparse.csc_matrix
'''
return scipy.sparse.csc_matrix(np.load(file_data))
def load_text(file_data, delim='\t', start_column=None, start_row=None, print_row_interval=None):
'''Load text file as scipy.sparse.csc_matrix
If start_column is not specified, attempts to automatically identify the counts matrix
(all numeric columns).
'''
X_data = []
X_row = []
X_col = []
ncol = None
for row_ix, dat in enumerate(file_data):
if print_row_interval is not None:
if (row_ix+1) % print_row_interval == 0:
print('Row {}'.format(row_ix+1))
if type(dat) == bytes:
dat = dat.decode('utf-8')
dat = dat.strip('\n').split(delim)
if start_row is None:
current_col = 0
found_float = False
while not found_float and current_col < len(dat):
try:
tmp = float(dat[current_col])
try:
rowdat = np.array(list(map(float, dat[current_col:])))
ncol = len(rowdat)
col_ix = np.nonzero(rowdat)[0]
found_float = True
start_row = row_ix
start_column = current_col
X_col.extend(col_ix)
X_row.extend([row_ix - start_row] * len(col_ix))
X_data.extend(rowdat[col_ix])
except:
current_col += 1
except:
current_col += 1
elif row_ix >= start_row:
rowdat = np.array(list(map(float, dat[start_column:])))
if ncol is None:
ncol = len(rowdat)
else:
if len(rowdat) != ncol:
return 'ERROR: Rows have different numbers of numeric columns.'
col_ix = np.nonzero(rowdat)[0]
X_col.extend(col_ix)
X_row.extend([row_ix - start_row] * len(col_ix))
X_data.extend(rowdat[col_ix])
if start_row is None:
return 'ERROR: no numeric values found'
nrow = row_ix - start_row + 1
E = scipy.sparse.coo_matrix((X_data, (X_row, X_col)), dtype=float, shape=(nrow, ncol)).tocsc()
return E
def load_text2(file_data, delim='\t', start_column=0, start_row=0, row_indices=None, column_indices=None, print_row_interval=None):
'''Load text file as scipy.sparse.csc_matrix
Can load a user-specified subset of rows and/or columns
'''
X_data = []
X_row = []
X_col = []
ncol = None
nrow = 0
for row_ix, dat in enumerate(file_data):
if print_row_interval is not None:
if (row_ix+1) % print_row_interval == 0:
print('Row {}'.format(row_ix+1))
if type(dat) == bytes:
dat = dat.decode('utf-8')
dat = dat.strip('\n').split(delim)
if row_ix >= start_row:
read_row = True
if row_indices is not None:
if (row_ix - start_row) not in row_indices:
read_row = False
if read_row:
rowdat = dat[start_column:]
if ncol is None:
ncol = len(rowdat)
else:
if len(rowdat) != ncol:
return 'ERROR: Rows have different numbers of numeric columns.'
if column_indices is None:
column_indices = np.arange(ncol)
rowdat = np.array(list(map(float, rowdat)))[column_indices]
col_ix = np.nonzero(rowdat)[0]
X_col.extend(col_ix)
X_row.extend([nrow] * len(col_ix))
X_data.extend(rowdat[col_ix])
nrow += 1
ncol = len(column_indices)
print(nrow,ncol)
E = scipy.sparse.coo_matrix((X_data, (X_row, X_col)), dtype=float, shape=(nrow, ncol)).tocsc()
return E
def load_annotated_text(file_data, delim='\t', read_row_labels=False, read_column_labels=False, transpose=False, chunk_size=2000):
'''Load text file as scipy.sparse.csc_matrix, returning column and/or row labels if desired.
Loads rows in chunks to ease memory demands.
'''
X_data = []
X_row = []
X_col = []
ncol = None
nrow = 0
row_labels = []
column_labels = []
E_chunks = []
for row_ix, dat in enumerate(file_data):
if type(dat) == bytes:
dat = dat.decode('utf-8')
dat = dat.strip('\n').split(delim)
if read_column_labels and row_ix == 0:
if read_column_labels:
column_labels = dat[1:]
else:
column_labels = dat
else:
if read_row_labels:
row_labels.append(dat[0])
rowdat = dat[1:]
else:
rowdat = dat[0:]
if ncol is None:
ncol = len(rowdat)
else:
if len(rowdat) != ncol:
return 'ERROR: Line {} has {} columns. Previous line(s) had {}'.format(row_ix, len(rowdat), ncol)
rowdat = np.array(list(map(float, rowdat)))
col_ix = np.nonzero(rowdat)[0]
X_col.extend(col_ix)
X_row.extend([nrow] * len(col_ix))
X_data.extend(rowdat[col_ix])
nrow += 1
if chunk_size is not None:
if nrow % chunk_size == 0:
E_chunks.append(scipy.sparse.coo_matrix((X_data, (X_row, X_col)), dtype=float, shape=(nrow, ncol)))
X_data = []
X_row = []
X_col = []
nrow = 0
if nrow > 0:
E_chunks.append(scipy.sparse.coo_matrix((X_data, (X_row, X_col)), dtype=float, shape=(nrow, ncol)))
E = scipy.sparse.vstack(E_chunks)
if transpose:
E = E.T
return E.tocsc(), np.array(row_labels), np.array(column_labels)
def load_cellranger_h5_v2(filename, genome):
import h5py
import scipy.sparse as ssp
f = h5py.File(filename, 'r')
barcodes = np.array(f.get(genome).get('barcodes')).astype(str)
gene_names = np.array(f.get(genome).get('gene_names')).astype(str)
data = np.array(f.get(genome).get('data'))
indices = np.array(f.get(genome).get('indices'))
indptr = np.array(f.get(genome).get('indptr'))
shape = np.array(f.get(genome).get('shape'))
# Make sparse expression matrix
E = ssp.csc_matrix((data, indices, indptr), shape=shape).T.tocsc()
f.close()
return E, barcodes, gene_names
def load_cellranger_h5_v3(filename):
import h5py
import scipy.sparse as ssp
f = h5py.File(filename, 'r')
barcodes = np.array(f.get('matrix').get('barcodes')).astype(str)
gene_names = np.array(f.get('matrix').get('features').get('name')).astype(str)
data = np.array(f.get('matrix').get('data'))
indices = np.array(f.get('matrix').get('indices'))
indptr = np.array(f.get('matrix').get('indptr'))
shape = np.array(f.get('matrix').get('shape'))
# Make sparse expression matrix
E = ssp.csc_matrix((data, indices, indptr), shape=shape).T.tocsc()
f.close()
return E, barcodes, gene_names
########## USEFUL SPARSE FUNCTIONS
def sparse_var(E, axis=0):
''' calculate variance across the specified axis of a sparse matrix'''
mean_gene = E.mean(axis=axis).A.squeeze()
tmp = E.copy()
tmp.data **= 2
return tmp.mean(axis=axis).A.squeeze() - mean_gene ** 2
def sparse_rowwise_multiply(E, a):
''' multiply each row of a sparse matrix by the corresponding entry of a vector '''
nrow = E.shape[0]
w = scipy.sparse.lil_matrix((nrow, nrow))
w.setdiag(a)
return w * E
def mean_center(E, column_means=None):
''' mean-center columns of a sparse matrix '''
if column_means is None:
column_means = E.mean(axis=0)
return E - column_means
def normalize_variance(E, column_stdevs=None):
''' variance-normalize columns of a sparse matrix '''
if column_stdevs is None:
column_stdevs = np.sqrt(sparse_var(E, axis=0))
return sparse_rowwise_multiply(E.T, 1 / column_stdevs).T
def sparse_zscore(E, gene_mean=None, gene_stdev=None):
''' z-score normalize each column of a sparse matrix '''
if gene_mean is None:
gene_mean = E.mean(0)
if gene_stdev is None:
gene_stdev = np.sqrt(sparse_var(E))
return sparse_rowwise_multiply((E - gene_mean).T, 1/gene_stdev).T
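# Hedged sanity sketch (illustrative): the sparse helpers above should match their
# dense counterparts up to floating point error, e.g.
#   E = scipy.sparse.csc_matrix(np.array([[1., 2.], [3., 4.], [5., 9.]]))
#   D = E.toarray()
#   assert np.allclose(sparse_var(E, axis=0), D.var(axis=0))
#   assert np.allclose(np.asarray(sparse_zscore(E)), (D - D.mean(0)) / D.std(0))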
########## CELL FILTERING
def filter_dict(d, filt):
''' filter 1-D and 2-D entries in a dictionary '''
for k,v in d.items():
if k != 'meta':
if len(v.shape) == 1:
d[k] = v[filt]
else:
d[k] = v[filt,:]
return d
########## GENE FILTERING
def runningquantile(x, y, p, nBins):
''' calculate the quantile of y in bins of x '''
ind = np.argsort(x)
x = x[ind]
y = y[ind]
dx = (x[-1] - x[0]) / nBins
xOut = np.linspace(x[0]+dx/2, x[-1]-dx/2, nBins)
yOut = np.zeros(xOut.shape)
for i in range(len(xOut)):
ind = np.nonzero((x >= xOut[i]-dx/2) & (x < xOut[i]+dx/2))[0]
if len(ind) > 0:
yOut[i] = np.percentile(y[ind], p)
else:
if i > 0:
yOut[i] = yOut[i-1]
else:
yOut[i] = np.nan
return xOut, yOut
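# Illustrative sketch (not in the original module): bin x into nBins equal-width
# bins and report the p-th percentile of y inside each bin, e.g.
#   x = np.linspace(0.0, 1.0, 1000)
#   y = x + 0.1 * np.random.randn(1000)
#   bin_centers, y_p10 = runningquantile(x, y, 10, 20)  # 10th percentile per bin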
def get_vscores(E, min_mean=0, nBins=50, fit_percentile=0.1, error_wt=1):
'''
Calculate v-score (above-Poisson noise statistic) for genes in the input sparse counts matrix
Return v-scores and other stats
'''
ncell = E.shape[0]
mu_gene = E.mean(axis=0).A.squeeze()
gene_ix = np.nonzero(mu_gene > min_mean)[0]
mu_gene = mu_gene[gene_ix]
tmp = E[:,gene_ix]
tmp.data **= 2
var_gene = tmp.mean(axis=0).A.squeeze() - mu_gene ** 2
del tmp
FF_gene = var_gene / mu_gene
data_x = np.log(mu_gene)
data_y = np.log(FF_gene / mu_gene)
x, y = runningquantile(data_x, data_y, fit_percentile, nBins)
x = x[~np.isnan(y)]
y = y[~np.isnan(y)]
gLog = lambda input: np.log(input[1] * np.exp(-input[0]) + input[2])
h,b = np.histogram(np.log(FF_gene[mu_gene>0]), bins=200)
b = b[:-1] + np.diff(b)/2
max_ix = np.argmax(h)
c = np.max((np.exp(b[max_ix]), 1))
errFun = lambda b2: np.sum(abs(gLog([x,c,b2])-y) ** error_wt)
b0 = 0.1
b = scipy.optimize.fmin(func = errFun, x0=[b0], disp=False)
a = c / (1+b) - 1
v_scores = FF_gene / ((1+a)*(1+b) + b * mu_gene)
CV_eff = np.sqrt((1+a)*(1+b) - 1)
CV_input = np.sqrt(b)
return v_scores, CV_eff, CV_input, gene_ix, mu_gene, FF_gene, a, b
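# Hedged usage note (illustrative): downstream, highly variable genes are selected by
# thresholding the returned v_scores at a percentile, roughly
#   v_scores, CV_eff, CV_input, gene_ix, mu_gene, FF_gene, a, b = get_vscores(E)
#   hv_genes = gene_ix[v_scores >= np.percentile(v_scores, 85)]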
def filter_genes(E, base_ix = [], min_vscore_pctl = 85, min_counts = 3, min_cells = 3, show_vscore_plot = False, sample_name = ''):
'''
Filter genes by expression level and variability
Return list of filtered gene indices
'''
if len(base_ix) == 0:
base_ix =
|
np.arange(E.shape[0])
|
numpy.arange
|
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
import unittest
import pygsti
import numpy as np
from scipy import polyfit
from ..testutils import compare_files, regenerate_references
from .basecase import AlgorithmsBase
class TestCoreMethods(AlgorithmsBase):
def test_LGST(self):
ds = self.ds
print("GG0 = ",self.model.default_gauge_group)
mdl_lgst = pygsti.run_lgst(ds, self.fiducials, self.fiducials, self.model, svd_truncate_to=4, verbosity=0)
mdl_lgst_verb = self.runSilent(pygsti.run_lgst, ds, self.fiducials, self.fiducials, self.model, svd_truncate_to=4, verbosity=10)
self.assertAlmostEqual(mdl_lgst.frobeniusdist(mdl_lgst_verb),0)
print("GG = ",mdl_lgst.default_gauge_group)
mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, self.model, {'spam':1.0, 'gates': 1.0}, check_jac=True)
mdl_clgst = pygsti.contract(mdl_lgst_go, "CPTP")
# RUN BELOW LINES TO SEED SAVED GATESET FILES
if regenerate_references():
pygsti.io.write_model(mdl_lgst, compare_files + "/lgst.model", "Saved LGST Model before gauge optimization")
pygsti.io.write_model(mdl_lgst_go, compare_files + "/lgst_go.model", "Saved LGST Model after gauge optimization")
pygsti.io.write_model(mdl_clgst, compare_files + "/clgst.model", "Saved LGST Model after G.O. and CPTP contraction")
mdl_lgst_compare = pygsti.io.load_model(compare_files + "/lgst.model")
mdl_lgst_go_compare = pygsti.io.load_model(compare_files + "/lgst_go.model")
mdl_clgst_compare = pygsti.io.load_model(compare_files + "/clgst.model")
self.assertAlmostEqual( mdl_lgst.frobeniusdist(mdl_lgst_compare), 0, places=5)
self.assertAlmostEqual( mdl_lgst_go.frobeniusdist(mdl_lgst_go_compare), 0, places=5)
self.assertAlmostEqual( mdl_clgst.frobeniusdist(mdl_clgst_compare), 0, places=5)
def test_LGST_no_sample_error(self):
#change rep-count type so dataset can hold fractional counts for sampleError = 'none'
oldType = pygsti.data.dataset.Repcount_type
pygsti.data.dataset.Repcount_type = np.float64
ds = pygsti.data.simulate_data(self.datagen_gateset, self.lgstStrings,
num_samples=10000, sample_error='none')
pygsti.data.dataset.Repcount_type = oldType
mdl_lgst = pygsti.run_lgst(ds, self.fiducials, self.fiducials, self.model, svd_truncate_to=4, verbosity=0)
print("DATAGEN:")
print(self.datagen_gateset)
print("\nLGST RAW:")
print(mdl_lgst)
mdl_lgst = pygsti.gaugeopt_to_target(mdl_lgst, self.datagen_gateset, {'spam':1.0, 'gates': 1.0}, check_jac=False)
print("\nAfter gauge opt:")
print(mdl_lgst)
print(mdl_lgst.strdiff(self.datagen_gateset))
self.assertAlmostEqual( mdl_lgst.frobeniusdist(self.datagen_gateset), 0, places=4)
def test_LGST_1overSqrtN_dependence(self):
my_datagen_gateset = self.model.depolarize(op_noise=0.05, spam_noise=0)
# !!don't depolarize spam or 1/sqrt(N) dependence saturates!!
nSamplesList = np.array([ 16, 128, 1024, 8192 ])
diffs = []
for nSamples in nSamplesList:
ds = pygsti.data.simulate_data(my_datagen_gateset, self.lgstStrings, nSamples,
sample_error='binomial', seed=100)
mdl_lgst = pygsti.run_lgst(ds, self.fiducials, self.fiducials, self.model, svd_truncate_to=4, verbosity=0)
mdl_lgst_go = pygsti.gaugeopt_to_target(mdl_lgst, my_datagen_gateset, {'spam':1.0, 'gate': 1.0}, check_jac=True)
diffs.append( my_datagen_gateset.frobeniusdist(mdl_lgst_go) )
diffs = np.array(diffs, 'd')
a, b = polyfit(
|
np.log10(nSamplesList)
|
numpy.log10
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import os
import argparse
import numpy as np
import random
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable, grad
from tensorboardX import SummaryWriter
from QualityMetrics import Indicators
from MakeHist import MakeHist
from matplotlib import pyplot as plt
import importlib
##
## Command line parameters ##
##
parser = argparse.ArgumentParser(description='Train the selected model')
parser.add_argument('--run',type=int,help='Number of the run')
parser.add_argument('--model_ver',type=str,help='Version of the model. Must be the name of the module containing the desired model.')
parser.add_argument('--eta',type=float,default=0.12,help='Weight of the sum between the losses of the two discriminators (Only relevant for GANConv1Dv1WIa.py model).')
parser.add_argument('--alpha',type=float,default=300,help='Alpha parameter for pre-processing.')
parser.add_argument('--grad_norm_reg',type=bool,default=False,help='If gradient-norm regularization is applied.')
parser.add_argument('--gamma',type=float,default=0.01,help='Rate for gradient-norm regularization.')
parser.add_argument('--n_epochs',type=int,default=140,help='Number of epochs for training.')
parser.add_argument('--batch_size',type=int,default=20,help='Batch size.')
parser.add_argument('--lr_g',type=float,default=0.0001,help='Learning rate for the generator.')
parser.add_argument('--lr_d',type=float,default=0.00001,help='Learning rate for the discriminator.')
parser.add_argument('--n_critic',type=int,default=3,help='Number of discriminator steps per generator step.')
parser.add_argument('--n_parts',type=int,default=5,help='Split the Universe in n_parts')
opt = parser.parse_args()
model_ver = opt.model_ver
run = opt.run
# Directory for saving TensorBoard files, numpy arrays contaning the results of the attacks and the weights of trained models.
saving_dir = './Logs/'+model_ver
# Instantiate TensorBoard SummaryWriter
writer = SummaryWriter(saving_dir+'/Tensorboard/exp'+str(run))
##
## Creating Random list of clients ##
##
# First 'step' elements of the list will be selected as training data
n_parts = opt.n_parts
data_dir = "./DataSets"
dir_list = os.listdir(data_dir)
random.shuffle(dir_list)
step = int(len(dir_list)/n_parts)
# Saving the training set shuffle
list_directory = saving_dir+'/npdata/dirlist'
if not os.path.exists(list_directory):
os.makedirs(list_directory)
np.save(list_directory+'/List'+str(run)+'.npy',dir_list)
# Arranging clients into subsets. The first subset will be the training set.
subset_list = []
universe = np.empty(shape=[0,336], dtype='float32')
for i in range(0,len(dir_list),step):
np_aux = np.empty(shape=[0,336], dtype='float32')
if ((len(dir_list)-i)>=step):
for j in range(step):
aux = np.load(data_dir+'/'+dir_list[i+j])
np_aux = np.append(np_aux,aux,axis=0)
universe = np.append(universe,aux,axis=0)
subset_list.append(np_aux)
# Saving alpha and maximum for transformation and inverse
# Maximum taken over the universe
alpha = opt.alpha
train_aux = np.arcsinh(universe*alpha)/alpha
save_max = np.reshape(train_aux,-1).max()
##
## Set-up ##
##
# Checking for cuda
if torch.cuda.is_available():
cuda = True
else:
cuda = False
# Loss Function
loss = nn.BCEWithLogitsLoss() # Note that this loss function integrates the sigmoid activation for numerical stability.
# Instantiating generator and discriminator models
module_arch = importlib.__import__(model_ver)
generator = module_arch.GeneratorConv1D()
if model_ver == 'GANConv1Dv1WIa':
discriminator = module_arch.DiscriminatorSignal()
discriminator_I = module_arch.DiscriminatorIndicators()
else:
discriminator = module_arch.DiscriminatorConv1D()
if cuda:
generator.cuda()
discriminator.cuda()
loss.cuda()
if model_ver == 'GANConv1Dv1WIa':
discriminator_I.cuda()
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# Defining pre-processing transformation and inverse transformation
# Works with numpy arrays!!!
def transformation(array,alpha,save_max):
array = np.arcsinh(array*alpha)/alpha
array = (array*2.0)/save_max - 1.0
array = array[:,np.newaxis,:]
return array
# Works with pytorch tensors!!!
def inverse_trans(arrtensor,alpha,save_max):
arrtensor = (arrtensor+1.0)*save_max/2.0
return torch.sinh(arrtensor*alpha)/alpha
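# Hedged sanity sketch (illustrative, not part of the training pipeline):
# transformation and inverse_trans should approximately undo each other for the
# fitted alpha and save_max, e.g.
#   x = np.abs(np.random.randn(4, 336).astype('float32'))
#   x_t = transformation(x, alpha, save_max)                 # numpy in, shape (4, 1, 336)
#   x_back = inverse_trans(torch.from_numpy(x_t), alpha, save_max)
#   assert np.allclose(x_back.numpy()[:, 0, :], x, rtol=1e-3, atol=1e-4)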
# Optimizer
optimizer_G = torch.optim.Adam(generator.parameters(),lr=opt.lr_g)
optimizer_D = torch.optim.Adam(discriminator.parameters(),lr=opt.lr_d)
if model_ver =='GANConv1Dv1WIa':
optimizer_D_I = torch.optim.Adam(discriminator_I.parameters(),lr=opt.lr_d)
# Loading training set
training_set = subset_list[0]
class TimeSeriesLCL(Dataset):
def __init__(self, npy_array,alpha,save_max):
self.x_train = npy_array
self.x_train = np.arcsinh(self.x_train*alpha)/alpha
self.x_train = (self.x_train*2.0)/save_max - 1.0
self.x_train = self.x_train[:,np.newaxis,:]
def __len__(self):
return self.x_train.shape[0]
def __getitem__(self, idx):
example = self.x_train[idx,]
return example
x_train = TimeSeriesLCL(training_set,alpha,save_max)
# Some parameters for training
if model_ver == 'GANConv1Dv0':
latent_space_dim = 25
else:
latent_space_dim = 42
eta = opt.eta
gamma = opt.gamma
n_epochs = opt.n_epochs
batch_size = opt.batch_size
steps_generator = opt.n_critic
steps_discriminator = 1
dataloader = DataLoader(x_train,batch_size=batch_size,shuffle=True)
generated_samples = []
real_examples = []
##
## Training ##
##
for epoch in range(n_epochs):
for i, example_batch in enumerate(dataloader):
# Ground truths for the discriminator
valid = Variable(Tensor(example_batch.shape[0], 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(example_batch.shape[0], 1).fill_(0.0), requires_grad=False)
# Configuring input
example_batch = example_batch.type(Tensor)
real_examples.append(torch.squeeze(example_batch))
# Generating samples
z = Tensor(np.random.normal(size=[example_batch.shape[0],latent_space_dim]))
generated_sample = generator(z)
generated_samples.append(torch.squeeze(generated_sample.detach()))
if model_ver =='GANConv1Dv1WIa':
# Train generator
if i%steps_generator == 0:
optimizer_G.zero_grad()
g_loss_S = loss(discriminator(generated_sample),valid)
g_loss_I = loss(discriminator_I(generated_sample),valid)
basic_g_loss = (1.0-eta)*g_loss_S + eta*g_loss_I
basic_g_loss.backward()
optimizer_G.step()
# Train Discriminator
if i%steps_discriminator == 0:
optimizer_D.zero_grad()
real_loss = loss(discriminator(example_batch),valid)
fake_loss = loss(discriminator(generated_sample.detach()),fake)
if opt.grad_norm_reg:
basic_d_loss = (real_loss + fake_loss)/2.0
d_grad = grad(basic_d_loss,discriminator.parameters(),create_graph=True)
dn2 = torch.sqrt(sum([grd.norm()**2 for grd in d_grad]))
final_d_loss = basic_d_loss - gamma*dn2
else:
final_d_loss = (real_loss + fake_loss)/2.0
final_d_loss.backward()
optimizer_D.step()
optimizer_D_I.zero_grad()
real_loss_I = loss(discriminator_I(example_batch),valid)
fake_loss_I = loss(discriminator_I(generated_sample.detach()),fake)
d_loss_I = (real_loss_I + fake_loss_I)/2.0
d_loss_I.backward()
optimizer_D_I.step()
else:
# Train generator
if i%steps_generator == 0:
optimizer_G.zero_grad()
basic_g_loss = loss(discriminator(generated_sample),valid)
basic_g_loss.backward()
optimizer_G.step()
# Train Discriminator
if i%steps_discriminator == 0:
optimizer_D.zero_grad()
real_loss = loss(discriminator(example_batch),valid)
fake_loss = loss(discriminator(generated_sample.detach()),fake)
if opt.grad_norm_reg:
basic_d_loss = (real_loss + fake_loss)/2.0
d_grad = grad(basic_d_loss,discriminator.parameters(),create_graph=True)
dn2 = torch.sqrt(sum([grd.norm()**2 for grd in d_grad]))
final_d_loss = basic_d_loss - gamma*dn2
else:
final_d_loss = (real_loss + fake_loss)/2.0
final_d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch+1, n_epochs, i+1, len(dataloader), final_d_loss.item(), basic_g_loss.item())
)
# Saving the loss for the Generator and Discriminator
writer.add_scalar('Generator loss', basic_g_loss.item(), 1+i+(epoch*len(dataloader)))
writer.add_scalar('Discriminator loss', final_d_loss.item(),1+i+(epoch*len(dataloader)))
# Plotting artificially generated samples, empirical distributions of the indicators and saving plots to tensorboard.
if (((i+1)*batch_size) % 800) == 0:
generated_samples = torch.cat(generated_samples)
generated_samples = inverse_trans(generated_samples,alpha,save_max)
indicators_gen = Indicators(generated_samples)
indicators_gen = Tensor.cpu(indicators_gen)
indicators_gen = indicators_gen.data.numpy()
real_examples = torch.cat(real_examples)
real_examples = inverse_trans(real_examples,alpha,save_max)
indicators_real = Indicators(real_examples)
indicators_real = Tensor.cpu(indicators_real)
indicators_real = indicators_real.data.numpy()
g_sample = generated_samples[0,:]
g_sample = Tensor.cpu(g_sample)
g_sample = g_sample.data.numpy()
g_sample_fig = plt.figure(0)
plt.plot(g_sample)
plt.title('Generated Sample')
plt.ylabel('Energy (KWh)')
plt.xlabel('Time (half hour)')
writer.add_figure('Generated Sample', g_sample_fig,1+i+(epoch*len(dataloader)))
List_Hist_r, List_Hist_f, List_Hist_x, List_EMD, Avg_Ind_Index = MakeHist(indicators_real,indicators_gen)
mean_Hist = plt.figure(0)
plt.plot(List_Hist_x[0],List_Hist_r[0])
plt.plot(List_Hist_x[0],List_Hist_f[0])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the mean.')
plt.xlabel('Mean')
writer.add_scalar('EMD of the mean', List_EMD[0],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the mean', mean_Hist,1+i+(epoch*len(dataloader)))
skewness_Hist = plt.figure(1)
plt.plot(List_Hist_x[1],List_Hist_r[1])
plt.plot(List_Hist_x[1],List_Hist_f[1])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the skewness.')
plt.xlabel('Skewness')
writer.add_scalar('EMD of the skewness', List_EMD[1],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the skewness', skewness_Hist,1+i+(epoch*len(dataloader)))
CV_Hist = plt.figure(2)
plt.plot(List_Hist_x[2],List_Hist_r[2])
plt.plot(List_Hist_x[2],List_Hist_f[2])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the CV.')
plt.xlabel('Coefficient of variation')
writer.add_scalar('EMD of the CV', List_EMD[2],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the CV', CV_Hist,1+i+(epoch*len(dataloader)))
kurtosis_Hist = plt.figure(3)
plt.plot(List_Hist_x[3],List_Hist_r[3])
plt.plot(List_Hist_x[3],List_Hist_f[3])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the kurtosis.')
plt.xlabel('Kurtosis')
writer.add_scalar('EMD of the kurtosis', List_EMD[3],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the kurtosis', kurtosis_Hist,1+i+(epoch*len(dataloader)))
maxmean_Hist = plt.figure(4)
plt.plot(List_Hist_x[4],List_Hist_r[4])
plt.plot(List_Hist_x[4],List_Hist_f[4])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the max-mean ratio.')
plt.xlabel('Max-mean ratio')
writer.add_scalar('EMD of the max-mean ratio', List_EMD[4],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the max-mean ratio', maxmean_Hist,1+i+(epoch*len(dataloader)))
writer.add_scalar('Average Indicator Index', Avg_Ind_Index,1+i+(epoch*len(dataloader)))
generated_samples = []
real_examples = []
# Saving the model
mod_directory = saving_dir+'/Trained'
if not os.path.exists(mod_directory):
os.makedirs(mod_directory)
torch.save(generator.state_dict(), mod_directory+'/GEN_run'+str(run)+'.pth')
torch.save(discriminator.state_dict(), mod_directory+'/DIS_run'+str(run)+'.pth')
print('Model Saved')
##
## Gradient Norm Attack ##
##
batch_size = 1 # batch size for the attack
generator.eval()
discriminator.eval()
# The attack itself
norms_per_subset = []
scores_per_subset = []
for i in range(n_parts):
norm = []
scores = []
for j in range(step):
examples = np.load(data_dir+'/'+dir_list[(i*step)+j])
examples = transformation(examples,alpha,save_max)
client_norm = np.empty([0])
client_score = np.empty([0])
for k in range(0,examples.shape[0],batch_size):
# Configuring Input
example_batch = Tensor(examples[k:k+batch_size,:])
# Ground truth for the discriminator
valid = Variable(Tensor(example_batch.size(0), 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(example_batch.size(0), 1).fill_(0.0), requires_grad=False)
# Generating fake samples
z = Tensor(np.random.normal(size=[example_batch.size(0),latent_space_dim]))
generated = generator(z)
# Taking the gradient of the discriminator
valid_loss = loss(discriminator(example_batch),valid)
fake_loss = loss(discriminator(generated.detach()),fake)
total_loss = (valid_loss + fake_loss)/2.0
discriminator.zero_grad()
# total_loss.backward(retain_graph=True)
# Saving discriminator score for sample
score = discriminator(example_batch)
score = Tensor.cpu(score)
score = score.data.numpy()
client_score = np.append(client_score, score)
# Calculating the norm
d_grad = grad(total_loss,discriminator.parameters(),create_graph=True)
dn2 = torch.sqrt(sum([grd.norm()**2 for grd in d_grad]))
dn2 = dn2.detach()
dn2 = Tensor.cpu(dn2)
dn2 = dn2.data.numpy()
client_norm = np.append(client_norm,dn2)
# Saving the norm for a client
scores.append(client_score)
norm.append(client_norm)
# Loop through clients
norms_per_subset.append(norm)
scores_per_subset.append(scores)
# Loop through subsets
norms_directory = saving_dir+'/npdata/Norms'
if not os.path.exists(norms_directory):
os.makedirs(norms_directory)
np.save(norms_directory+'/SSNorm'+str(run)+'.npy',norms_per_subset)
scores_directory = saving_dir+'/npdata/Scores'
if not os.path.exists(scores_directory):
os.makedirs(scores_directory)
np.save(scores_directory+'/SSScore'+str(run)+'.npy',scores_per_subset)
##
## Classification ##
##
# Using the Norm
mean_norms_per_client = []
mean_norms_per_subset = []
std_norms_per_client = []
std_norms_per_subset = []
# Going through norms for all samples.
# Saving per client and per subset mean and std.
for i in range(len(norms_per_subset)):
norms_per_client = norms_per_subset[i]
mean_norm_client_for_subset = []
std_norm_client_for_subset = []
all_norms_subset = np.empty([0])
for j in range(step):
client_norms = norms_per_client[j]
all_norms_subset = np.append(all_norms_subset,client_norms)
mean_client_norm = np.mean(client_norms)
std_client_norm = np.std(client_norms)
mean_norm_client_for_subset.append(mean_client_norm)
std_norm_client_for_subset.append(std_client_norm)
mean_norms_per_client.append(mean_norm_client_for_subset)
mean_norms_per_subset.append(np.mean(mean_norm_client_for_subset))
std_norms_per_client.append(std_norm_client_for_subset)
std_norms_per_subset.append(np.std(all_norms_subset))
# Classifying Subset Based
subset_ranking_mean = np.argsort(mean_norms_per_subset)
subset_ranking_std = np.argsort(std_norms_per_subset)
ranksSS_mean_directory = saving_dir+'/npdata/RankMeanPSS/'
if not os.path.exists(ranksSS_mean_directory):
os.makedirs(ranksSS_mean_directory)
np.save(ranksSS_mean_directory+'RankpSubset'+str(run)+'.npy',subset_ranking_mean)
ranksSS_std_directory = saving_dir+'/npdata/RankStdPSS/'
if not os.path.exists(ranksSS_std_directory):
os.makedirs(ranksSS_std_directory)
np.save(ranksSS_std_directory+'RankpSubset'+str(run)+'.npy',subset_ranking_std)
# Classifying Client Based
mean_arb_client_ranking = []
std_arb_client_ranking = []
for j in range(100):
rand_select_norms = []
rand_select_norms_std = []
for i in range(len(mean_norms_per_client)):
selected_client = np.random.choice(step)
norm_of_client = mean_norms_per_client[i][selected_client]
std_of_client = std_norms_per_client[i][selected_client]
rand_select_norms.append(norm_of_client)
rand_select_norms_std.append(std_of_client)
aux = np.argsort(rand_select_norms)
aux_1 = np.argsort(rand_select_norms_std)
mean_arb_client_ranking.append(aux)
std_arb_client_ranking.append(aux_1)
ranksC_mean_directory = saving_dir+'/npdata/RankMeanPC'
if not os.path.exists(ranksC_mean_directory):
os.makedirs(ranksC_mean_directory)
np.save(ranksC_mean_directory+'/RankpClient'+str(run)+'.npy',mean_arb_client_ranking)
ranksC_std_directory = saving_dir+'/npdata/RankStdPC'
if not os.path.exists(ranksC_std_directory):
os.makedirs(ranksC_std_directory)
np.save(ranksC_std_directory+'/RankpClient'+str(run)+'.npy',std_arb_client_ranking)
# Using the scores
mean_score_per_client = []
mean_score_per_subset = []
for i in range(len(scores_per_subset)):
scores_per_client = scores_per_subset[i]
mean_scores_client_for_subset = []
for j in range(step):
client_scores = scores_per_client[j]
mean_client_score = np.mean(client_scores)
mean_scores_client_for_subset.append(mean_client_score)
mean_score_per_client.append(mean_scores_client_for_subset)
mean_score_per_subset.append(np.mean(mean_scores_client_for_subset))
# Classifying Subset Based
subset_ranking_score = np.argsort(mean_score_per_subset)
ranksSS_score_directory = saving_dir+'/npdata/RankMeanScorePSS/'
if not os.path.exists(ranksSS_score_directory):
os.makedirs(ranksSS_score_directory)
np.save(ranksSS_score_directory+'RankpSubset'+str(run)+'.npy',subset_ranking_score)
# Classifying Client Based
score_arb_client_ranking = []
for j in range(100):
rand_select_scores = []
for i in range(len(mean_score_per_client)):
selected_client = np.random.choice(step)
score_of_client = mean_score_per_client[i][selected_client]
rand_select_scores.append(score_of_client)
aux = np.argsort(rand_select_scores)
score_arb_client_ranking.append(aux)
ranksC_score_directory = saving_dir+'/npdata/RankMeanScorePC'
if not os.path.exists(ranksC_score_directory):
os.makedirs(ranksC_score_directory)
np.save(ranksC_score_directory+'/RankpClient'+str(run)+'.npy',score_arb_client_ranking)
# Using the Indicators
AII_list = []
for i in range(n_parts):
examples = np.empty([0,336])
generated_list = []
for j in range(step):
aux =
|
np.load(data_dir+'/'+dir_list[(i*step)+j])
|
numpy.load
|
import numpy as np
import functools
import sys
import pytest
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
put_along_axis
)
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
)
IS_64BIT = sys.maxsize > 2**32
def _add_keepdims(func):
""" hack in keepdims behavior into a function taking an axis """
@functools.wraps(func)
def wrapped(a, axis, **kwargs):
res = func(a, axis=axis, **kwargs)
if axis is None:
axis = 0 # res is now a scalar, so we can insert this anywhere
return np.expand_dims(res, axis=axis)
return wrapped
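# Illustrative example (assumption, not from the original test file):
#   np.argmin(np.eye(3), axis=1).shape                 == (3,)
#   _add_keepdims(np.argmin)(np.eye(3), axis=1).shape  == (3, 1)
# which is the index shape that take_along_axis / put_along_axis expect.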
class TestTakeAlongAxis(object):
def test_argequivalent(self):
""" Test it translates from arg<func> to <func> """
from numpy.random import rand
a = rand(3, 4, 5)
funcs = [
(np.sort, np.argsort, dict()),
(_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
(_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
(np.partition, np.argpartition, dict(kth=2)),
]
for func, argfunc, kwargs in funcs:
for axis in list(range(a.ndim)) + [None]:
a_func = func(a, axis=axis, **kwargs)
ai_func = argfunc(a, axis=axis, **kwargs)
assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
def test_invalid(self):
""" Test it errors when indices has too few dimensions """
a = np.ones((10, 10))
ai = np.ones((10, 2), dtype=np.intp)
# sanity check
take_along_axis(a, ai, axis=1)
# not enough indices
assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
# bool arrays not allowed
assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
# float arrays not allowed
assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
# invalid axis
assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
def test_empty(self):
""" Test everything is ok with empty results, even with inserted dims """
a = np.ones((3, 4, 5))
ai = np.ones((3, 0, 5), dtype=np.intp)
actual = take_along_axis(a, ai, axis=1)
assert_equal(actual.shape, ai.shape)
def test_broadcast(self):
""" Test that non-indexing dimensions are broadcast in both directions """
a = np.ones((3, 4, 1))
ai = np.ones((1, 2, 5), dtype=np.intp)
actual = take_along_axis(a, ai, axis=1)
assert_equal(actual.shape, (3, 2, 5))
class TestPutAlongAxis(object):
def test_replace_max(self):
a_base = np.array([[10, 30, 20], [60, 40, 50]])
for axis in list(range(a_base.ndim)) + [None]:
# we mutate this in the loop
a = a_base.copy()
# replace the max with a small value
i_max = _add_keepdims(np.argmax)(a, axis=axis)
put_along_axis(a, i_max, -99, axis=axis)
# find the new minimum, which should be at the position of the old max
i_min = _add_keepdims(np.argmin)(a, axis=axis)
assert_equal(i_min, i_max)
def test_broadcast(self):
""" Test that non-indexing dimensions are broadcast in both directions """
a = np.ones((3, 4, 1))
ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
put_along_axis(a, ai, 20, axis=1)
assert_equal(take_along_axis(a, ai, axis=1), 20)
class TestApplyAlongAxis(object):
def test_simple(self):
a = np.ones((20, 10), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
def test_simple101(self):
a = np.ones((10, 101), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
def test_3d(self):
a = np.arange(27).reshape((3, 3, 3))
assert_array_equal(apply_along_axis(np.sum, 0, a),
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
def test_preserve_subclass(self):
def double(row):
return row * 2
class MyNDArray(np.ndarray):
pass
m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
result = apply_along_axis(double, 0, m)
assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
result = apply_along_axis(double, 1, m)
assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
def test_subclass(self):
class MinimalSubclass(np.ndarray):
data = 1
def minimal_function(array):
return array.data
a = np.zeros((6, 3)).view(MinimalSubclass)
assert_array_equal(
apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
)
def test_scalar_array(self, cls=np.ndarray):
a = np.ones((6, 3)).view(cls)
res = apply_along_axis(np.sum, 0, a)
assert_(isinstance(res, cls))
assert_array_equal(res, np.array([6, 6, 6]).view(cls))
def test_0d_array(self, cls=np.ndarray):
def sum_to_0d(x):
""" Sum x, returning a 0d array of the same class """
assert_equal(x.ndim, 1)
return np.squeeze(np.sum(x, keepdims=True))
a = np.ones((6, 3)).view(cls)
res = apply_along_axis(sum_to_0d, 0, a)
assert_(isinstance(res, cls))
assert_array_equal(res, np.array([6, 6, 6]).view(cls))
res =
|
apply_along_axis(sum_to_0d, 1, a)
|
numpy.lib.shape_base.apply_along_axis
|
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
import os
import time
os.chdir('C:\\ITWILL\\Flask_part\\Lookus')
os.getcwd()
check = False
app = Flask(__name__)
import tensorflow as tf
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
from numpy import dot
from numpy.linalg import norm
import matplotlib.pyplot as plt
import argparse
import cv2
from keras.preprocessing import image
import os
from PIL import Image
import keras
import cv2
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
result_idx = []
def cos_sim(A, B):
return dot(A, B)/(norm(A)*
|
norm(B)
|
numpy.linalg.norm
|
"""Processing functions for dannce."""
import numpy as np
import imageio
import os
import PIL
from six.moves import cPickle
from typing import Dict, Text
import pickle
from tqdm import tqdm
from copy import deepcopy
import scipy.io as sio
from scipy.ndimage.filters import maximum_filter
from skimage import measure
from skimage.color import rgb2gray
from skimage.transform import downscale_local_mean as dsm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from dannce.engine.data import serve_data_DANNCE, io
from dannce.config import make_paths_safe, make_none_safe
_DEFAULT_VIDDIR = "videos"
_DEFAULT_VIDDIR_SIL = "videos_sil"
_DEFAULT_COMSTRING = "COM"
_DEFAULT_COMFILENAME = "com3d.mat"
_DEFAULT_SEG_MODEL = 'weights/maskrcnn.pth'
"""
VIDEO
"""
def initialize_vids(params, datadict, e, vids, pathonly=True, vidkey="viddir"):
"""
Initializes video path dictionaries for a training session. This is different
from a predict session because it operates over a single animal ("experiment")
at a time.
"""
for i in range(len(params["experiment"][e]["camnames"])):
# Rather than opening all vids, only open what is needed based on the
# maximum frame ID for this experiment and Camera
flist = []
for key in datadict.keys():
if int(key.split("_")[0]) == e:
flist.append(
datadict[key]["frames"][params["experiment"][e]["camnames"][i]]
)
flist = max(flist)
# For COM prediction, we don't prepend experiment IDs
# So detect this case and act accordingly.
basecam = params["experiment"][e]["camnames"][i]
if "_" in basecam:
basecam = basecam.split("_")[1]
if params["vid_dir_flag"]:
addl = ""
else:
addl = os.listdir(
os.path.join(
params["experiment"][e][vidkey],
basecam,
)
)[0]
r = generate_readers(
params["experiment"][e][vidkey],
os.path.join(basecam, addl),
maxopt=flist, # Large enough to encompass all videos in directory.
extension=params["experiment"][e]["extension"],
pathonly=pathonly,
)
if "_" in params["experiment"][e]["camnames"][i]:
vids[params["experiment"][e]["camnames"][i]] = {}
for key in r:
vids[params["experiment"][e]["camnames"][i]][str(e) + "_" + key] = r[
key
]
else:
vids[params["experiment"][e]["camnames"][i]] = r
return vids
def initialize_all_vids(params, datadict, exps, pathonly=True, vidkey="viddir"):
vids = {}
for e in exps:
vids = initialize_vids(params, datadict, e, vids, pathonly, vidkey)
return vids
def generate_readers(
viddir, camname, minopt=0, maxopt=300000, pathonly=False, extension=".mp4"
):
"""Open all mp4 objects with imageio, and return them in a dictionary."""
out = {}
mp4files = [
os.path.join(camname, f)
for f in os.listdir(os.path.join(viddir, camname))
if extension in f
and int(f.rsplit(extension)[0]) <= maxopt
and int(f.rsplit(extension)[0]) >= minopt
]
# This is a trick (that should work) for getting rid of
# awkward sub-directory folder names when they are being used
mp4files_scrub = [
os.path.join(
os.path.normpath(f).split(os.sep)[0], os.path.normpath(f).split(os.sep)[-1]
)
for f in mp4files
]
pixelformat = "yuv420p"
input_params = []
output_params = []
for i in range(len(mp4files)):
if pathonly:
out[mp4files_scrub[i]] = os.path.join(viddir, mp4files[i])
else:
            print(
                "NOTE: Ignoring {} files numbered above {}".format(extension, maxopt)
            )
out[mp4files_scrub[i]] = imageio.get_reader(
os.path.join(viddir, mp4files[i]),
pixelformat=pixelformat,
input_params=input_params,
output_params=output_params,
)
return out
"""
LOAD EXP INFO
"""
def grab_predict_label3d_file(defaultdir=""):
"""
Finds the paths to the training experiment yaml files.
"""
def_ep = os.path.join(".", defaultdir)
label3d_files = os.listdir(def_ep)
label3d_files = [
os.path.join(def_ep, f) for f in label3d_files if "dannce.mat" in f
]
label3d_files.sort()
if len(label3d_files) == 0:
raise Exception("Did not find any *dannce.mat file in {}".format(def_ep))
    print("Using the following *dannce.mat file: {}".format(label3d_files[0]))
return label3d_files[0]
def load_expdict(params, e, expdict, _DEFAULT_VIDDIR, _DEFAULT_VIDDIR_SIL, logger):
"""
Load in camnames and video directories and label3d files for a single experiment
during training.
"""
_DEFAULT_NPY_DIR = "npy_volumes"
exp = params.copy()
exp = make_paths_safe(exp)
exp["label3d_file"] = expdict["label3d_file"]
exp["base_exp_folder"] = os.path.dirname(exp["label3d_file"])
if "viddir" not in expdict:
# if the videos are not at the _DEFAULT_VIDDIR, then it must
# be specified in the io.yaml experiment portion
exp["viddir"] = os.path.join(exp["base_exp_folder"], _DEFAULT_VIDDIR)
else:
exp["viddir"] = expdict["viddir"]
logger.info("Experiment {} using videos in {}".format(e, exp["viddir"]))
if params["use_silhouette"]:
exp["viddir_sil"] = os.path.join(exp["base_exp_folder"], _DEFAULT_VIDDIR_SIL) if "viddir_sil" not in expdict else expdict["viddir_sil"]
logger.info("Experiment {} also using masked videos in {}".format(e, exp["viddir_sil"]))
l3d_camnames = io.load_camnames(expdict["label3d_file"])
if "camnames" in expdict:
exp["camnames"] = expdict["camnames"]
elif l3d_camnames is not None:
exp["camnames"] = l3d_camnames
logger.info("Experiment {} using camnames: {}".format(e, exp["camnames"]))
# Use the camnames to find the chunks for each video
chunks = {}
for name in exp["camnames"]:
if exp["vid_dir_flag"]:
camdir = os.path.join(exp["viddir"], name)
else:
camdir = os.path.join(exp["viddir"], name)
intermediate_folder = os.listdir(camdir)
camdir = os.path.join(camdir, intermediate_folder[0])
video_files = os.listdir(camdir)
video_files = [f for f in video_files if ".mp4" in f]
video_files = sorted(video_files, key=lambda x: int(x.split(".")[0]))
chunks[str(e) + "_" + name] = np.sort(
[int(x.split(".")[0]) for x in video_files]
)
exp["chunks"] = chunks
logger.info(chunks)
# For npy volume training
if params["use_npy"]:
exp["npy_vol_dir"] = os.path.join(exp["base_exp_folder"], _DEFAULT_NPY_DIR)
return exp
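# Hedged sketch (not from the original repo) of the per-experiment override dict
# consumed by load_expdict above. Only "label3d_file" is strictly required here;
# "viddir" and "camnames" are optional and otherwise derived from the label3d
# path/file. The paths and camera names below are hypothetical.
_EXAMPLE_EXPDICT = {
    "label3d_file": "/data/exp1/exp1_dannce.mat",  # hypothetical path
    "viddir": "/data/exp1/videos",                 # optional, defaults to base_exp_folder/videos
    "camnames": ["Camera1", "Camera2"],            # optional, else read from the label3d file
}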
def load_all_exps(params, logger):
samples = [] # training sample identifiers
datadict, datadict_3d, com3d_dict = {}, {}, {} # labels
cameras, camnames = {}, {} # camera
total_chunks = {} # video chunks
temporal_chunks = {} # for temporal training
for e, expdict in enumerate(params["exp"]):
# load basic exp info
exp = load_expdict(params, e, expdict, _DEFAULT_VIDDIR, _DEFAULT_VIDDIR_SIL, logger)
# load corresponding 2D & 3D labels, COMs
(
exp,
samples_,
datadict_,
datadict_3d_,
cameras_,
com3d_dict_,
temporal_chunks_
) = do_COM_load(exp, expdict, e, params)
logger.info("Using {} samples total.".format(len(samples_)))
(
samples,
datadict,
datadict_3d,
com3d_dict,
temporal_chunks
) = serve_data_DANNCE.add_experiment(
e,
samples,
datadict,
datadict_3d,
com3d_dict,
samples_,
datadict_,
datadict_3d_,
com3d_dict_,
temporal_chunks,
temporal_chunks_
)
cameras[e] = cameras_
camnames[e] = exp["camnames"]
logger.info("Using the following cameras: {}".format(camnames[e]))
params["experiment"][e] = exp
for name, chunk in exp["chunks"].items():
total_chunks[name] = chunk
samples = np.array(samples)
return samples, datadict, datadict_3d, com3d_dict, cameras, camnames, total_chunks, temporal_chunks
def do_COM_load(exp: Dict, expdict: Dict, e, params: Dict, training=True):
"""Load and process COMs.
Args:
exp (Dict): Parameters dictionary for experiment
expdict (Dict): Experiment specific overrides (e.g. com_file, vid_dir)
e (TYPE): Description
params (Dict): Parameters dictionary.
training (bool, optional): If true, load COM for training frames.
Returns:
TYPE: Description
exp, samples_, datadict_, datadict_3d_, cameras_, com3d_dict_
Raises:
Exception: Exception when invalid com file format.
"""
(
samples_,
datadict_,
datadict_3d_,
cameras_,
temporal_chunks
) = serve_data_DANNCE.prepare_data(
exp,
prediction=not training,
predict_labeled_only=params["predict_labeled_only"],
valid=(e in params["valid_exp"]) if params["valid_exp"] is not None else False,
support=(e in params["support_exp"]) if params["support_exp"] is not None else False,
)
# If there is "clean" data (full marker set), can take the
# 3D COM from the labels
if exp["com_fromlabels"] and training:
print("For experiment {}, calculating 3D COM from labels".format(e))
com3d_dict_ = deepcopy(datadict_3d_)
for key in com3d_dict_.keys():
com3d_dict_[key] = np.nanmean(datadict_3d_[key], axis=1, keepdims=True)
elif "com_file" in expdict and expdict["com_file"] is not None:
exp["com_file"] = expdict["com_file"]
if ".mat" in exp["com_file"]:
c3dfile = sio.loadmat(exp["com_file"])
com3d_dict_ = check_COM_load(c3dfile, "com", params["medfilt_window"])
elif ".pickle" in exp["com_file"]:
datadict_, com3d_dict_ = serve_data_DANNCE.prepare_COM(
exp["com_file"],
datadict_,
comthresh=params["comthresh"],
weighted=params["weighted"],
camera_mats=cameras_,
method=params["com_method"],
)
if params["medfilt_window"] is not None:
raise Exception(
"Sorry, median filtering a com pickle is not yet supported. Please use a com3d.mat or *dannce.mat file instead"
)
else:
raise Exception("Not a valid com file format")
else:
# Then load COM from the label3d file
exp["com_file"] = expdict["label3d_file"]
c3dfile = io.load_com(exp["com_file"])
com3d_dict_ = check_COM_load(c3dfile, "com3d", params["medfilt_window"])
print("Experiment {} using com3d: {}".format(e, exp["com_file"]))
if params["medfilt_window"] is not None:
print(
"Median filtering COM trace with window size {}".format(
params["medfilt_window"]
)
)
    # Remove any 3D COMs that are beyond the confines of the 3D arena
do_cthresh = True if exp["cthresh"] is not None else False
pre = len(samples_)
samples_ = serve_data_DANNCE.remove_samples_com(
samples_,
com3d_dict_,
rmc=do_cthresh,
cthresh=exp["cthresh"],
)
msg = "Removed {} samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file"
print(msg.format(pre - len(samples_)))
return exp, samples_, datadict_, datadict_3d_, cameras_, com3d_dict_, temporal_chunks
def check_COM_load(c3dfile: Dict, kkey: Text, win_size: int):
"""Check that the COM file is of the appropriate format, and filter it.
Args:
c3dfile (Dict): Loaded com3d dictionary.
kkey (Text): Key to use for extracting com.
        win_size (int): Median filter window size.
Returns:
Dict: Dictionary containing com data.
"""
c3d = c3dfile[kkey]
# do a median filter on the COM traces if indicated
if win_size is not None:
if win_size % 2 == 0:
win_size += 1
print("medfilt_window was not odd, changing to: {}".format(win_size))
from scipy.signal import medfilt
c3d = medfilt(c3d, (win_size, 1))
c3dsi = np.squeeze(c3dfile["sampleID"])
com3d_dict = {s: c3d[i] for (i, s) in enumerate(c3dsi)}
return com3d_dict
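# Hedged usage sketch of check_COM_load (synthetic data, not from any experiment):
# the loaded dict is expected to hold an (N, 3) COM array under `kkey` and a
# matching "sampleID" vector; an even median-filter window is bumped to odd.
def _example_check_COM_load():
    c3dfile = {
        "com3d": np.random.randn(50, 3).astype("float32"),
        "sampleID": np.arange(50),
    }
    com3d_dict = check_COM_load(c3dfile, "com3d", win_size=4)  # filtered with window 5
    assert len(com3d_dict) == 50 and com3d_dict[0].shape == (3,)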
def trim_COM_pickle(fpath, start_sample, end_sample, opath=None):
"""Trim dictionary entries to the range [start_sample, end_sample].
    opath is the output path for saving the trimmed COM dictionary, if desired.
"""
with open(fpath, "rb") as f:
save_data = cPickle.load(f)
sd = {}
for key in save_data:
if key >= start_sample and key <= end_sample:
sd[key] = save_data[key]
with open(opath, "wb") as f:
cPickle.dump(sd, f)
return sd
"""
DATA SPLITS
"""
def make_data_splits(samples, params, results_dir, num_experiments, temporal_chunks=None):
"""
Make train/validation splits from list of samples, or load in a specific
list of sampleIDs if desired.
"""
# TODO: Switch to .mat from .pickle so that these lists are easier to read
# and change.
partition = {}
if params["use_temporal"]:
if params["load_valid"] is None:
            assert temporal_chunks is not None, "Temporal training requires temporal_chunks for chunk-wise partitioning."
v = params["num_validation_per_exp"]
# fix random seeds
if params["data_split_seed"] is not None:
np.random.seed(params["data_split_seed"])
valid_chunks, train_chunks = [], []
if params["valid_exp"] is not None and v > 0:
for e in range(num_experiments):
if e in params["valid_exp"]:
v = params["num_validation_per_exp"]
if v > len(temporal_chunks[e]):
v = len(temporal_chunks[e])
print("Setting all {} samples in experiment {} for validation purpose.".format(v, e))
valid_chunk_idx = sorted(np.random.choice(len(temporal_chunks[e]), v, replace=False))
valid_chunks += list(np.array(temporal_chunks[e])[valid_chunk_idx])
train_chunks += list(np.delete(temporal_chunks[e], valid_chunk_idx, 0))
else:
train_chunks += temporal_chunks[e]
elif v > 0:
for e in range(num_experiments):
valid_chunk_idx = sorted(np.random.choice(len(temporal_chunks[e]), v, replace=False))
valid_chunks += list(np.array(temporal_chunks[e])[valid_chunk_idx])
train_chunks += list(np.delete(temporal_chunks[e], valid_chunk_idx, 0))
elif params["valid_exp"] is not None:
                raise Exception("Need to set num_validation_per_exp when using valid_exp")
else:
for e in range(num_experiments):
train_chunks += list(temporal_chunks[e])
train_expts = np.arange(num_experiments)
print("TRAIN EXPTS: {}".format(train_expts))
if isinstance(params["training_fraction"], float):
assert (params["training_fraction"] < 1.0) & (params["training_fraction"] > 0)
# load in the training samples
labeled_train_samples = np.load('train_samples/baseline.pickle', allow_pickle=True)
#labeled_train_chunks = [labeled_train_samples[i:i+params["temporal_chunk_size"]] for i in range(0, len(labeled_train_samples), params["temporal_chunk_size"])]
n_chunks = len(labeled_train_samples)
# do the selection from
labeled_train_idx = sorted(np.random.choice(n_chunks, int(n_chunks*params["training_fraction"]), replace=False))
idxes_to_be_removed = list(set(range(n_chunks)) - set(labeled_train_idx))
train_samples_to_be_removed = [labeled_train_samples[i] for i in idxes_to_be_removed]
new_train_chunks = []
for chunk in train_chunks:
if chunk[2] not in train_samples_to_be_removed:
new_train_chunks.append(chunk)
train_chunks = new_train_chunks
train_sampleIDs = list(np.concatenate(train_chunks))
try:
valid_sampleIDs = list(np.concatenate(valid_chunks))
except:
valid_sampleIDs = []
partition["train_sampleIDs"], partition["valid_sampleIDs"] = train_sampleIDs, valid_sampleIDs
else:
# Load validation samples from elsewhere
with open(os.path.join(params["load_valid"], "val_samples.pickle"), "rb") as f:
partition["valid_sampleIDs"] = cPickle.load(f)
partition["train_sampleIDs"] = [f for f in samples if f not in partition["valid_sampleIDs"]]
chunk_size = len(temporal_chunks[0][0])
partition["train_chunks"] = [np.arange(i, i+chunk_size) for i in range(0, len(partition["train_sampleIDs"]), chunk_size)]
partition["valid_chunks"] = [np.arange(i, i+chunk_size) for i in range(0, len(partition["valid_sampleIDs"]), chunk_size)]
# breakpoint()
# Save train/val inds
with open(os.path.join(results_dir, "val_samples.pickle"), "wb") as f:
cPickle.dump(partition["valid_sampleIDs"], f)
with open(os.path.join(results_dir, "train_samples.pickle"), "wb") as f:
cPickle.dump(partition["train_sampleIDs"], f)
return partition
if params["load_valid"] is None:
# Set random seed if included in params
if params["data_split_seed"] is not None:
np.random.seed(params["data_split_seed"])
all_inds = np.arange(len(samples))
# extract random inds from each set for validation
v = params["num_validation_per_exp"]
valid_inds = []
if params["valid_exp"] is not None and v > 0:
all_valid_inds = []
for e in params["valid_exp"]:
tinds = [
i for i in range(len(samples)) if int(samples[i].split("_")[0]) == e
]
all_valid_inds = all_valid_inds + tinds
# enable full validation experiments
# by specifying params["num_validation_per_exp"] > number of samples
v = params["num_validation_per_exp"]
if v > len(tinds):
v = len(tinds)
print("Setting all {} samples in experiment {} for validation purpose.".format(v, e))
valid_inds = valid_inds + list(
np.random.choice(tinds, (v,), replace=False)
)
valid_inds = list(np.sort(valid_inds))
train_inds = list(set(all_inds) - set(all_valid_inds)) # [i for i in all_inds if i not in all_valid_inds]
if isinstance(params["training_fraction"], float):
assert (params["training_fraction"] < 1.0) & (params["training_fraction"] > 0)
n_samples = len(train_inds)
train_inds_idx = sorted(np.random.choice(n_samples, int(n_samples*params["training_fraction"]), replace=False))
train_inds = [train_inds[i] for i in train_inds_idx]
elif v > 0: # if 0, do not perform validation
for e in range(num_experiments):
tinds = [
i for i in range(len(samples)) if int(samples[i].split("_")[0]) == e
]
valid_inds = valid_inds + list(
np.random.choice(tinds, (v,), replace=False)
)
valid_inds = list(np.sort(valid_inds))
train_inds = [i for i in all_inds if i not in valid_inds]
elif params["valid_exp"] is not None:
            raise Exception("Need to set num_validation_per_exp when using valid_exp")
else:
train_inds = all_inds
assert (set(valid_inds) & set(train_inds)) == set()
train_samples = samples[train_inds]
train_inds = []
if params["valid_exp"] is not None:
train_expts = [f for f in range(num_experiments) if f not in params["valid_exp"]]
else:
train_expts = np.arange(num_experiments)
print("TRAIN EXPTS: {}".format(train_expts))
if params["num_train_per_exp"] is not None:
# Then sample randomly without replacement from training sampleIDs
for e in train_expts:
tinds = [
i
for i in range(len(train_samples))
if int(train_samples[i].split("_")[0]) == e
]
print(e)
print(len(tinds))
train_inds = train_inds + list(
np.random.choice(
tinds, (params["num_train_per_exp"],), replace=False
)
)
train_inds = list(np.sort(train_inds))
else:
train_inds = np.arange(len(train_samples))
partition["valid_sampleIDs"] = samples[valid_inds]
partition["train_sampleIDs"] = train_samples[train_inds]
# Save train/val inds
with open(os.path.join(results_dir, "val_samples.pickle"), "wb") as f:
cPickle.dump(partition["valid_sampleIDs"], f)
with open(os.path.join(results_dir, "train_samples.pickle"), "wb") as f:
cPickle.dump(partition["train_sampleIDs"], f)
else:
# Load validation samples from elsewhere
with open(
os.path.join(params["load_valid"], "val_samples.pickle"),
"rb",
) as f:
partition["valid_sampleIDs"] = cPickle.load(f)
partition["train_sampleIDs"] = [
f for f in samples if f not in partition["valid_sampleIDs"]
]
# Reset any seeding so that future batch shuffling, etc. are not tied to this seed
if params["data_split_seed"] is not None:
np.random.seed()
return partition
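# Hedged sketch of the split-related keys make_data_splits reads from `params`
# (key names as used above; the values here are illustrative, not defaults from
# any shipped config):
_EXAMPLE_SPLIT_PARAMS = {
    "use_temporal": False,         # chunk-wise splitting path when True
    "load_valid": None,            # or a directory containing val_samples.pickle
    "num_validation_per_exp": 2,
    "valid_exp": None,             # or a list of experiment indices held out for validation
    "data_split_seed": 42,
    "training_fraction": None,     # or a float in (0, 1)
    "num_train_per_exp": None,
}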
def resplit_social(partition):
# the partition needs to be aligned for both animals
# for now, manually put exps as consecutive pairs,
# i.e. [exp1_instance0, exp1_instance1, exp2_instance0, exp2_instance1, ...]
new_partition = {"train_sampleIDs": [], "valid_sampleIDs": []}
pairs = {"train_pairs": [], "valid_pairs": []}
all_sampleIDs = np.concatenate((partition["train_sampleIDs"], partition["valid_sampleIDs"]))
for samp in partition["train_sampleIDs"]:
exp_id = int(samp.split("_")[0])
if exp_id % 2 == 0:
paired = samp.replace(f"{exp_id}_", f"{exp_id+1}_")
new_partition["train_sampleIDs"].append(samp)
new_partition["train_sampleIDs"].append(paired)
pairs["train_pairs"].append([samp, paired])
new_partition["train_sampleIDs"] = np.array(sorted(new_partition["train_sampleIDs"]))
new_partition["valid_sampleIDs"] = np.array(sorted(list(set(all_sampleIDs) - set(new_partition["train_sampleIDs"]))))
for samp in new_partition["valid_sampleIDs"]:
exp_id = int(samp.split("_")[0])
if exp_id % 2 == 0:
paired = samp.replace(f"{exp_id}_", f"{exp_id+1}_")
pairs["valid_pairs"].append([samp, paired])
return new_partition, pairs
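# Hedged toy example (synthetic sampleIDs): resplit_social assumes social
# recordings appear as consecutive (even, odd) experiment-id pairs and keeps both
# animals of a pair on the same side of the split.
def _example_resplit_social():
    partition = {
        "train_sampleIDs": np.array(["0_10", "1_10"]),
        "valid_sampleIDs": np.array(["0_20", "1_20"]),
    }
    new_partition, pairs = resplit_social(partition)
    assert list(new_partition["train_sampleIDs"]) == ["0_10", "1_10"]
    assert list(new_partition["valid_sampleIDs"]) == ["0_20", "1_20"]
    assert pairs["train_pairs"] == [["0_10", "1_10"]]
    assert pairs["valid_pairs"] == [["0_20", "1_20"]]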
def align_social_data(X, X_grid, y, aux, n_animals=2):
X = X.reshape((n_animals, -1, *X.shape[1:]))
X_grid = X_grid.reshape((n_animals, -1, *X_grid.shape[1:]))
y = y.reshape((n_animals, -1, *y.shape[1:]))
if aux is not None:
aux = aux.reshape((n_animals, -1, *aux.shape[1:]))
X = np.transpose(X, (1, 0, 2, 3, 4, 5))
X_grid = np.transpose(X_grid, (1, 0, 2, 3))
y = np.transpose(y, (1, 0, 2, 3))
if aux is not None:
aux = np.transpose(aux, (1, 0, 2, 3, 4, 5))
return X, X_grid, y, aux
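# Hedged shape sketch (zeros only): align_social_data regroups stacked per-animal
# batches of shape (n_animals*B, ...) into (B, n_animals, ...) so the two animals
# of each frame stay paired along axis 1.
def _example_align_social_data():
    X = np.zeros((6, 16, 16, 16, 9))
    X_grid = np.zeros((6, 16 ** 3, 3))
    y = np.zeros((6, 3, 20))
    X, X_grid, y, aux = align_social_data(X, X_grid, y, aux=None)
    assert X.shape == (3, 2, 16, 16, 16, 9)
    assert X_grid.shape == (3, 2, 16 ** 3, 3)
    assert y.shape == (3, 2, 3, 20)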
def remove_samples_npy(npydir, samples, params):
"""
Remove any samples from sample list if they do not have corresponding volumes in the image
or grid directories
"""
# image_volumes
# grid_volumes
samps = []
for e in npydir.keys():
imvol = os.path.join(npydir[e], "image_volumes")
gridvol = os.path.join(npydir[e], "grid_volumes")
ims = os.listdir(imvol)
grids = os.listdir(gridvol)
npysamps = [
"0_" + f.split("_")[1] + ".npy"
for f in samples
if int(f.split("_")[0]) == e
]
goodsamps = list(set(npysamps) & set(ims) & set(grids))
samps = samps + [
str(e) + "_" + f.split("_")[1].split(".")[0] for f in goodsamps
]
sampdiff = len(npysamps) - len(goodsamps)
# import pdb; pdb.set_trace()
print(
"Removed {} samples from {} because corresponding image or grid files could not be found".format(
sampdiff, params["experiment"][e]["label3d_file"]
)
)
return np.array(samps)
"""
PRELOAD DATA INTO MEMORY
"""
def load_volumes_into_mem(params, logger, partition, n_cams, generator, train=True, silhouette=False, social=False):
n_samples = len(partition["train_sampleIDs"]) if train else len(partition["valid_sampleIDs"])
message = "Loading training data into memory" if train else "Loading validation data into memory"
gridsize = tuple([params["nvox"]] * 3)
# initialize vars
if silhouette:
X = np.empty((n_samples, *gridsize, 1), dtype="float32")
else:
X = np.empty((n_samples, *gridsize, params["chan_num"]*n_cams), dtype="float32")
logger.info(message)
X_grid, y = None, None
if params["expval"]:
if not silhouette:
y = np.empty((n_samples, 3, params["n_channels_out"]), dtype="float32")
X_grid = np.empty((n_samples, params["nvox"] ** 3, 3), dtype="float32")
else:
y = np.empty((n_samples, *gridsize, params["n_channels_out"]), dtype="float32")
# load data from generator
if social:
X = np.reshape(X, (2, -1, *X.shape[1:]))
if X_grid is not None:
X_grid = np.reshape(X_grid, (2, -1, *X_grid.shape[1:]))
if y is not None:
y = np.reshape(y, (2, -1, *y.shape[1:]))
for i in tqdm(range(n_samples//2)):
rr = generator.__getitem__(i)
for j in range(2):
vol = rr[0][0][j]
if not silhouette:
X[j, i] = vol
X_grid[j, i], y[j, i] = rr[0][1][j], rr[1][0][j]
else:
X[j, i] = extract_3d_sil(vol)
X = np.reshape(X, (-1, *X.shape[2:]))
if X_grid is not None:
X_grid = np.reshape(X_grid, (-1, *X_grid.shape[2:]))
if y is not None:
y = np.reshape(y, (-1, *y.shape[2:]))
else:
for i in tqdm(range(n_samples)):
rr = generator.__getitem__(i)
if params["expval"]:
vol = rr[0][0][0]
if not silhouette:
X[i] = vol
X_grid[i], y[i] = rr[0][1], rr[1][0]
else:
X[i] = extract_3d_sil(vol)
else:
X[i], y[i] = rr[0], rr[1]
if silhouette:
logger.info("Now loading binary silhouettes")
return None, None, X
return X, X_grid, y
"""
DEBUG, VIS
"""
def write_debug(
params: Dict,
ims_train: np.ndarray,
ims_valid: np.ndarray,
y_train: np.ndarray,
model,
trainData: bool = True,
):
"""Factoring re-used debug output code.
Args:
params (Dict): Parameters dictionary
ims_train (np.ndarray): Training images
ims_valid (np.ndarray): Validation images
y_train (np.ndarray): Training targets
model (Model): Model
trainData (bool, optional): If True use training data for debug. Defaults to True.
"""
def plot_out(imo, lo, imn):
plot_markers_2d(norm_im(imo), lo, newfig=False)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
imname = imn
plt.savefig(os.path.join(debugdir, imname), bbox_inches="tight", pad_inches=0)
if params["debug"] and not params["multi_mode"]:
if trainData:
outdir = "debug_im_out"
ims_out = ims_train
label_out = y_train
else:
outdir = "debug_im_out_valid"
ims_out = ims_valid
label_out = model.predict(ims_valid, batch_size=1)
# Plot all training images and save
# create new directory for images if necessary
debugdir = os.path.join(params["com_train_dir"], outdir)
print("Saving debug images to: " + debugdir)
if not os.path.exists(debugdir):
os.makedirs(debugdir)
plt.figure()
for i in range(ims_out.shape[0]):
plt.cla()
if params["mirror"]:
for j in range(label_out.shape[-1]):
plt.cla()
plot_out(
ims_out[i],
label_out[i, :, :, j : j + 1],
str(i) + "_cam_" + str(j) + ".png",
)
else:
plot_out(ims_out[i], label_out[i], str(i) + ".png")
elif params["debug"] and params["multi_mode"]:
print("Note: Cannot output debug information in COM multi-mode")
def save_volumes_into_npy(params, npy_generator, missing_npydir, samples, logger, silhouette=False):
logger.info("Generating missing npy files ...")
for i, samp in enumerate(npy_generator.list_IDs):
fname = "0_{}.npy".format(samp.split("_")[1])
rr = npy_generator.__getitem__(i)
print(i, end="\r")
if params["social_training"]:
for j in range(npy_generator.n_instances):
exp = int(samp.split("_")[0]) + j
save_root = missing_npydir[exp]
if not silhouette:
X = rr[0][0][j].astype("uint8")
X_grid, y = rr[0][1][j], rr[1][0][j]
np.save(os.path.join(save_root, "image_volumes", fname), X)
np.save(os.path.join(save_root, "grid_volumes", fname), X_grid)
np.save(os.path.join(save_root, "targets", fname), y)
if params["downscale_occluded_view"]:
np.save(os.path.join(save_root, "occlusion_scores", fname), rr[0][2][j])
else:
sil = extract_3d_sil(rr[0][0][j].astype("uint8"))
np.save(os.path.join(save_root, "visual_hulls", fname), sil)
else:
exp = int(samp.split("_")[0])
save_root = missing_npydir[exp]
X, X_grid, y = rr[0][0][0].astype("uint8"), rr[0][1][0], rr[1][0]
if not silhouette:
np.save(os.path.join(save_root, "image_volumes", fname), X)
np.save(os.path.join(save_root, "grid_volumes", fname), X_grid)
np.save(os.path.join(save_root, "targets", fname), y)
else:
sil = extract_3d_sil(X)
np.save(os.path.join(save_root, "visual_hulls", fname), sil)
# samples = remove_samples_npy(npydir, samples, params)
logger.info("{} samples ready for npy training.".format(len(samples)))
def save_volumes_into_tif(params, tifdir, X, sampleIDs, n_cams, logger):
if not os.path.exists(tifdir):
os.makedirs(tifdir)
logger.info("Dump training volumes to {}".format(tifdir))
for i in range(X.shape[0]):
for j in range(n_cams):
im = X[
i,
:,
:,
:,
j * params["chan_num"] : (j + 1) * params["chan_num"],
]
im = norm_im(im) * 255
im = im.astype("uint8")
of = os.path.join(
tifdir,
sampleIDs[i] + "_cam" + str(j) + ".tif",
)
imageio.mimwrite(of, np.transpose(im, [2, 0, 1, 3]))
def save_visual_hull(aux, sampleIDs, savedir='./visual_hull'):
if not os.path.exists(savedir):
os.makedirs(savedir)
for i in range(aux.shape[0]):
intersection = np.squeeze(aux[i].astype(np.float32))
# apply marching cubes algorithm
verts, faces, normals, values = measure.marching_cubes(intersection, 0.0)
# print('Number of vertices: ', verts.shape[0])
# save predictions
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
min_limit, max_limit = np.min(verts), np.max(verts)
ax.set_xlim(min_limit, max_limit)
ax.set_ylim(min_limit, max_limit)
ax.set_zlim(min_limit, max_limit)
of = os.path.join(savedir, sampleIDs[i])
fig.savefig(of)
plt.close(fig)
def save_train_volumes(params, tifdir, generator, n_cams):
if not os.path.exists(tifdir):
os.makedirs(tifdir)
for i in range(len(generator)):
X = generator.__getitem__(i)[0][0].permute(1, 2, 3, 0).numpy()
for j in range(n_cams):
im = X[...,j * params["chan_num"] : (j + 1) * params["chan_num"]]
im = norm_im(im) * 255
im = im.astype("uint8")
of = os.path.join(tifdir,f"{i}_cam{j}.tif")
imageio.mimwrite(of, np.transpose(im, [2, 0, 1, 3]))
def write_npy(uri, gen):
"""
Creates a new image folder and grid folder at the uri and uses
the generator to generate samples and save them as npy files
"""
imdir = os.path.join(uri, "image_volumes")
if not os.path.exists(imdir):
os.makedirs(imdir)
griddir = os.path.join(uri, "grid_volumes")
if not os.path.exists(griddir):
os.makedirs(griddir)
# Make sure rotation and shuffle are turned off
gen.channel_combo = None
gen.shuffle = False
gen.rotation = False
gen.expval = True
# Turn normalization off so that we can save as uint8
gen.norm_im = False
bs = gen.batch_size
for i in range(len(gen)):
if i % 1000 == 0:
print(i)
# Generate batch
bch = gen.__getitem__(i)
# loop over all examples in batch and save volume
for j in range(bs):
# get the frame name / unique ID
fname = gen.list_IDs[i * bs + j]
# and save
print(fname)
np.save(os.path.join(imdir, fname + ".npy"), bch[0][0][j].astype("uint8"))
np.save(os.path.join(griddir, fname + ".npy"), bch[0][1][j])
def write_sil_npy(uri, gen):
"""
Creates a new image folder and grid folder at the uri and uses
the generator to generate samples and save them as npy files
"""
imdir = os.path.join(uri, "visual_hulls")
if not os.path.exists(imdir):
os.makedirs(imdir)
# Make sure rotation and shuffle are turned off
gen.channel_combo = None
gen.shuffle = False
gen.rotation = False
gen.expval = True
# Turn normalization off so that we can save as uint8
gen.norm_im = False
bs = gen.batch_size
for i in range(len(gen)):
if i % 1000 == 0:
print(i)
# Generate batch
bch = gen.__getitem__(i)
# loop over all examples in batch and save volume
for j in range(bs):
# get the frame name / unique ID
fname = gen.list_IDs[i * bs + j]
# and save
print(fname)
# extract visual hull
sil = np.squeeze(extract_3d_sil(bch[0][0][j], 18))
np.save(os.path.join(imdir, fname + ".npy"), sil)
"""
SAVE TRAIN
"""
def rename_weights(traindir, kkey, mon):
"""
    At the end of DANNCE or COM training, rename the best weights file with the epoch #
and value of the monitored quantity
"""
# First load in the training.csv
r = np.genfromtxt(os.path.join(traindir, "training.csv"), delimiter=",", names=True)
e = r["epoch"]
q = r[mon]
minq = np.min(q)
if e.size == 1:
beste = e
else:
beste = e[np.argmin(q)]
newname = "weights." + str(int(beste)) + "-" + "{:.5f}".format(minq) + ".hdf5"
os.rename(os.path.join(traindir, kkey), os.path.join(traindir, newname))
def save_params_pickle(params):
"""
save copy of params as pickle for reproducibility.
"""
handle = open(os.path.join(params["dannce_train_dir"], "params.pickle"), "wb")
pickle.dump(params, handle)
return True
def prepare_save_metadata(params):
"""
To save metadata, i.e. the prediction param values associated with COM or DANNCE
output, we need to convert loss and metrics and net into names, and remove
the 'experiment' field
"""
    # Need to convert None to string but still want to conserve the metadata structure
# format, so we don't want to convert the whole dict to a string
meta = params.copy()
# if "experiment" in meta:
# del meta["experiment"]
# if "loss" in meta:
# try:
# meta["loss"] = [loss.__name__ for loss in meta["loss"]]
# except:
# meta["loss"] = meta["loss"].__name__
# if "net" in meta:
# meta["net"] = meta["net"].__name__
# if "metric" in meta:
# meta["metric"] = [
# f.__name__ if not isinstance(f, str) else f for f in meta["metric"]
# ]
meta = make_none_safe(meta.copy())
return meta
def save_COM_dannce_mat(params, com3d, sampleID):
"""
Instead of saving 3D COM to com3d.mat, save it into the dannce.mat file, which
streamlines subsequent dannce access.
"""
com = {}
com["com3d"] = com3d
com["sampleID"] = sampleID
com["metadata"] = prepare_save_metadata(params)
# Open dannce.mat file, add com and re-save
print("Saving COM predictions to " + params["label3d_file"])
rr = sio.loadmat(params["label3d_file"])
# For safety, save old file to temp and delete it at the end
sio.savemat(params["label3d_file"] + ".temp", rr)
rr["com"] = com
sio.savemat(params["label3d_file"], rr)
os.remove(params["label3d_file"] + ".temp")
def save_COM_checkpoint(
save_data, results_dir, datadict_, cameras, params, file_name="com3d"
):
"""
Saves COM pickle and matfiles
"""
# Save undistorted 2D COMs and their 3D triangulations
f = open(os.path.join(results_dir, file_name + ".pickle"), "wb")
cPickle.dump(save_data, f)
f.close()
# We need to remove the eID in front of all the keys in datadict
# for prepare_COM to run properly
datadict_save = {}
for key in datadict_:
datadict_save[int(float(key.split("_")[-1]))] = datadict_[key]
if params["n_instances"] > 1:
if params["n_channels_out"] > 1:
linking_method = "multi_channel"
else:
linking_method = "euclidean"
_, com3d_dict = serve_data_DANNCE.prepare_COM_multi_instance(
os.path.join(results_dir, file_name + ".pickle"),
datadict_save,
comthresh=0,
weighted=False,
camera_mats=cameras,
linking_method=linking_method,
)
else:
prepare_func = serve_data_DANNCE.prepare_COM
_, com3d_dict = serve_data_DANNCE.prepare_COM(
os.path.join(results_dir, file_name + ".pickle"),
datadict_save,
comthresh=0,
weighted=False,
camera_mats=cameras,
method="median",
)
cfilename = os.path.join(results_dir, file_name + ".mat")
print("Saving 3D COM to {}".format(cfilename))
samples_keys = list(com3d_dict.keys())
if params["n_instances"] > 1:
c3d = np.zeros((len(samples_keys), 3, params["n_instances"]))
else:
c3d = np.zeros((len(samples_keys), 3))
for i in range(len(samples_keys)):
c3d[i] = com3d_dict[samples_keys[i]]
sio.savemat(
cfilename,
{
"sampleID": samples_keys,
"com": c3d,
"metadata": prepare_save_metadata(params),
},
)
# Also save a copy into the label3d file
# save_COM_dannce_mat(params, c3d, samples_keys)
def write_com_file(params, samples_, com3d_dict_):
cfilename = os.path.join(params["dannce_predict_dir"], "com3d_used.mat")
print("Saving 3D COM to {}".format(cfilename))
c3d = np.zeros((len(samples_), 3))
for i in range(len(samples_)):
c3d[i] = com3d_dict_[samples_[i]]
sio.savemat(cfilename, {"sampleID": samples_, "com": c3d})
def savedata_expval(
fname, params, write=True, data=None, num_markers=20, tcoord=True, pmax=False
):
"""Save the expected values."""
if data is None:
f = open(fname, "rb")
data = cPickle.load(f)
f.close()
d_coords = np.zeros((len(list(data.keys())), 3, num_markers))
t_coords = np.zeros((len(list(data.keys())), 3, num_markers))
sID = np.zeros((len(list(data.keys())),))
p_max = np.zeros((len(list(data.keys())), num_markers))
for (i, key) in enumerate(data.keys()):
d_coords[i] = data[key]["pred_coord"]
if tcoord:
t_coords[i] = np.reshape(data[key]["true_coord_nogrid"], (3, num_markers))
if pmax:
p_max[i] = data[key]["pred_max"]
sID[i] = data[key]["sampleID"]
sdict = {
"pred": d_coords,
"data": t_coords,
"p_max": p_max,
"sampleID": sID,
#"metadata": #prepare_save_metadata(params),
}
if write and data is None:
sio.savemat(
fname.split(".pickle")[0] + ".mat",
sdict,
)
elif write and data is not None:
sio.savemat(fname, sdict)
return d_coords, t_coords, p_max, sID
def savedata_tomat(
fname,
params,
vmin,
vmax,
nvox,
write=True,
data=None,
num_markers=20,
tcoord=True,
tcoord_scale=True,
addCOM=None,
):
"""Save pickled data to a mat file.
From a save_data structure saved to a *.pickle file, save a matfile
with useful variables for easier manipulation in matlab.
Also return pred_out_world and other variables for plotting within jupyter
"""
if data is None:
f = open(fname, "rb")
data = cPickle.load(f)
f.close()
d_coords = np.zeros((list(data.keys())[-1] + 1, 3, num_markers))
t_coords = np.zeros((list(data.keys())[-1] + 1, 3, num_markers))
p_max = np.zeros((list(data.keys())[-1] + 1, num_markers))
log_p_max = np.zeros((list(data.keys())[-1] + 1, num_markers))
sID = np.zeros((list(data.keys())[-1] + 1,))
for (i, key) in enumerate(data.keys()):
d_coords[i] = data[key]["pred_coord"]
if tcoord:
t_coords[i] = np.reshape(data[key]["true_coord_nogrid"], (3, num_markers))
p_max[i] = data[key]["pred_max"]
log_p_max[i] = data[key]["logmax"]
sID[i] = data[key]["sampleID"]
vsize = (vmax - vmin) / nvox
# First, need to move coordinates over to centers of voxels
pred_out_world = vmin + d_coords * vsize + vsize / 2
if tcoord and tcoord_scale:
t_coords = vmin + t_coords * vsize + vsize / 2
if addCOM is not None:
# We use the passed comdict to add back in the com, this is useful
        # if one wants to bootstrap on these values for COMnet or otherwise
for i in range(len(sID)):
            pred_out_world[i] = pred_out_world[i] + addCOM[int(sID[i])][:, np.newaxis]
sdict = {
"pred": pred_out_world,
"data": t_coords,
"p_max": p_max,
"sampleID": sID,
"log_pmax": log_p_max,
"metadata": prepare_save_metadata(params),
}
if write and data is None:
sio.savemat(
fname.split(".pickle")[0] + ".mat",
sdict,
)
elif write and data is not None:
sio.savemat(
fname,
sdict,
)
return pred_out_world, t_coords, p_max, log_p_max, sID
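# Hedged worked example of the voxel-to-world conversion used above (vmin, vmax
# and nvox values are illustrative, not taken from any config): with vmin=-120,
# vmax=120 and nvox=80, vsize is 3, so voxel index 0 maps to the voxel centre at
# -118.5 and index 79 to +118.5.
def _example_voxel_to_world(vmin=-120.0, vmax=120.0, nvox=80):
    vsize = (vmax - vmin) / nvox
    centers = vmin + np.arange(nvox) * vsize + vsize / 2
    assert np.isclose(centers[0], -118.5) and np.isclose(centers[-1], 118.5)
    return centers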
"""
IMAGE OPS (should be moved to ops)
"""
def __initAvgMax(t, g, o, params):
"""
Helper function for creating 3D targets
"""
gridsize = tuple([params["nvox"]] * 3)
g = np.reshape(
g,
(-1, *gridsize, 3),
)
for i in range(o.shape[0]):
for j in range(o.shape[-1]):
o[i, ..., j] = np.exp(
-(
(g[i, ..., 1] - t[i, 1, j]) ** 2
+ (g[i, ..., 0] - t[i, 0, j]) ** 2
+ (g[i, ..., 2] - t[i, 2, j]) ** 2
)
/ (2 * params["sigma"] ** 2)
)
return o
def initAvgMax(y_train, y_valid, Xtg, Xvg, params):
"""
Converts 3D coordinate targets into 3D volumes, for AVG+MAX training
"""
gridsize = tuple([params["nvox"]] * 3)
y_train_aux = np.zeros(
(
y_train.shape[0],
*gridsize,
params["new_n_channels_out"],
),
dtype="float32",
)
y_valid_aux = np.zeros(
(
y_valid.shape[0],
*gridsize,
params["new_n_channels_out"],
),
dtype="float32",
)
return (
__initAvgMax(y_train, Xtg, y_train_aux, params),
__initAvgMax(y_valid, Xvg, y_valid_aux, params),
)
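# Hedged shape sketch for initAvgMax (synthetic targets and grids): keypoint
# targets of shape (N, 3, K) plus flattened voxel grids of shape (N, nvox**3, 3)
# become Gaussian volumes of shape (N, nvox, nvox, nvox, K). The params below are
# illustrative only.
def _example_initAvgMax():
    _params = {"nvox": 8, "new_n_channels_out": 2, "sigma": 10.0}
    grid = np.stack(
        np.meshgrid(*[np.arange(8, dtype="float32")] * 3, indexing="ij"), axis=-1
    ).reshape(1, -1, 3)
    Xtg = np.tile(grid, (4, 1, 1))
    y_train = np.random.rand(4, 3, 2).astype("float32") * 7
    y_valid = np.random.rand(4, 3, 2).astype("float32") * 7
    y_train_aux, y_valid_aux = initAvgMax(y_train, y_valid, Xtg, Xtg, _params)
    assert y_train_aux.shape == (4, 8, 8, 8, 2)
    assert y_valid_aux.shape == (4, 8, 8, 8, 2)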
def batch_rgb2gray(imstack):
"""Convert to gray image-wise.
batch dimension is first.
"""
grayim = np.zeros((imstack.shape[0], imstack.shape[1], imstack.shape[2]), "float32")
for i in range(grayim.shape[0]):
grayim[i] = rgb2gray(imstack[i].astype("uint8"))
return grayim
def return_tile(imstack, fac=2):
"""Crop a larger image into smaller tiles without any overlap."""
height = imstack.shape[1] // fac
width = imstack.shape[2] // fac
out = np.zeros(
(imstack.shape[0] * fac * fac, height, width, imstack.shape[3]), "float32"
)
cnt = 0
for i in range(imstack.shape[0]):
for j in np.arange(0, imstack.shape[1], height):
for k in np.arange(0, imstack.shape[2], width):
out[cnt, :, :, :] = imstack[i, j : j + height, k : k + width, :]
cnt = cnt + 1
return out
def tile2im(imstack, fac=2):
    """Reconstruct larger image from tiled data."""
height = imstack.shape[1]
width = imstack.shape[2]
out = np.zeros(
(imstack.shape[0] // (fac * fac), height * fac, width * fac, imstack.shape[3]),
"float32",
)
cnt = 0
for i in range(out.shape[0]):
for j in np.arange(0, out.shape[1], height):
for k in np.arange(0, out.shape[2], width):
out[i, j : j + height, k : k + width, :] = imstack[cnt]
cnt += 1
return out
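# Hedged round-trip check (random data): return_tile and tile2im are inverses when
# height and width are divisible by `fac`, which documents the tile ordering.
def _example_tile_roundtrip():
    batch = np.random.rand(2, 8, 8, 3).astype("float32")
    tiles = return_tile(batch, fac=2)      # -> (2 * 2 * 2, 4, 4, 3)
    restored = tile2im(tiles, fac=2)       # -> (2, 8, 8, 3)
    assert tiles.shape == (8, 4, 4, 3)
    assert np.allclose(restored, batch)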
def downsample_batch(imstack, fac=2, method="PIL"):
"""Downsample each image in a batch."""
if method == "PIL":
out = np.zeros(
(
imstack.shape[0],
imstack.shape[1] // fac,
imstack.shape[2] // fac,
imstack.shape[3],
),
"float32",
)
if out.shape[-1] == 3:
# this is just an RGB image, so no need to loop over channels with PIL
for i in range(imstack.shape[0]):
out[i] = np.array(
PIL.Image.fromarray(imstack[i].astype("uint8")).resize(
(out.shape[2], out.shape[1]), resample=PIL.Image.LANCZOS
)
)
else:
for i in range(imstack.shape[0]):
for j in range(imstack.shape[3]):
out[i, :, :, j] = np.array(
PIL.Image.fromarray(imstack[i, :, :, j]).resize(
(out.shape[2], out.shape[1]), resample=PIL.Image.LANCZOS
)
)
elif method == "dsm":
out = np.zeros(
(
imstack.shape[0],
imstack.shape[1] // fac,
imstack.shape[2] // fac,
imstack.shape[3],
),
"float32",
)
for i in range(imstack.shape[0]):
for j in range(imstack.shape[3]):
out[i, :, :, j] = dsm(imstack[i, :, :, j], (fac, fac))
elif method == "nn":
out = imstack[:, ::fac, ::fac]
elif fac > 1:
raise Exception("Downfac > 1. Not a valid downsampling method")
return out
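# Hedged sketch (random images): every supported method halves H and W for fac=2;
# "PIL" uses Lanczos resampling, "dsm" local-mean pooling, and "nn" plain striding.
def _example_downsample_batch():
    batch = (np.random.rand(2, 32, 32, 3) * 255).astype("float32")
    for method in ("PIL", "dsm", "nn"):
        assert downsample_batch(batch, fac=2, method=method).shape == (2, 16, 16, 3)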
def batch_maximum(imstack):
"""Find the location of the maximum for each image in a batch."""
maxpos =
|
np.zeros((imstack.shape[0], 2))
|
numpy.zeros
|
from scipy.io import wavfile
from scipy.special import expn
from scipy.fftpack import ifft
import numpy as np
def logMMSE(inputFilePath, outputFilePath):
"""
% Implements the logMMSE algorithm [1].
Parameters
----------
inputFilePath : string or open file handle
Input wav file.
outputFilePath: string or open file handle
Output wav file.
References
----------
.. [1] <NAME>. and <NAME>. (1985). Speech enhancement using a minimum
mean-square error log-spectral amplitude estimator. IEEE Trans. Acoust.,
Speech, Signal Process., ASSP-23(2), 443-445.
"""
[sample_rate, sample_data] = wavfile.read(inputFilePath, True)
# Frame size in samples
len = np.int(np.floor(20 * sample_rate * 0.001))
if len % 2 == 1:
len += 1
# window overlap in percent of frame size
perc = 50
len1 = np.floor(len * perc * 0.01)
len2 = len - len1
win = np.hanning(len)
win = win * len2 / sum(win)
    # Noise magnitude calculations - assuming that the first 6 frames are noise / silence
nFFT = len << 2
noise_mean = np.zeros([nFFT, 1])
dtype = 2 << 14
j = 0
for i in range(1, 7):
s1 = j
s2 = j + np.int(len)
batch = sample_data[s1: s2] / dtype
X = win * batch
foo = np.fft.fft(X, np.int(nFFT))
noise_mean += np.abs(foo.reshape(foo.shape[0], 1))
j += len
noise_mu = np.square(noise_mean / 6)
# Allocate memory and initialize various variables
x_old = np.zeros([np.int(len1), 1]);
Nframes = np.floor(sample_data.shape[0] / len2) - np.floor(len / len2)
xfinal = np.zeros([np.int(Nframes * len2), 1]);
# Start Processing
k = 0
aa = 0.98
mu = 0.98
eta = 0.15
ksi_min = 10 ** (-25 * 0.1)
for n in range(0, np.int(Nframes)):
s1 = k
s2 = k + np.int(len)
batch = sample_data[s1: s2] / dtype
insign = win * batch
spec = np.fft.fft(insign, nFFT)
# Compute the magnitude
sig = abs(spec)
sig2 = sig ** 2
# Limit post SNR to avoid overflows
gammak = np.divide(sig2.reshape(sig2.shape[0], 1), noise_mu.reshape(noise_mu.shape[0], 1))
gammak[gammak > 40] = 40
foo = gammak - 1
foo[foo < 0] = 0
if 0 == n:
ksi = aa + (1 - aa) * foo
else:
# a priori SNR
ksi = aa * Xk_prev / noise_mu + (1 - aa) * foo
# limit ksi to - 25 db
ksi[ksi < ksi_min] = ksi_min
log_sigma_k = gammak * ksi / (1 + ksi) - np.log(1 + ksi)
vad_decision = sum(log_sigma_k) / len
# noise only frame found
if vad_decision < eta:
noise_mu = mu * noise_mu + (1 - mu) * sig2.reshape([sig2.shape[0], 1])
        # === end of VAD ===
# Log - MMSE estimator
A = ksi / (1 + ksi)
vk = A * gammak
ei_vk = 0.5 * expn(1, vk)
hw = A * np.exp(ei_vk)
sig = sig.reshape([sig.shape[0], 1]) * hw
Xk_prev = sig ** 2
xi_w = ifft(hw * spec.reshape([spec.shape[0], 1]), nFFT, 0)
xi_w = np.real(xi_w)
xfinal[k: k + np.int(len2)] = x_old + xi_w[0: np.int(len1)]
x_old = xi_w[np.int(len1): np.int(len)]
k = k +
|
np.int(len2)
|
numpy.int
|
import numpy as np
import scipy.stats
class ImproperCovarianceError(Exception):
"""
Exception to be thrown when a Gaussian is created with a covariance matrix that isn't strictly positive definite.
"""
def __str__(self):
return 'Covariance matrix is not strictly positive definite'
class Gaussian:
"""
Implements a gaussian pdf. Focus is on efficient multiplication, division and sampling.
"""
def __init__(self, m=None, P=None, U=None, S=None, Pm=None):
"""
Initialize a gaussian pdf given a valid combination of its parameters. Valid combinations are:
m-P, m-U, m-S, Pm-P, Pm-U, Pm-S
:param m: mean
:param P: precision
:param U: upper triangular precision factor such that U'U = P
:param S: covariance
:param Pm: precision times mean such that P*m = Pm
"""
try:
if m is not None:
m = np.asarray(m)
self.m = m
self.n_dims = m.size
if P is not None:
P = np.asarray(P)
L = np.linalg.cholesky(P)
self.P = P
self.C = np.linalg.inv(L)
self.S = np.dot(self.C.T, self.C)
self.Pm = np.dot(P, m)
self.logdetP = 2.0 * np.sum(np.log(np.diagonal(L)))
elif U is not None:
U = np.asarray(U)
self.P =
|
np.dot(U.T, U)
|
numpy.dot
|
from __future__ import print_function, division, absolute_import
import matplotlib
matplotlib.use('Agg')
import os
import numpy as np
from odin import preprocessing as pp
from odin import fuel as F, visual as V
from odin.utils import (ctext, get_logpath, get_module_from_path,
get_script_path, Progbar, mpi,
catch_warnings_ignore)
from helpers import (ALL_FILES, ALL_NOISE, ALL_DATASET, IS_DEBUGGING,
PATH_ACOUSTIC_FEATURES, FEATURE_RECIPE,
AUGMENTATION_NAME, Config,
EXP_DIR, NCPU, validate_features_dataset)
if AUGMENTATION_NAME == 'None':
raise ValueError("`-aug` option was not provided, choose: 'rirs' or 'musan'")
np.random.seed(Config.SUPER_SEED)
# percentage of data will be used for augmentation
PERCENTAGE_AUGMENTATION = 0.8
# ===========================================================================
# Constant
# ===========================================================================
AUGMENTATION_DATASET = [
'swb', 'sre04', 'sre05', 'sre06', 'sre08', 'sre10']
AUGMENTATION_DATASET = [i for i in AUGMENTATION_DATASET
if i in ALL_DATASET]
print("Augmenting following dataset: %s" %
ctext(', '.join(AUGMENTATION_DATASET), 'yellow'))
# ====== get the duration ====== #
path = os.path.join(PATH_ACOUSTIC_FEATURES, FEATURE_RECIPE)
assert os.path.exists(path), \
"Acoustic feature must be extracted first, and stored at path: %s" % path
ds = F.Dataset(path, read_only=True)
all_duration = dict(ds['duration'].items())
ds.close()
# ====== select a new file list ====== #
AUG_FILES = []
missing_duration = []
for row in ALL_FILES:
if row[4] not in AUGMENTATION_DATASET:
continue
if row[2] not in all_duration:
missing_duration.append(row)
continue
dur = all_duration[row[2]]
AUG_FILES.append([i for i in row] + [dur])
print("#Files missing duration:", ctext(len(missing_duration), 'cyan'))
assert len(AUG_FILES), "Cannot find any files for augmentation"
AUG_FILES = np.array(AUG_FILES)
org_shape = AUG_FILES.shape
# select half of the files
np.random.shuffle(AUG_FILES);
|
np.random.shuffle(AUG_FILES)
|
numpy.random.shuffle
|
'''MIT License
Copyright (c) 2022 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. '''
class HERTZ:
"""Calculation of contact stresses, power loss and \
film thickness along path of contact"""
def __init__(self, GMAT, GLUB, GEO, GPATH, GFS, POSAE):
import numpy as np
# tile speeds
self.vg3D = np.tile(GFS.vg, (len(GPATH.bpos), 1)).T
self.vr13D = np.tile(GFS.vr1, (len(GPATH.bpos), 1)).T
self.vr23D = np.tile(GFS.vr2, (len(GPATH.bpos), 1)).T
self.vr3D = np.tile(GFS.vr, (len(GPATH.bpos), 1)).T
# HERTZ ===============================================================
# effective Young modulus
self.Eeq = 1/((1 - GMAT.v1**2)/GMAT.E1 + (1 - GMAT.v2**2)/GMAT.E2)
# equivalent radius
self.R13D = np.tile(GPATH.R1, (len(GPATH.bpos), 1)).T
self.R23D = np.tile(GPATH.R2, (len(GPATH.bpos), 1)).T
self.Req = 1/((1/self.R13D) + (1/self.R23D))/
|
np.cos(GEO.betab)
|
numpy.cos
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import copy
import math
from tqdm import *
import argparse
import os
import glob
def normalizePoints(pts):
#calculate the mean of x and y coordinates - Compute the centroid of all corresponding points in a single image
pts_mean = np.mean(pts, axis=0)
x_bar = pts_mean[0]
y_bar = pts_mean[1]
#Recenter by subtracting the mean from original points
x_tilda, y_tilda = pts[:,0] - x_bar, pts[:, 1] - y_bar
#scale term s and s': average distances of the centered points from the origin in both the left and right images
s = (2/np.mean(x_tilda**2 + y_tilda**2))**(0.5)
#construct transformation matrix
T_S = np.diag([s,s,1])
T_T = np.array([[1, 0, -x_bar],[0, 1, -y_bar],[0, 0, 1]])
T = np.dot(T_S, T_T)
x = np.column_stack((pts, np.ones(len(pts))))
x_norm = (T.dot(x.T)).T
return x_norm, T
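# Hedged sanity check (random points): after normalization the points are
# zero-mean with a mean squared distance of 2 from the origin (RMS sqrt(2)),
# which is the conditioning property the 8-point algorithm relies on.
def checkNormalizePoints():
    pts = np.random.rand(20, 2) * 100
    pts_norm, T = normalizePoints(pts)
    assert np.allclose(pts_norm[:, :2].mean(axis=0), 0, atol=1e-8)
    assert np.isclose(np.mean(pts_norm[:, 0]**2 + pts_norm[:, 1]**2), 2.0)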
def getFundamentalMatrix(src, dst):
col = 9
# need minimum of 8 points to compute F Matrix
if src.shape[0] > 7:
#do normalization
src_norm, T1 = normalizePoints(src)
dst_norm, T2 = normalizePoints(dst)
A = np.zeros((len(src_norm),9))
for i in range(len(src_norm)):
x1, y1 = src_norm[i][0], src_norm[i][1]
x2, y2 = dst_norm[i][0], dst_norm[i][1]
A[i] = np.array([x1*x2, x2*y1, x2, y2*x1, y2*y1, y2, x1, y1, 1])
#calculate SVD of A
U, S, VT = np.linalg.svd(A, full_matrices=True)
#F = VT.T[:, -1]
#F = F.reshape(3,3)
F = VT[-1,:]
F = F.reshape(3,3)
#the calculated F matrix can be of rank 3, but to find epipolar lines, the rank should be 2.
#thus,
U_, S_, VT_ = np.linalg.svd(F)
# make diagonal matrix and set last element to 0
S_ = np.diag(S_)
S_[2,2] = 0
#recompute F
F = np.dot(U_, np.dot(S_, VT_))
#un-normalize
F = np.dot(T2.T, np.dot(F, T1))
return F
else:
return None
def calculateError(x1, x2, F):
# make x1 and x2 3*3
x1_ = np.array([x1[0], x1[1], 1])
x2_ = np.array([x2[0], x2[1], 1])
#calculate the error, the ideal case should be zero for the below product
error = np.dot(x2_.T, np.dot(F, x1_))
return np.abs(error)
# https://cmsc733.github.io/2019/proj/p3/
def processInliers(src_pts, dst_pts):
max_error = 0.001
iterations = 1
final_idx = []
F_Matrix = None
inliers = 0
rows = src_pts.shape[0]
for i in range(2000):
temp_idx = []
random_row = np.random.choice(rows, size=8)
src = src_pts[random_row]
dst = dst_pts[random_row]
f_matrix = getFundamentalMatrix(src, dst)
#now check the F matrix for all pairs
for j in range(rows):
error = calculateError(src_pts[j], dst_pts[j], f_matrix)
if error < max_error:
temp_idx.append(j)
if len(temp_idx) > inliers:
inliers = len(temp_idx)
final_idx = temp_idx
F_Matrix = f_matrix
src_final = src_pts[final_idx]
dst_final = dst_pts[final_idx]
return F_Matrix, src_final, dst_final
def getEssentialMatrix(F, K1, K2):
E = K2.T.dot(F).dot(K1)
#enforce rank 2 for singular matrix
U,S,VT = np.linalg.svd(E)
S = np.diag([1,1,0])
E = np.dot(U, np.dot(S, VT))
return E
#https://cmsc733.github.io/2019/proj/p3/
def getCameraPose(E):
U, S, VT = np.linalg.svd(E)
W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
R, T = [],[]
R.append(np.dot(U, np.dot(W, VT)))
R.append(np.dot(U, np.dot(W, VT)))
R.append(np.dot(U, np.dot(W.T, VT)))
R.append(np.dot(U, np.dot(W.T, VT)))
T.append(U[:, 2])
T.append(-U[:, 2])
T.append(U[:, 2])
T.append(-U[:, 2])
# R should always be positive
for i in range(len(R)):
if (np.linalg.det(R[i]) < 0):
R[i] = -R[i]
T[i] = -T[i]
return R, T
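# Hedged sanity check (synthetic E): the four candidate rotations returned by
# getCameraPose should all be proper rotations (orthonormal, det = +1) once the
# sign fix has been applied; the translations come in +/- u3 pairs.
def checkCameraPose():
    U, _, VT = np.linalg.svd(np.random.rand(3, 3))
    E = U.dot(np.diag([1.0, 1.0, 0.0])).dot(VT)   # valid essential matrix by construction
    R_cands, T_cands = getCameraPose(E)
    for R_cand in R_cands:
        assert np.allclose(R_cand.dot(R_cand.T), np.eye(3), atol=1e-8)
        assert np.isclose(np.linalg.det(R_cand), 1.0)
    assert np.allclose(T_cands[0], -T_cands[1])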
def getCorrectPose(pts_3D, R1, T1, R2, T2):
num_Z_positive = 0
zList = []
I = np.identity(3)
for k in range (len(pts_3D)):
num_Z_positive = 0
pts3D = pts_3D[k]
#normalize
pts3D = pts3D/pts3D[3, :]
P_2 = np.dot(R2[k], np.hstack((I, -T2[k].reshape(3,1))))
P_2 = np.vstack((P_2, np.array([0,0,0,1]).reshape(1,4)))
P_1 = np.dot(R1, np.hstack((I, -T1.reshape(3,1))))
P_1 = np.vstack((P_1, np.array([0,0,0,1]).reshape(1,4)))
for i in range(pts3D.shape[1]):
#calculate point for Right image
X_2 = pts3D[:,i]
X_2 = X_2.reshape(4,1)
Xc_2 = np.dot(P_2, X_2)
Xc_2 = Xc_2 / Xc_2[3]
z_2 = Xc_2[2]
#calcuate points for Left image
X_1 = pts3D[:,i]
X_1 = X_1.reshape(4,1)
Xc_1 = np.dot(P_1, X_1)
Xc_1 = Xc_1 / Xc_1[3]
z_1 = Xc_1[2]
if (z_1 > 0 and z_2 > 0):
num_Z_positive += 1
#print(num_Z_positive)
zList.append(num_Z_positive)
# get the correct camera pose index - define threshold for points as half the number of points
threshold = pts_3D[0].shape[1]//2
zArray = np.array(zList)
index = np.where(zArray > threshold)
return index
def getEpipolarLines(src_final, dst_final, F, im1_epipolar, im2_epipolar, rectified=False):
lines1, lines2 = [], []
for i in range(len(src_final)):
#arrange the source and destination points in a 3*3 array to be multiplied with F
x1 = np.array([src_final[i,0], src_final[i,1], 1]).reshape(3,1)
x2 = np.array([dst_final[i,0], dst_final[i,1], 1]).reshape(3,1)
#epipolar line 1 coefficients - left image
line1 = np.dot(F.T, x2)
lines1.append(line1)
#epipolar line 2 coefficients - right image
line2 = np.dot(F, x1)
lines2.append(line2)
if (not rectified):
#get the x and y values - lines are not parallel to x axis
x1_low = 0
x1_high = im1_epipolar.shape[1] - 1
y1_low = -(line1[2] + x1_low*line1[0])/line1[1]
y1_high = -(line1[2] + x1_high*line1[0])/line1[1]
x2_low = 0
x2_high = im2_epipolar.shape[1] - 1
y2_low = -(line2[2] + x2_low*line2[0])/line2[1]
y2_high = -(line2[2] + x2_high*line2[0])/line2[1]
else:
# as the lines are parallel to the X axis, the slope tends to zero
x1_low = 0
x1_high = im1_epipolar.shape[1] - 1
y1_low = -(line1[2]/line1[1])
y1_high = y1_low
x2_low = 0
x2_high = im2_epipolar.shape[1] - 1
y2_low = -(line2[2]/line2[1])
y2_high = y2_low
#print the points onto image
cv2.circle(im1_epipolar, (int(src_final[i,0]), int(src_final[i,1])), 5, (0,0,255), 2)
im1_epipolar = cv2.line(im1_epipolar, (int(x1_low), int(y1_low)), (int(x1_high), int(y1_high)), (0,255,0), 1)
cv2.circle(im2_epipolar, (int(dst_final[i,0]), int(dst_final[i,1])), 5, (0,0,255), 2)
im2_epipolar = cv2.line(im2_epipolar, (int(x2_low), int(y2_low)), (int(x2_high), int(y2_high)), (0,255,0), 1)
combined = np.concatenate((im1_epipolar, im2_epipolar), axis=1)
# temp = cv2.resize(combined, (1200,700))
# cv2.imshow('epilines', temp)
# cv2.imwrite('Epilines_.png', combined)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
return lines1, lines2, combined
def SSD(mat1, mat2):
diff_sq = np.square(mat1 - mat2)
ssd = np.sum(diff_sq)
return ssd
def SAD(mat1, mat2):
    return np.sum(np.abs(mat1 - mat2))
def calcuateDisparity(img1_rectified_reshaped, img2_rectified_reshaped):
h, w = img1_rectified_reshaped.shape
disparity = np.zeros((h, w), np.uint8)
window_size = 11
half_window_size = math.floor((window_size)/2)
search_distance = 200
for row in tqdm(range(half_window_size, h - half_window_size)):
for col in range(half_window_size, w - half_window_size):
patch1 = img1_rectified_reshaped[row - half_window_size: row + half_window_size, col - half_window_size : col + half_window_size]
min_ssd = 10000
disp = 0
#scan along epiline till a particular length
for distance in range(-search_distance, search_distance, 1): #bidirectional
c_dash = col + distance
# print(c_dash)
if (c_dash < w - half_window_size) and (c_dash > half_window_size):
patch2 = img2_rectified_reshaped[row - half_window_size: row + half_window_size, c_dash - half_window_size : c_dash + half_window_size]
# if patch2.shape[1] < 4:
# print(r, c, c_dash)
ssd = SSD(patch1, patch2)
if ssd < min_ssd:
min_ssd = ssd
disp = np.abs(distance)
disparity[row, col] = disp
return disparity
def Stereo(args):
dataset = args['set']
if (dataset == 1):
K1 = np.array([[5299.313, 0, 1263.818], [0, 5299.313, 977.763], [0, 0, 1]])
K2 = np.array([[5299.313, 0, 1438.004], [0, 5299.313, 977.763], [0, 0, 1]])
baseline = 177.288
f = K1[0,0]
elif (dataset == 2):
K1 =
|
np.array([[4396.869, 0, 1353.072], [0, 4396.869, 989.702], [0, 0, 1]])
|
numpy.array
|
import os
import pickle
import numpy as np
import rampwf as rw
from sklearn.model_selection import train_test_split, ShuffleSplit
problem_title = 'Optical network modelling'
_NB_CHANNELS = 32 # C100
# We are splitting both train/test and train/valid using the campaign
# indices. Training campaigns will be all subcascades, and they fully
# go in the training set. Each train/valid split on the training set
# is then using _cv_valid_rate of the training instances for training.
# They will not be part of the validation.
# Test campaigns will be split: _test_rate of them will be in the test
# set and (1 - _test_rate) in the training set. Of this latter,
# _cv_valid_rate will be in each fold validation set, and
# (1 - _cv_valid_rate) will be part of each fold training set.
_train_campaigns = [1, 2]
_test_campaigns = [3, 4]
_test_rate = 0.8
_cv_valid_rate = 0.5
class EM99(rw.score_types.BaseScoreType):
"""99% error margin (EM99) score. Measures the required
margin in terms of the ratio of the true and predicted
values to cover 99% of all cases."""
is_lower_the_better = True
minimum = 0.0
maximum = float('inf')
def __init__(self, name='EM99', precision=2, quant=0.99, eps=1e-8):
self.name = name
self.precision = precision
self.quant = quant
self.eps = eps
def __call__(self, y_true, y_pred):
if (y_pred < 0).any():
return self.worst
ratio_err = np.array(
[(p + self.eps) / t for y_hat, y in zip(y_pred, y_true)
for p, t in zip(y_hat, y) if t != 0])
# sorted absolute value of mw2dB ratio err
score = np.percentile(
np.abs(10 * np.log10(ratio_err)), 100 * self.quant)
return score
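# Hedged worked example (synthetic arrays, not benchmark data): EM99 is the 99th
# percentile of |10*log10(pred/true)| over all non-zero channels, so a uniform
# factor-of-2 over-prediction scores roughly 10*log10(2) ~ 3.01 dB.
def _example_em99():
    y_true = np.full((5, _NB_CHANNELS), 1.0)
    y_pred = 2.0 * y_true
    assert abs(EM99()(y_true, y_pred) - 10 * np.log10(2.0)) < 1e-6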
class MEM(rw.score_types.BaseScoreType):
"""Maximum error margin score. Measures the required
margin in terms of the ratio of the true and predicted
values to cover all cases. The same as EM100."""
is_lower_the_better = True
minimum = 0.0
maximum = float('inf')
def __init__(self, name='MEM', precision=2, eps=1e-8):
self.name = name
self.precision = precision
self.eps = eps
def __call__(self, y_true, y_pred):
if (y_pred < 0).any():
return self.worst
ratio_err = np.array(
[(p + self.eps) / t for y_hat, y in zip(y_pred, y_true)
for p, t in zip(y_hat, y) if t != 0])
# sorted absolute value of mw2dB ratio err
score = np.max(
np.abs(10 * np.log10(ratio_err)))
return score
class ONRMSE(rw.score_types.BaseScoreType):
"""Optical network root-mean-square error. Measures the RMSE
between the true and predicted values of all on channels."""
is_lower_the_better = True
minimum = 0.0
maximum = float('inf')
def __init__(self, name='ONRMSE', precision=2):
self.name = name
self.precision = precision
def __call__(self, y_true, y_pred):
on_y_true = np.array([t for y in y_true for t in y if t != 0])
on_y_pred = np.array([p for y_hat, y in zip(y_pred, y_true) for p, t in zip(y_hat, y) if t != 0])
if (on_y_pred < 0).any():
return self.worst
return np.sqrt(np.mean(np.square(on_y_true - on_y_pred)))
workflow = rw.workflows.Regressor()
Predictions = rw.prediction_types.make_regression(list(range(_NB_CHANNELS)))
score_types = [
EM99(precision=3),
ONRMSE(name='RMSE', precision=4),
MEM(precision=2),
]
def _read_data(path, campaign):
data_path = os.path.join(path, 'data')
with open(os.path.join(data_path, f'c{campaign}', 'X.pkl'), 'rb') as f:
X = pickle.load(f)
# add campaign index as last column to X
X = np.array([np.append(x, campaign) for x in X])
with open(os.path.join(data_path, f'c{campaign}', 'y.pkl'), 'rb') as f:
y = pickle.load(f)
return X, y
# Select full cascades from the data: only instances with maximum
# number of modules
def _full_cascade_mask(X):
lengths = list(map(len, X[:, 0])) # first column is list of modules
max_length = np.max(lengths)
return lengths == max_length
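# Hedged toy example: the first column of X holds each instance's module list, and
# only rows with the maximum list length (full cascades) survive the mask.
def _example_full_cascade_mask():
    X = np.array(
        [[[1, 2, 3], 0.1], [[1, 2], 0.2], [[4, 5, 6], 0.3]], dtype=object
    )
    assert list(_full_cascade_mask(X)) == [True, False, True]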
def _train_test_indices(X):
# random_state must be fixed to avoid leakage
return train_test_split(
range(len(X)), test_size=_test_rate, random_state=51)
# Load only full cascades from the test campaigns. is_test_int = 1 if loading the
# test set, 0 if loading the portion of the test campaigns that goes into train.
def _load_test(path, is_test_int):
Xs = []
ys = []
for campaign in _test_campaigns:
X, y = _read_data(path, campaign)
mask = _full_cascade_mask(X) # only full cascades in test set
test_is = _train_test_indices(X[mask])[is_test_int]
Xs.append(X[mask][test_is])
ys.append(y[mask][test_is])
return np.concatenate(Xs), np.concatenate(ys)
def get_train_data(path='.'):
Xs = []
ys = []
for campaign in _train_campaigns:
X, y = _read_data(path, campaign)
Xs.append(X)
ys.append(y)
# adding (1 - _test_rate) of the test full cascades to the training data
X_test_in_train, y_test_in_train = _load_test(path, is_test_int=0)
Xs.append(X_test_in_train)
ys.append(y_test_in_train)
    return np.concatenate(Xs), np.concatenate(ys)
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import settings
import datetime
import pygame
import random
import keras
import time
import os
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate
from keras.models import Model, load_model, Sequential
from keras.utils import plot_model
from keras.optimizers import Adam
from collections import deque
from matplotlib import style
from keras import backend
class Game:
_count = 0
def __init__(self, food_ammount=1, snake_len=3, width=30, height=30, free_moves=200, view_len=3,
random_start=True, render=False):
""""""
self.render = render
self.food_limit = food_ammount
self.view_len = view_len
self.area_len = self.view_len * 2 + 1
self.free_moves = free_moves
self.initial_snake_len = snake_len
self.random_start = random_start
self.MOVE_PENALTY = settings.MOVE_PENALTY
self.FOOD_REWARD = settings.FOOD_REWARD
self.DEATH_PENALTY = settings.DEATH_PENALTY
self.width, self.height = width, height
self.box_size = 25
self.size = self.width * self.box_size, self.height * self.box_size
self.score = 0
self.food_eaten = 0
self.direction = 0
self.moves_left = self.free_moves
self.time = 0
self.done = False
self.head = [0, 0]
self.tail = deque([self.head] * self.initial_snake_len, maxlen=self.width * self.height + 1)
self.snake_len = self.initial_snake_len
self.Food = []
self.fill_food()
self._reset()
Game._count += 1
if render:
pygame.init()
width = int(width)
height = int(height)
self.screen = pygame.display.set_mode((self.box_size * width, self.box_size * height))
def __del__(self):
Game._count -= 1
if Game._count < 1:
pygame.quit()
def _reset(self):
self.score = 0
self.food_eaten = 0
self.direction = 0
self.moves_left = self.free_moves
self.time = 0
self.done = False
if self.random_start:
self.head = [np.random.randint(1, self.width - 1), np.random.randint(1, self.height - 1)]
else:
self.head = [self.width // 2, self.height // 2]
self.tail = deque([self.head] * self.initial_snake_len, maxlen=self.width * self.height + 1)
self.snake_len = self.initial_snake_len
self.Food = []
self.fill_food()
def reset(self):
self._reset()
obs = self.observation()
return obs
def fill_food(self):
while len(self.Food) < self.food_limit:
self.place_food()
def place_food(self):
while True:
            valid_to_break = True
new_food_pos = [np.random.randint(0, self.width), np.random.randint(0, self.height)]
if new_food_pos == self.head:
# Repeat while with new food
continue
for tail in self.tail:
if tail == new_food_pos:
                    valid_to_break = False
break
            if valid_to_break:
break
self.Food.append(new_food_pos)
def observation(self):
area = np.ones((self.area_len, self.area_len))
for arr_y in range(self.area_len):
for arr_x in range(self.area_len):
y = self.head[1] - (arr_y - self.view_len)
x = self.head[0] - self.view_len + arr_x
if x < 0 or y < 0 or x >= self.width or y >= self.height:
area[arr_y, arr_x] = 2
continue
skip_tail = False
for food in self.Food:
if [x, y] == food:
area[arr_y, arr_x] = 0
skip_tail = True
break
if skip_tail:
continue
for tail in self.tail:
if [x, y] == tail:
area[arr_y, arr_x] = 2
area = (area / 2).ravel()
food_relative_pos = (np.array(self.head) - np.array(self.Food[0])) / np.max(self.size)
if settings.DUAL_INPUT:
output = (area, food_relative_pos)
else:
output = np.concatenate([area, food_relative_pos])
return output
def random_action(self):
""" Return valid action in current situation"""
new_direction = (self.direction + np.random.randint(-1, 2)) % 4
return new_direction
def move_snake(self, new_direction):
"""
Move snake, and update environment
        Directions:
            0 - Up
            1 - Right
            2 - Down
            3 - Left
Parameters
----------
new_direction - int <0, 3>
Returns
-------
collision - boolean
food_eaten - boolean
action_valid - boolean
"""
# Check if action is valid
if self.direction == 0 and new_direction == 2 or \
self.direction == 1 and new_direction == 3 or \
self.direction == 2 and new_direction == 0 or \
self.direction == 3 and new_direction == 1:
action_valid = False
else:
action_valid = True
if action_valid:
new_direction = new_direction
else:
new_direction = self.direction
new_x = self.head[0] + (1 if new_direction == 1 else -1 if new_direction == 3 else 0)
new_y = self.head[1] + (1 if new_direction == 2 else -1 if new_direction == 0 else 0)
new_pos = [new_x, new_y]
        if new_x >= self.width or new_x < 0 or new_y >= self.height or new_y < 0:
collision = True
else:
collision = False
if not collision:
for tail in list(self.tail)[1:]: # First tail segment will move
if new_pos == tail:
collision = True
break
food_eaten = False
if not collision:
self.head = new_pos
for f_index, food in enumerate(self.Food):
if self.head == food:
self.Food.pop(f_index)
self.snake_len += 1
food_eaten = True
break
self.update_tail()
self.direction = new_direction
return collision, food_eaten, action_valid
def update_tail(self):
self.tail.append(self.head)
while len(self.tail) > self.snake_len: # Head is separate
self.tail.popleft()
def step(self, action=None):
"""
Parameters
----------
action
Returns
-------
observation
reward
done
"""
self.moves_left -= 1
self.time += 1
if self.done:
print("Game has been already ended!")
done = True
else:
done = False
collision, food, action_valid = self.move_snake(action)
if collision:
done = True
elif self.moves_left < 1 and not food:
done = True
if not action_valid:
reward = self.DEATH_PENALTY * 2
elif collision:
reward = self.DEATH_PENALTY
elif self.moves_left < 1:
reward = self.MOVE_PENALTY * 2
elif food:
reward = self.FOOD_REWARD
else:
reward = self.MOVE_PENALTY
if food:
self.moves_left = self.free_moves
self.food_eaten += 1
if done:
self.done = done
self.fill_food()
observation = self.observation()
self.score += reward
return observation, reward, done
def draw(self, epoch=None):
if self.done:
head_color = (255, 0, 0)
else:
head_color = (130, 255, 255)
self.screen.fill((25, 20, 30))
# self.screen.fill((40, 40, 45))
pygame.draw.rect(self.screen, (40, 60, 45),
(
(self.head[0] - self.view_len) * self.box_size, (self.head[1] - self.view_len) * self.box_size,
self.area_len * self.box_size, self.area_len * self.box_size))
for tail in self.tail:
pygame.draw.rect(self.screen, (35, 120, 50),
(tail[0] * self.box_size, tail[1] * self.box_size, self.box_size,
self.box_size))
pygame.draw.rect(self.screen, head_color,
(self.head[0] * self.box_size, self.head[1] * self.box_size, self.box_size,
self.box_size))
for food in self.Food:
pygame.draw.rect(self.screen, (0, 255, 0),
(food[0] * self.box_size, food[1] * self.box_size, self.box_size, self.box_size))
self.display_score(epoch)
pygame.display.update()
def display_score(self, epoch):
my_font = pygame.font.SysFont('Comic Sans MS', 30)
text_surface = my_font.render('Score = ' + str(round(self.score, 1)), False, (255, 255, 255))
self.screen.blit(text_surface, (0, 0))
text_surface = my_font.render('Food-eaten = ' + str(self.food_eaten), False, (255, 255, 255))
self.screen.blit(text_surface, (0, 20))
text_surface = my_font.render('Moves left = ' + str(self.moves_left), False, (255, 255, 255))
self.screen.blit(text_surface, (0, 40))
if epoch:
text_surface = my_font.render('Epoch = ' + str(epoch), False, (255, 255, 255))
self.screen.blit(text_surface, (0, 60))
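# Illustrative sketch (hypothetical helper, not used elsewhere): the typical way
# the Game environment is driven. Assumes settings.* provides the reward
# constants used above; the random policy stands in for a trained agent.
def _example_game_loop():
    game = Game(render=False)
    obs = game.reset()
    done = False
    while not done:
        action = game.random_action()           # a direction that is valid right now
        obs, reward, done = game.step(action)   # observation, scalar reward, episode flag
    return game.score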
class Agent:
def __init__(self,
input_shape,
action_space,
dual_input=False,
min_batch_size=1000,
max_batch_size=1000,
learining_rate=0.0001,
memory_size=10000):
dt = datetime.datetime.timetuple(datetime.datetime.now())
self.runtime_name = f"{dt.tm_mon:>02}-{dt.tm_mday:>02}--" \
f"{dt.tm_hour:>02}-{dt.tm_min:>02}-{dt.tm_sec:>02}"
self.min_batch_size = min_batch_size
self.max_batch_size = max_batch_size
self.input_shape = input_shape
self.action_space = action_space
self.learning_rate = learining_rate
self.memory = deque(maxlen=memory_size)
load_success = self.load_model()
# Bind train command
self._train = self._dual_train if settings.DUAL_INPUT else self._normal_train
        if load_success:
            print(f"Loading model: {MODEL_NAME}")
        else:
            print(f"New model: {MODEL_NAME}")
            # Build a fresh network only when no saved model could be loaded
            if dual_input:
                self.model = self.create_dual_model()
            else:
                self.model = self.create_normal_model()
self.model.compile(optimizer=Adam(lr=self.learning_rate),
loss='mse',
metrics=['accuracy'])
backend.set_value(self.model.optimizer.lr, self.learning_rate)
self.model.summary()
def create_dual_model(self):
input_area = Input(shape=(self.input_shape[0]))
layer1a = Dense(64, activation='relu')(input_area)
input_direction = Input(shape=(self.input_shape[1]))
layer2a = Dense(32, activation='relu')(input_direction)
merge_layer = concatenate([layer1a, layer2a], axis=-1)
layer3 = Dense(64, activation='relu')(merge_layer)
output = Dense(self.action_space, activation='linear')(layer3)
model = Model(inputs=[input_area, input_direction], outputs=output)
plot_model(model, f"{MODEL_NAME}/model.png")
with open(f"{MODEL_NAME}/model_summary.txt", 'w') as file:
model.summary(print_fn=lambda x: file.write(x + '\n'))
return model
def create_normal_model(self):
model = Sequential()
model.add(Dense(128, input_shape=self.input_shape, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dense(self.action_space, activation='linear'))
plot_model(model, f"{MODEL_NAME}/model.png")
with open(f"{MODEL_NAME}/model_summary.txt", 'w') as file:
model.summary(print_fn=lambda x: file.write(x + '\n'))
return model
def update_memory(self, state):
self.memory.append(state)
def save_model(self):
while True:
try:
self.model.save(f"{MODEL_NAME}/model")
return True
except OSError:
time.sleep(0.2)
def load_model(self):
if os.path.isfile(f"{MODEL_NAME}/model"):
while True:
try:
self.model = load_model(f"{MODEL_NAME}/model")
return True
except OSError:
time.sleep(0.2)
else:
return False
def train(self):
if len(self.memory) < self.min_batch_size:
return None
elif settings.TRAIN_ALL_SAMPLES:
train_data = list(self.memory)
elif len(self.memory) >= self.max_batch_size:
train_data = random.sample(self.memory, self.max_batch_size)
# print(f"Too much data, selecting from: {len(self.memory)} samples")
else:
train_data = list(self.memory)
if settings.STEP_TRAINING or settings.TRAIN_ALL_SAMPLES:
self.memory.clear()
self._train(train_data)
def _normal_train(self, train_data):
Old_states = []
New_states = []
Rewards = []
Dones = []
Actions = []
for old_state, new_state, reward, action, done in train_data:
Old_states.append(old_state)
New_states.append(new_state)
Actions.append(action)
Rewards.append(reward)
Dones.append(done)
Old_states = np.array(Old_states)
New_states = np.array(New_states)
old_qs = self.model.predict(Old_states)
new_qs = self.model.predict(New_states)
for old_q, new_q, rew, act, done in zip(old_qs, new_qs, Rewards, Actions, Dones):
if done:
old_q[act] = rew
else:
future_best_val = np.max(new_q)
old_q[act] = rew + DISCOUNT * future_best_val
self.model.fit(Old_states, old_qs,
verbose=0, shuffle=False, epochs=1)
def _dual_train(self, train_data):
Old_states = []
New_states = []
Rewards = []
Dones = []
Actions = []
for old_state, new_state, reward, action, done in train_data:
Old_states.append(old_state)
New_states.append(new_state)
Actions.append(action)
Rewards.append(reward)
Dones.append(done)
Old_states = np.array(Old_states)
New_states = np.array(New_states)
old_view_area = []
old_direction = []
new_view_area = []
new_direction = []
for _old_state, _new_state in zip(Old_states, New_states):
old_view_area.append(_old_state[0])
old_direction.append(_old_state[1])
new_view_area.append(_new_state[0])
new_direction.append(_new_state[1])
old_qs = self.model.predict([old_view_area, old_direction])
new_qs = self.model.predict([new_view_area, new_direction])
for old_q, new_q, rew, act, done in zip(old_qs, new_qs, Rewards, Actions, Dones):
if done:
old_q[act] = rew
else:
future_best_val = np.max(new_q)
old_q[act] = rew + DISCOUNT * future_best_val
self.model.fit([old_view_area, old_direction], old_qs,
verbose=0, shuffle=False, epochs=1)
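# Illustrative sketch (hypothetical helper, not used elsewhere): the target written
# into old_q above is the standard one-step Q-learning target. With a toy discount
# of 0.9, reward 1 and best future Q-value 2, the non-terminal target is 2.8;
# terminal transitions keep only the reward.
def _example_q_target(reward=1.0, future_best_val=2.0, discount=0.9, done=False):
    return reward if done else reward + discount * future_best_val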
EPOCHS = settings.EPOCHS
SIM_COUNT = settings.SIM_COUNT
REPLAY_MEMORY_SIZE = settings.REPLAY_MEMORY_SIZE
MIN_BATCH_SIZE = settings.MIN_BATCH_SIZE
MAX_BATCH_SIZE = settings.MAX_BATCH_SIZE
DISCOUNT = settings.DISCOUNT
AGENT_LR = settings.AGENT_LR
FREE_MOVE = settings.FREE_MOVE
MODEL_NAME = settings.MODEL_NAME
LOAD_MODEL = settings.LOAD_MODEL
ALLOW_TRAIN = settings.ALLOW_TRAIN
SAVE_PICS = settings.SAVE_PICS
STATE_OFFSET = settings.STATE_OFFSET
FIRST_EPS = settings.FIRST_EPS
RAMP_EPS = settings.RAMP_EPS
INITIAL_SMALL_EPS = settings.INITIAL_SMALL_EPS
END_EPS = settings.END_EPS
EPS_INTERVAL = settings.EPS_INTERVAL
SHOW_EVERY = settings.SHOW_EVERY
RENDER_DELAY = settings.RENDER_DELAY
SHOW_LAST = settings.SHOW_LAST
PLOT_ALL_QS = settings.PLOT_ALL_QS
COMBINE_QS = settings.COMBINE_QS
def training():
try:
episode_offset = np.load(f"{MODEL_NAME}/last-episode-num.npy", allow_pickle=True)
except FileNotFoundError:
episode_offset = 0
eps_iter = iter(np.linspace(RAMP_EPS, END_EPS, EPS_INTERVAL))
time_start = time.time()
emergency_break = False
for episode in range(0, EPOCHS):
try:
if not settings.STEP_TRAINING and ALLOW_TRAIN:
agent.train()
if not (episode + episode_offset) % 100 and episode > 0:
agent.save_model()
np.save(f"{MODEL_NAME}/last-episode-num.npy", episode + episode_offset)
Pred_sep.append(len(Predicts[0]))
if not (episode + episode_offset) % SHOW_EVERY:
render = True
else:
render = False
if episode == EPOCHS - 1 or emergency_break:
eps = 0
render = True
if SHOW_LAST:
input("Last agent is waiting...")
elif episode == 0 or not ALLOW_TRAIN:
eps = 0
render = True
elif episode < EPS_INTERVAL / 4:
eps = FIRST_EPS
# elif episode < EPS_INTERVAL:
# eps = 0.3
else:
try:
eps = next(eps_iter)
except StopIteration:
                    eps_iter = iter(np.linspace(INITIAL_SMALL_EPS, END_EPS, EPS_INTERVAL))
# -*- coding: utf-8 -*-
from io_utils.plot.plot_maps import cp_map, cp_scatter_map
import numpy as np
import pandas as pd
import tempfile
import os
import io_utils.root_path as root_path
from netCDF4 import Dataset
from smecv_grid.grid import SMECV_Grid_v052
from io_utils.plot.colormaps import cm_sm
import cartopy.crs as ccrs
import shutil
from tempfile import TemporaryDirectory
def test_scatter_map():
with TemporaryDirectory() as out_dir:
lons = np.linspace(-160, 160, 160)
lats = np.linspace(90, -90, 160)
values = np.random.rand(160)
f, imax, im = cp_scatter_map(lons, lats, values)
filename = 'plot_scatter.png'
f.savefig(os.path.join(out_dir, filename))
        print('Stored plot in {}'.format(out_dir))
assert os.path.isfile(os.path.join(out_dir, filename))
def test_area_multiindex():
with TemporaryDirectory() as out_dir:
        lons = np.linspace(-20, 20, 41)
# SPDX-FileCopyrightText: <NAME> Tecnologia
# SPDX-License-Identifier: BSD-3-Clause
import math
import numpy as np
from typing import List
from dataclasses import dataclass, field
from gym_ignition.rbd.idyntree import numpy
from adherent.data_processing import utils
from gym_ignition.rbd.conversions import Quaternion
from gym_ignition.rbd.idyntree import kindyncomputations
@dataclass
class GlobalFrameFeatures:
"""Class for the global features associated to each retargeted frame."""
# Features computation
ik_solutions: List
dt_mean: float
kindyn: kindyncomputations.KinDynComputations
frontal_base_dir: List
frontal_chest_dir: List
# Features storage
base_positions: List = field(default_factory=list)
ground_base_directions: List = field(default_factory=list)
ground_chest_directions: List = field(default_factory=list)
facing_directions: List = field(default_factory=list)
base_velocities: List = field(default_factory=list)
base_angular_velocities: List = field(default_factory=list)
s: List = field(default_factory=list)
s_dot: List = field(default_factory=list)
@staticmethod
def build(ik_solutions: List,
dt_mean: float,
kindyn: kindyncomputations.KinDynComputations,
frontal_base_dir: List,
frontal_chest_dir: List) -> "GlobalFrameFeatures":
"""Build an empty GlobalFrameFeatures."""
return GlobalFrameFeatures(ik_solutions=ik_solutions,
dt_mean=dt_mean,
kindyn=kindyn,
frontal_base_dir=frontal_base_dir,
frontal_chest_dir=frontal_chest_dir)
def reset_robot_configuration(self, joint_positions: List, base_position: List, base_quaternion: List) -> None:
"""Reset the robot configuration."""
world_H_base = numpy.FromNumPy.to_idyntree_transform(
position=np.array(base_position),
quaternion=np.array(base_quaternion)).asHomogeneousTransform().toNumPy()
self.kindyn.set_robot_state(s=joint_positions, ds=np.zeros(len(joint_positions)), world_H_base=world_H_base)
def compute_global_frame_features(self) -> None:
"""Extract global features associated to each retargeted frame"""
# Debug
print("Computing global frame features")
# Subsampling (discard one ik solution over two)
for frame_idx in range(0, len(self.ik_solutions), 2):
ik_solution = self.ik_solutions[frame_idx]
# Retrieve the base pose and the joint positions
joint_positions = np.asarray(ik_solution["joint_positions"])
base_position = np.asarray(ik_solution["base_position"])
base_quaternion = np.asarray(ik_solution["base_quaternion"])
# Reset the robot configuration
self.reset_robot_configuration(joint_positions=joint_positions,
base_position=base_position,
base_quaternion=base_quaternion)
# Base position
self.base_positions.append(base_position)
# Ground base direction
base_rotation = Quaternion.to_rotation(np.array(base_quaternion))
base_direction = base_rotation.dot(self.frontal_base_dir) # we are interested in the frontal base direction
ground_base_direction = [base_direction[0], base_direction[1]] # project on the ground
ground_base_direction = ground_base_direction / np.linalg.norm(ground_base_direction) # of unitary norm
self.ground_base_directions.append(ground_base_direction)
# Ground chest direction
world_H_base = self.kindyn.get_world_base_transform()
base_H_chest = self.kindyn.get_relative_transform(ref_frame_name="root_link", frame_name="chest")
world_H_chest = world_H_base.dot(base_H_chest)
chest_rotation = world_H_chest[0:3, 0:3]
chest_direction = chest_rotation.dot(self.frontal_chest_dir) # we are interested in the frontal chest direction
ground_chest_direction = [chest_direction[0], chest_direction[1]] # project on the ground
ground_chest_direction = ground_chest_direction / np.linalg.norm(ground_chest_direction) # of unitary norm
self.ground_chest_directions.append(ground_chest_direction)
# Facing direction
facing_direction = ground_base_direction + ground_chest_direction # mean of ground base and chest directions
facing_direction = facing_direction / np.linalg.norm(facing_direction) # of unitary norm
self.facing_directions.append(facing_direction)
# Joint angles
joint_angles = joint_positions
self.s.append(joint_angles)
# Do not compute velocities by differentiation for the first frame
if frame_idx == 0:
continue
# Joint velocities by differentiation of joint angles
joint_angles_prev = self.s[-2]
joint_velocities = (joint_angles - joint_angles_prev) / self.dt_mean
self.s_dot.append(joint_velocities)
# Base velocities by differentiation of base positions
base_position_prev = self.base_positions[-2]
base_velocity = (base_position - base_position_prev) / self.dt_mean
self.base_velocities.append(base_velocity)
# Base angular velocities by differentiation of ground base directions
ground_base_direction_prev = self.ground_base_directions[-2]
cos_theta = np.dot(ground_base_direction_prev, ground_base_direction) # unitary norm vectors
sin_theta = np.cross(ground_base_direction_prev, ground_base_direction) # unitary norm vectors
theta = math.atan2(sin_theta, cos_theta)
base_angular_velocity = theta / self.dt_mean
self.base_angular_velocities.append(base_angular_velocity)
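# Illustrative sketch (hypothetical helper, not used elsewhere): the base yaw rate
# above is the signed angle between consecutive ground base directions,
# theta = atan2(cross, dot), divided by the mean timestep. A 90-degree turn over
# 0.5 s gives roughly pi rad/s.
def _example_base_angular_velocity():
    prev_dir = np.array([1.0, 0.0])
    curr_dir = np.array([0.0, 1.0])
    dt_mean = 0.5
    theta = math.atan2(np.cross(prev_dir, curr_dir), np.dot(prev_dir, curr_dir))
    return theta / dt_mean  # ~= math.pi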
@dataclass
class GlobalWindowFeatures:
"""Class for the global features associated to a window of retargeted frames."""
# Features computation
window_length_frames: int
window_step: int
window_indexes: List
# Features storage
desired_velocities: List = field(default_factory=list)
base_positions: List = field(default_factory=list)
facing_directions: List = field(default_factory=list)
base_velocities: List = field(default_factory=list)
@staticmethod
def build(window_length_frames: int,
window_step: int,
window_indexes: List) -> "GlobalWindowFeatures":
"""Build an empty GlobalWindowFeatures."""
return GlobalWindowFeatures(window_length_frames=window_length_frames,
window_step=window_step,
window_indexes=window_indexes)
def compute_global_window_features(self, global_frame_features: GlobalFrameFeatures) -> None:
"""Extract global features associated to a window of retargeted frames."""
# Debug
print("Computing global window features")
initial_frame = self.window_length_frames
final_frame = len(global_frame_features.base_positions) - self.window_length_frames - self.window_step - 1
# For each window of retargeted frames
for i in range(initial_frame, final_frame):
# Initialize placeholders for the current window
future_traj_length = 0
current_global_base_positions = []
current_global_facing_directions = []
current_global_base_velocities = []
for window_index in self.window_indexes:
# Store the base positions, facing directions and base velocities in the current window
current_global_base_positions.append(global_frame_features.base_positions[i + window_index])
current_global_facing_directions.append(global_frame_features.facing_directions[i + window_index])
current_global_base_velocities.append(global_frame_features.base_velocities[i + window_index])
# Compute the desired velocity as sum of distances between the base positions in the future trajectory
if window_index == self.window_indexes[0]:
base_position_prev = global_frame_features.base_positions[i + window_index]
else:
base_position = global_frame_features.base_positions[i + window_index]
base_position_distance = np.linalg.norm(base_position - base_position_prev)
future_traj_length += base_position_distance
base_position_prev = base_position
# Store global features for the current window
self.desired_velocities.append(future_traj_length)
self.base_positions.append(current_global_base_positions)
self.facing_directions.append(current_global_facing_directions)
self.base_velocities.append(current_global_base_velocities)
@dataclass
class LocalFrameFeatures:
"""Class for the local features associated to each retargeted frame."""
# Features storage
base_x_velocities: List = field(default_factory=list)
base_z_velocities: List = field(default_factory=list)
base_angular_velocities: List = field(default_factory=list)
@staticmethod
def build() -> "LocalFrameFeatures":
"""Build an empty LocalFrameFeatures."""
return LocalFrameFeatures()
def compute_local_frame_features(self, global_frame_features: GlobalFrameFeatures) -> None:
"""Extract local features associated to each retargeted frame"""
# Debug
print("Computing local frame features")
# The definition of the base angular velocities is such that they coincide locally and globally
self.base_angular_velocities = global_frame_features.base_angular_velocities
for i in range(1, len(global_frame_features.base_positions)):
# Retrieve the base position and orientation at the previous step i - 1
# along with the base velocity from step i-1 to step i
prev_global_base_position = global_frame_features.base_positions[i - 1]
prev_global_ground_base_direction = global_frame_features.ground_base_directions[i - 1]
current_global_base_velocity = [global_frame_features.base_velocities[i - 1][0],
global_frame_features.base_velocities[i - 1][1]]
# Define the 2D local reference frame at step i-1 using the base position and orientation
            reference_base_pos = np.asarray([prev_global_base_position[0], prev_global_base_position[1]])
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data as data
import src.utils as utils
from typing import List, Tuple, Optional, Union
from pathlib import Path
from catalyst.core import Callback, CallbackOrder, State
from catalyst.dl import SupervisedRunner
from catalyst.utils import get_device
from scipy.stats import cauchy
from sklearn.metrics import average_precision_score, roc_auc_score
from .base import NNModel, Matrix
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.logits = logits
self.reduce = reduce
def forward(self, inputs, targets):
if self.logits:
BCE_loss = F.binary_cross_entropy_with_logits(
inputs, targets, reduce=False)
else:
BCE_loss = F.binary_cross_entropy(inputs, targets, reduce=False)
pt = torch.exp(-BCE_loss)
F_loss = self.alpha * (1 - pt)**self.gamma * BCE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
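# Illustrative sketch (hypothetical helper, not used elsewhere): FocalLoss with
# logits=True expects raw scores and binary float targets of the same shape;
# alpha/gamma keep the defaults defined above.
def _example_focal_loss():
    criterion = FocalLoss(alpha=1, gamma=2, logits=True)
    logits = torch.randn(8)
    targets = torch.randint(0, 2, (8,)).float()
    return criterion(logits, targets)  # scalar, since reduce=True by default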
def get_criterion(criterion_params: dict):
name = criterion_params["name"]
params = {} if criterion_params.get(
"params") is None else criterion_params["params"]
if name == "FocalLoss":
return FocalLoss(**params)
else:
return nn.__getattribute__(name)(**params)
def get_optimizer(model, optimizer_params: dict):
name = optimizer_params["name"]
return optim.__getattribute__(name)(model.parameters(),
**optimizer_params["params"])
def get_scheduler(optimizer, scheduler_params: dict):
name = scheduler_params["name"]
return lr_scheduler.__getattribute__(name)(optimizer,
**scheduler_params["params"])
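# Illustrative sketch (hypothetical helper, not used elsewhere): the *_params dicts
# are plain config mappings; names are looked up on torch.nn / torch.optim /
# lr_scheduler, so any attribute that exists there is valid. Values are placeholders.
def _example_factories(model):
    criterion = get_criterion({"name": "BCEWithLogitsLoss"})
    optimizer = get_optimizer(model, {"name": "Adam", "params": {"lr": 1e-3}})
    scheduler = get_scheduler(optimizer, {"name": "StepLR",
                                          "params": {"step_size": 10, "gamma": 0.5}})
    return criterion, optimizer, scheduler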
class TabularDataset(data.Dataset):
def __init__(self, df: Matrix, target: Optional[Matrix]):
self.values = df
self.target = target
def __len__(self):
return len(self.values)
def __getitem__(self, idx: int):
x = self.values[idx].astype(np.float32)
if self.target is not None:
return x, self.target[idx]
else:
return x
class FileDataset(data.Dataset):
def __init__(self,
df: Matrix,
target: Optional[Matrix],
file_dir: str,
scale="normalize"):
self.values = df
self.target = target
self.file_dir = Path(file_dir)
self.scale = scale
def __len__(self):
return len(self.values)
def __getitem__(self, idx: int):
filename = self.values[idx][0]
df = pd.read_csv(self.file_dir / filename, sep="\t", header=None)
spectrum = df[1].values
if self.scale == "normalize":
spectrum = (spectrum - spectrum.mean()) / spectrum.std()
else:
spectrum = (spectrum - spectrum.min()) / (
spectrum.max() - spectrum.min())
spectrum = spectrum[:511].astype(np.float32)
if self.target is not None:
return spectrum, self.target[idx]
else:
return spectrum
class RawFittingDataset(data.Dataset):
def __init__(self,
df: Matrix,
target: Optional[Matrix],
file_dir: str,
fitting_file_dir: str,
scale="normalize",
crop=False,
flip=False,
noise=False,
peak=False):
self.values = df
self.target = target
self.file_dir = Path(file_dir)
self.fitting_file_dir = Path(fitting_file_dir)
self.scale = scale
self.crop = crop
self.flip = flip
self.noise = noise
self.peak = peak
def __len__(self):
return len(self.values)
def __getitem__(self, idx: int):
filename = self.values[idx][0]
df = pd.read_csv(self.file_dir / filename, sep="\t", header=None)
fitting = pd.read_csv(
self.fitting_file_dir / filename, sep="\t", header=None)
spectrum = df[1].values
spectrum_fitting = fitting[1].values
if self.noise:
scale = np.random.randint(50, 200)
            noise = scale * np.random.normal(size=len(spectrum))
spectrum = spectrum + noise
if self.peak:
idxmax = fitting[0].values.argmax()
sign = 1 if np.random.rand() > 0.5 else -1
peak_pos = fitting[0].values[idxmax] + np.random.randint(
50, 150) * sign
scale = np.abs(np.random.normal() * 40)
false_peak = cauchy.pdf(df[0].values, peak_pos, scale)
ratio = df[1].max() / false_peak.max()
false_peak = false_peak * ratio * min(np.random.rand(), 0.8)
spectrum = spectrum + false_peak
if self.crop:
start = np.random.randint(0, 111)
spectrum = spectrum[start:start + 400].astype(np.float32)
spectrum_fitting = spectrum_fitting[start:start + 400].astype(
np.float32)
else:
spectrum = spectrum[:511].astype(np.float32)
spectrum_fitting = spectrum_fitting[:511].astype(np.float32)
if self.scale == "normalize":
spectrum = (spectrum - spectrum.mean()) / spectrum.std()
spectrum_fitting = (spectrum_fitting - spectrum_fitting.mean()
) / spectrum_fitting.std()
else:
spectrum = (spectrum - spectrum.min()) / (
spectrum.max() - spectrum.min())
                spectrum_fitting = (spectrum_fitting - spectrum_fitting.min()) / (
                    spectrum_fitting.max() - spectrum_fitting.min())
if self.flip:
if np.random.rand() > 0.5:
spectrum = np.flip(spectrum).copy()
spectrum_fitting = np.flip(spectrum_fitting).copy()
x = np.asarray([spectrum, spectrum_fitting]).astype(np.float32)
if self.target is not None:
return x, self.target[idx]
else:
return x
class FittingDataset(data.Dataset):
def __init__(self,
df: Matrix,
target: Optional[Matrix],
file_dir: str,
scale="normalize",
crop=False,
flip=False,
noise=False,
peak=False):
self.values = df
self.target = target
self.file_dir = Path(file_dir)
self.scale = scale
self.crop = crop
self.flip = flip
self.noise = noise
self.peak = peak
def __len__(self):
return len(self.values)
def __getitem__(self, idx: int):
filename = self.values[idx][0]
params2 = self.values[idx][1]
df = pd.read_csv(self.file_dir / filename, sep="\t", header=None)
spectrum = df[1].values
if self.noise:
scale = np.random.randint(50, 200)
            noise = scale * np.random.normal(size=len(spectrum))
spectrum = spectrum + noise
if self.peak:
sign = 1 if np.random.rand() > 0.5 else -1
peak_pos = params2 + np.random.randint(50, 150) * sign
scale = np.abs(np.random.normal() * 40)
false_peak = cauchy.pdf(df[0].values, peak_pos, scale)
ratio = df[1].max() / false_peak.max()
false_peak = false_peak * ratio * min(np.random.rand(), 0.8)
spectrum = spectrum + false_peak
if self.crop:
start = np.random.randint(0, 111)
spectrum = spectrum[start:start + 400].astype(np.float32)
else:
spectrum = spectrum[:511].astype(np.float32)
if self.scale == "normalize":
spectrum = (spectrum - spectrum.mean()) / spectrum.std()
else:
spectrum = (spectrum - spectrum.min()) / (
spectrum.max() - spectrum.min())
if self.flip:
if np.random.rand() > 0.5:
spectrum = np.flip(spectrum).copy()
if self.target is not None:
return spectrum, self.target[idx]
else:
return spectrum
def get_loader(loader_params: dict, df: Matrix, target: Optional[Matrix]):
dataset_type = loader_params.get("dataset_type")
if dataset_type == "from_file":
scale = "normalize" if loader_params.get(
"scale") is None else "min_max"
dataset = FileDataset(df, target, loader_params["file_dir"], scale)
params = loader_params.copy()
params.pop("dataset_type")
params.pop("file_dir")
        params.pop("scale", None)  # "scale" may be absent from loader_params
elif dataset_type == "with_fitting":
scale = "normalize" if loader_params.get(
"scale") is None else "min_max"
crop = loader_params["crop"]
flip = loader_params["flip"]
noise = loader_params["noise"]
peak = loader_params.get("peak")
if peak is None:
peak = False
dataset = FittingDataset( # type: ignore
df,
target,
loader_params["file_dir"],
scale,
crop=crop,
flip=flip,
noise=noise,
peak=peak)
params = loader_params.copy()
params.pop("dataset_type")
params.pop("file_dir")
        params.pop("scale", None)
params.pop("crop")
params.pop("flip")
params.pop("noise")
if params.get("peak") is not None:
params.pop("peak")
elif dataset_type == "raw_and_fitting":
scale = "normalize" if loader_params.get(
"scale") is None else "min_max"
crop = loader_params["crop"]
flip = loader_params["flip"]
noise = loader_params["noise"]
dataset = RawFittingDataset( # type: ignore
df,
target,
loader_params["file_dir"],
loader_params["fitting_file_dir"],
scale,
crop=crop,
flip=flip,
noise=noise)
params = loader_params.copy()
params.pop("dataset_type")
params.pop("file_dir")
params.pop("fitting_file_dir")
        params.pop("scale", None)
params.pop("crop")
params.pop("flip")
params.pop("noise")
    else:
        dataset = TabularDataset(df, target)  # type: ignore
        params = loader_params.copy()
        params.pop("dataset_type", None)
    return data.DataLoader(dataset, **params)
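# Illustrative sketch (hypothetical helper, not used elsewhere): keys consumed by
# the dataset branch (dataset_type, file_dir, scale, ...) live in the same dict as
# the DataLoader kwargs (batch_size, shuffle, ...). The directory is a placeholder;
# df's first column is expected to hold spectrum file names.
def _example_get_loader(df, target):
    loader_params = {
        "dataset_type": "from_file",
        "file_dir": "input/spectra",  # hypothetical path
        "scale": "min_max",           # presence of the key selects min-max scaling here
        "batch_size": 64,
        "shuffle": True,
    }
    return get_loader(loader_params, df, target)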
class Conv1dBNReLU(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size: int,
stride: int):
super(Conv1dBNReLU, self).__init__()
self.seq = nn.Sequential(
nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride), nn.BatchNorm1d(out_channels), nn.ReLU())
def forward(self, x):
return self.seq(x)
class SpatialAttention1d(nn.Module):
def __init__(self, in_channels: int):
super(SpatialAttention1d, self).__init__()
self.squeeze = nn.Conv1d(in_channels, 1, kernel_size=1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
z = self.squeeze(x)
z = self.sigmoid(z)
return x * z
class GAB1d(nn.Module):
def __init__(self, in_channels: int, reduction=4):
super(GAB1d, self).__init__()
self.global_avgpool = nn.AdaptiveMaxPool1d(1)
self.conv1 = nn.Conv1d(
in_channels, in_channels // reduction, kernel_size=1, stride=1)
self.conv2 = nn.Conv1d(
in_channels // reduction, in_channels, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
z = self.global_avgpool(x)
z = self.relu(self.conv1(z))
z = self.sigmoid(self.conv2(z))
return x * z
class SCse1d(nn.Module):
def __init__(self, in_channels: int):
super(SCse1d, self).__init__()
self.satt = SpatialAttention1d(in_channels)
self.catt = GAB1d(in_channels)
def forward(self, x):
return self.satt(x) + self.catt(x)
class CNN1D(nn.Module):
def __init__(self, model_params: dict):
super(CNN1D, self).__init__()
modules = []
architecture = model_params['architecture']
for module in architecture:
name = module["name"]
params = {} if module.get("params") is None else module["params"]
if module["type"] == "torch":
modules.append(nn.__getattribute__(name)(**params))
else:
modules.append(globals().get(name)(**params)) # type: ignore
self.seq = nn.Sequential(*modules)
def forward(self, x):
batch_size = x.size(0)
if x.ndim == 2:
x = x.view(batch_size, 1, -1)
return self.seq(x).view(batch_size)
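# Illustrative sketch (hypothetical config, not used elsewhere): CNN1D is assembled
# from a list of module specs; "torch" entries are looked up on torch.nn, anything
# else on this module's globals (Conv1dBNReLU, SCse1d, ...). Channel sizes are
# placeholders; usage would be model = CNN1D(_example_cnn1d_config()).
def _example_cnn1d_config():
    return {
        "architecture": [
            {"type": "custom", "name": "Conv1dBNReLU",
             "params": {"in_channels": 1, "out_channels": 16,
                        "kernel_size": 5, "stride": 2}},
            {"type": "custom", "name": "SCse1d", "params": {"in_channels": 16}},
            {"type": "torch", "name": "AdaptiveAvgPool1d", "params": {"output_size": 1}},
            {"type": "torch", "name": "Flatten"},
            {"type": "torch", "name": "Linear",
             "params": {"in_features": 16, "out_features": 1}},
        ]
    }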
class mAPCallback(Callback):
def __init__(self, prefix: str = "mAP"):
super().__init__(CallbackOrder.Metric)
self.prefix = prefix
def on_loader_start(self, state: State):
self.prediction: List[np.ndarray] = []
self.target: List[np.ndarray] = []
def on_batch_end(self, state: State):
targ = state.input["targets"].detach().cpu().numpy()
out = state.output["logits"].detach().cpu().numpy()
self.prediction.append(out)
self.target.append(targ)
score = average_precision_score(targ, out)
score = np.nan_to_num(score)
state.batch_metrics[self.prefix] = score
    def on_loader_end(self, state: State):
        y_pred = np.concatenate(self.prediction, axis=0)
#!/usr/bin/python
##
## Here we consider the function PL(d) = PL(d0) + 10*n*log(d/d0) -> y = theta0 + theta1*(10*log(d/d0))
## and we want to estimate the PL(d0) a constant and "n"
##
import csv
import numpy as np
from numpy.linalg import pinv
from numpy import dot
import math
from threading import Lock
class RSSIKalmanFilter:
def __init__(self, m, var, measurment_var, d0 = 1.0):
self.m = np.transpose(m)
        self.P = np.array([[var[0], 0], [0, var[1]]])
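# Illustrative sketch (hypothetical helper, not used elsewhere): the log-distance
# path-loss model referenced above, with toy values PL(d0)=40 dB, n=2.5, d0=1 m,
# predicts 40 + 10*2.5*log10(8) ~ 62.6 dB at d=8 m.
def _example_path_loss(pl_d0=40.0, n=2.5, d0=1.0, d=8.0):
    return pl_d0 + 10.0 * n * math.log10(d / d0)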
from torch.nn import Module
from torch import nn
import torch
# import model.transformer_base
import math
from model import GCN
import utils.util as util
import numpy as np
class MultiHeadAttModel(Module):
def __init__(self, in_features=33, kernel_size=10, d_model=512, num_stage=2, dct_n=10, num_heads=1, parts=1):
super(MultiHeadAttModel, self).__init__()
self.heads = nn.ModuleList([AttHeadModel(in_features, kernel_size, d_model, num_stage, dct_n, parts) for _ in range(num_heads)])
self.linear = nn.Linear(num_heads * 20, 20)
def forward(self, src, output_n=25, input_n=50, itera=1, dct_m=[]):
return self.linear(torch.cat([h(src, output_n, input_n, itera, dct_m) for h in self.heads], dim=-1))
class AttHeadModel(Module):
def __init__(self, in_features=33, kernel_size=10, d_model=512, num_stage=2, dct_n=10, parts=1):
super(AttHeadModel, self).__init__()
self.in_features = [in_features]
if parts == 3: self.in_features = [15, 9, 9]
if parts == 33: self.in_features = np.ones(parts)
self.kernel_size = kernel_size
self.d_model = d_model
# self.seq_in = seq_in
self.dct_n = dct_n
self.parts = parts
# ks = int((kernel_size + 1) / 2)
assert kernel_size == 10
self.convQ = nn.ModuleList()
self.convK = nn.ModuleList()
for features in self.in_features:
self.convQ.append(nn.Sequential(nn.Conv1d(in_channels=features, out_channels=d_model, kernel_size=6,
bias=False),
nn.ReLU(),
nn.BatchNorm1d(d_model),
nn.Conv1d(in_channels=d_model, out_channels=d_model, kernel_size=5,
bias=False),
nn.ReLU(),
nn.BatchNorm1d(d_model)))
self.convK.append(nn.Sequential(nn.Conv1d(in_channels=features, out_channels=d_model, kernel_size=6,
bias=False),
nn.ReLU(),
nn.BatchNorm1d(d_model),
nn.Conv1d(in_channels=d_model, out_channels=d_model, kernel_size=5,
bias=False),
nn.ReLU(),
nn.BatchNorm1d(d_model)))
def forward(self, src, output_n=25, input_n=50, itera=1, dct_m=[]):
"""
:param src: [batch_size,seq_len,feat_dim]
:param output_n:
:param input_n:
:param frame_n:
:param dct_n:
:param itera:
:return:
"""
dct_n = self.dct_n
if dct_m == []:
# Create DCT matrix and its inverse
dct_m, idct_m = util.get_dct_matrix(self.kernel_size + output_n)
dct_m = torch.from_numpy(dct_m).float()
idct_m = torch.from_numpy(idct_m).float()
if torch.cuda.is_available():
dct_m = dct_m.cuda()
idct_m = idct_m.cuda()
# Take only the input seq
src = src[:, :input_n] # [bs,in_n,dim]
src_tmp = src.clone()
bs = src.shape[0]
full_body = torch.unsqueeze(src_tmp, 0)
if self.parts > 1:
if self.parts == 3:
right_arm_index = [6, 7, 8,
9, 10, 11,
12, 13, 14]
left_arm_index = [15, 16, 17,
18, 19, 20,
21, 22, 23]
torso_index = [0, 1, 2,
3, 4, 5,
24, 25, 26,
27, 28, 29,
30, 31, 32]
right_arm_src_tmp = src[:, :, right_arm_index]
left_arm_src_tmp = src[:, :, left_arm_index]
torso_src_tmp = src[:, :, torso_index]
full_body = [torso_src_tmp, right_arm_src_tmp, left_arm_src_tmp]
full_body_dct = torch.Tensor().cuda()
for i, part in enumerate(full_body):
src_tmp = part
# Temporal variables for keys and query
src_key_tmp = src_tmp.transpose(1, 2)[:, :, :(input_n - output_n)].clone() # [batch, dims, input_n-output_n]
src_query_tmp = src_tmp.transpose(1, 2)[:, :, -self.kernel_size:].clone() # [batch, dims, kernel, bins]
# Compute number of subsequences
vn = input_n - self.kernel_size - output_n + 1
# Compute number of frames per subsequence
vl = self.kernel_size + output_n
idx = np.expand_dims(np.arange(vl), axis=0) + \
                  np.expand_dims(np.arange(vn), axis=1)
import numpy as np
from numpy.fft import fft, ifft
norm = None  # or "ortho"
x_even = np.array([8, 9, 1, 3])
print("fft(x_even): ",
|
fft(x_even, norm=norm)
|
numpy.fft.fft
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-11-05 16:04:00
# @Last Modified by: <NAME>
# @Last Modified time: 2018-11-08 16:38:20
import sys
import cv2 # imread
import torch
import torch.nn as nn
import numpy as np
import scipy.io as scio
import os
import time
from os.path import realpath, dirname, join
from net import SiamRPNBIG
from run_SiamRPN import SiamRPN_init, SiamRPN_track
from utils import get_axis_aligned_bbox, cxy_wh_2_rect
# load net
net_file = join(realpath(dirname(__file__)), 'SiamRPNBIG.model')
net = SiamRPNBIG()
net.load_state_dict(torch.load(net_file))
net.eval().cuda()
OTB100_path = '/home/song/srpn/dataset/otb100'
result_path = '/home/song/srpn/result/'
# warm up
for i in range(10):
net.temple(torch.autograd.Variable(torch.FloatTensor(1, 3, 127, 127)).cuda())
net(torch.autograd.Variable(torch.FloatTensor(1, 3, 255, 255)).cuda())
idx=0
names = [name for name in os.listdir('/home/song/srpn/dataset/otb100')]
# human4
# skating2
"""
for i, name in enumerate(names):
print('idx == {:03d} name == {:10}'.format(i, name))
"""
for ids, x in enumerate(os.walk(OTB100_path)):
it1, it2, it3 = x #it1 **/img it2 [] it3 [img1, img2, ...]
    if it1.rfind('img')!=-1 and len(it3) > 50:  # str.rfind() returns the index of the last occurrence (searching from the right), or -1 if there is no match
name = it1.split('/')[-2]
imgpath=[]
it3 = sorted(it3)
for inames in it3:
imgpath.append(os.path.join(it1, inames))
gtpath=os.path.join(OTB100_path, name, 'groundtruth_rect.txt')
gt=(open(gtpath, 'r')).readline()
        if gt.find(',')!=-1:
            toks = list(map(float, gt.split(',')))
        else:
            toks = list(map(float, gt.split(' ')))
        # the ground truth stores the top-left corner plus w, h
        # target_pos is the target center point
        # target_sz is (w, h)
cx=toks[0]+toks[2]*0.5
cy=toks[1]+toks[3]*0.5
w=toks[2]
h=toks[3]
        target_pos, target_sz = np.array([cx, cy]), np.array([w, h])
import numpy as np
from scipy.stats import rankdata
import itertools
from PyEMD import EMD
import nolds
"""Complete package for calculating any kind of multiscale entropy features """
"""Coarse graining methods
- Normal
- Moving average (X)
- Volatility series (X)
- Moving average volatility series (X)
- EMD - Coarse-to-fine series (X) --> undetermined scale --> put a limitation on it
- EMD - Fine-to-coarse series (X) --> undetermined scale --> put a limitation on it
- Composite coarse graining (X)
Entropy measurement methods
The permutation methods mentioned are returned in the same way as described above
- Permutation entropy (X)
- Modified permutation entropy (X)
- Weighted permutation entropy (X)
- Weighted modified PE (X)
- Sample entropy (X)
- Composite variation for above (X)"""
def add_perm(perm):
"""Add extra permutations for modified PE case """
perm.append((1,1,0))
perm.append((0,1,1))
perm.append((1,0,1))
perm.append((0,1,0))
perm.append((0,0,0))
perm.append((0,0,1))
perm.append((1,0,0))
return perm
def get_perms(m,mod_flag=0):
"""get all the permutation for entropy calculation """
perm = (list(itertools.permutations(range(m))))
#adding similar instances
if mod_flag==1:
perm=add_perm(perm)
perm=np.array(perm)
return np.array(perm)
def get_1s_pe(x,m=3,lag=1,mod_flag=0,typ=''):
"""All the combinations of permutation entropy for a single scale """
mot_x, wt_x=make_mot_series(x,m,lag,mod_flag=0)
n_p=len(get_perms(m,mod_flag))
dist=get_mot_dist(mot_x,n_p,wt_x,typ=typ)
pe=perm_ent(dist)
return np.array(pe)
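# Illustrative sketch (hypothetical helper, not used elsewhere): single-scale
# permutation entropy of a random series with order m=3 and lag 1. typ='wt'
# would weight motifs by their window variance, mod_flag=1 adds the modified-PE
# tie patterns.
def _example_single_scale_pe():
    x = np.random.randn(500)
    return get_1s_pe(x, m=3, lag=1, mod_flag=0, typ='')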
def make_mot_series(time_series,m=3,lag=1,mod_flag=0):
"""Creates a motif series and returns their with the motif distribution
Input:
- time_series
- m: permutaiton degree, lag: permutation lag
- mod_flag: flag to use modfied PE
Output:
- motif time series,
- corrsponding weights
"""
time_series=np.array(time_series).squeeze()
n=len(time_series)
mot_x, wt_x, mod_mot_x=[], [], []
perms=get_perms(m,0)
perms_mod=get_perms(m,1)
for i in range(n - lag * (m - 1)):
smp=time_series[i:i + lag * m:lag]
wt=np.var(smp)
        # original dense ranking of the data
mot_array1 = np.array(rankdata(smp, method='dense')-1)
val=np.where(np.all(perms==mot_array1,axis=1))[0]
val_mod=val
if val.shape[0]==0:
mot_array = np.array(rankdata(smp, method='ordinal')-1)
val=np.where(np.all(perms==mot_array,axis=1))[0]
val_mod=np.where(np.all(perms_mod==mot_array1,axis=1))[0]
mot_x.append(val[0])
mod_mot_x.append(val_mod[0])
wt_x.append(wt)
if mod_flag==1:
return np.array(mod_mot_x), np.array(wt_x)
elif mod_flag==0:
return np.array(mot_x), np.array(wt_x)
def get_mot_dist(mot_x,n_p,wt_x,typ=''):
"""Create the distribution of motifs
Input:
- mot_x: Motif time series,
- n_p: number of permutations,
- wt_x: weight time series
- typ: type of entropy, normal: '', or weighted: 'wt'
Output:
- motif distribution
"""
mot_dist = [0] * n_p
for j in range(n_p):
if typ=='wt':
wts=wt_x[np.where(abs(mot_x-j)==0)]
num_mots=np.ones(len(np.where(abs(mot_x-j)==0)[0]))
mot_dist[j]=sum(np.multiply(num_mots,wts))
else:
mot_dist[j] = len(np.where(abs(mot_x-j)==0)[0])
    # if no motifs occur at all, return NaNs (an empty distribution breaks the entropy)
if len(mot_x)==0:
mot_dist=np.zeros(n_p)*np.nan
return mot_dist
def perm_ent(mot_dist,m=3):
"""Returns permutation entropy for the motif distribution given --> basic function for permutation entropy """
c=mot_dist
c = [element for element in c if element != 0]
p = np.divide(np.array(c), float(sum(c)))
pe = -sum(p * np.log(p))
return pe#/np.log(factorial(m))
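# Illustrative sketch (hypothetical helper, not used elsewhere): a uniform motif
# distribution over the 3! = 6 ordinal patterns reaches the maximum
# -sum(p*log(p)) = log(6) ~ 1.792.
def _example_perm_ent_uniform():
    return perm_ent([10, 10, 10, 10, 10, 10])  # ~= np.log(6)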
def get_mot_ent_dist(RRs,m,lag,typ='',mod_flag=0):
"""
#RR series for all the scales (list of lists)
Returns four kind of motif distributions
--> normal motif distribution ('' + 0)
--> modified motif distribution ('' + 1)
--> weighted motif distribution ('wt' + 0)
--> weighted modified motif distribution ('wt' + 1)
"""
dist=[]
for rr in RRs:
mot_x , wt_x=make_mot_series(rr,m,lag,mod_flag = mod_flag)
n_p=len(get_perms(m,mod_flag))
mot_dist=get_mot_dist(mot_x,n_p,wt_x,typ='')
dist.append(mot_dist) #Contains motif distribution for all the different scales
d_all=[dist]
return d_all
def ord_dist(mot_dist_x,mot_dist_y):
"""Returns ordinal distance between two motif distributions
Not used anywhere in the code """
c_x=mot_dist_x
c_y=mot_dist_y
m=len(c_x)
p_x=np.divide(np.array(c_x), float(sum(c_x)))
p_y=np.divide(np.array(c_y), float(sum(c_y)))
sq_diff=0
for j in range(m):
sq_diff=sq_diff+(p_x[j] -p_y[j])**2
dm=np.sqrt(m/(m-1))*np.sqrt(sq_diff)
return dm
def get_com_mspe(distS,scale,mspe):
"""Calculate center of mass entropy using ordinal distances as weights
NOT USED ANYWHERE IN THE CODE
"""
distS=np.array(distS)
dm_mat=np.zeros((scale,scale))
for i in range(0,scale-1):
for j in range(i,scale):
dm=ord_dist(distS[i],distS[j])
dm_mat[i,j]=dm
dm_mat[j,i]=dm
com_wts=np.zeros(scale)
for i in range(0,scale):
com_wts[i]=np.sum(dm_mat[i,:])/(scale-1)
com_mspe=np.sum(np.multiply(com_wts,mspe))/np.sum(com_wts)
return com_mspe
def calc_mspe(distS):
"""Calculates the scaled permutation entropy and thier oridnal avg and normal average"""
"""Takes an input which is a list of lists where distS[i] is motif dist with scale i """
mspe=[]
scale=len(distS)
for s in range(0,scale):
distm=distS[s]
pe=perm_ent(distm)
mspe.append(pe)
mspe=np.array(mspe)
#com_mspe=get_com_mspe(distS,scale,mspe)
mspe_fin=np.hstack((mspe))
return mspe_fin
def scale_series(x,scale,cg_typ):
"""Get the different scales of the series based on specific scaling type
Except: composite and emd scaling types
Input:
Time series (x), number of scales (scale), scale type (cg_type)
"""
x_scale=[]
if cg_typ=='base':
for i in range(0,len(x),scale):
#not divided by scale
if i+scale<=len(x):
val=np.sum(x[i:i+scale])/len(x[i:i+scale])
x_scale.append(val)
elif cg_typ=='mov_avg':
wts = np.ones(scale) / scale
val=np.convolve(x, wts, mode='valid')
x_scale.append(val)
elif cg_typ=='mom':
for i in range(0,len(x),scale):
#not divided by scale
if i+scale<=len(x):
val=np.std(x[i:i+scale])
x_scale.append(val)
elif cg_typ=='mavg_mom':
for i in range(0,len(x)):
#not divided by scale
if i+scale<=len(x):
                val = np.std(x[i:i+scale])
import math
import numpy as np
import pytest
from astropy.nddata import NDData, VarianceUncertainty
# from astropy.stats import sigma_clip as sigma_clip_ast
# from numpy.testing import assert_array_equal
from ndcombine import combine_arrays # , sigma_clip
# Test values:
# - without outlier: mean=2.2, median=2.0, std=0.87, sum=22.0, len=10
# - with outlier: mean=11.09, median=2.0, std=28.13, sum=122.0, len=11
TEST_VALUES = [1, 2, 3, 2, 3, 2, 1, 4, 2, 2, 100]
# def test_sigclip():
# """Compare sigma_clip with Astropy."""
# data = np.array(TEST_VALUES, dtype=np.float32)
# mask1 = sigma_clip_ast(data).mask.astype(int)
# mask2 = sigma_clip(data, lsigma=3, hsigma=3, max_iters=10)
# assert_array_equal(mask1, mask2)
# mask2 = sigma_clip(data, lsigma=3, hsigma=3, max_iters=0)
# assert_array_equal(mask2, 0)
# def test_sigclip_with_mask():
# data = np.array(TEST_VALUES, dtype=np.float32)
# mask = np.zeros_like(data, dtype=np.uint16)
# mask[7] = 1
# mask1 = sigma_clip_ast(np.ma.array(data, mask=mask)).mask.astype(int)
# mask2 = sigma_clip(data, mask=mask, lsigma=3, hsigma=3, max_iters=10)
# assert_array_equal(mask1, mask2)
# def test_sigclip_with_var():
# data = np.array(TEST_VALUES, dtype=np.float32)
# var = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1000], dtype=np.float32)
# mask = sigma_clip(data,
# variance=var,
# lsigma=3,
# hsigma=3,
# max_iters=10,
# use_variance=True)
# assert_array_equal(mask, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
# var = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 100_000], dtype=np.float32)
# mask = sigma_clip(data,
# variance=var,
# lsigma=3,
# hsigma=3,
# max_iters=10,
# use_variance=True)
# assert_array_equal(mask, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# var = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 100_000], dtype=np.float32)
# mask = sigma_clip(data,
# variance=var,
# lsigma=3,
# hsigma=3,
# max_iters=10,
# use_variance=False)
# assert_array_equal(mask, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_combine_array(dtype):
data = np.array([TEST_VALUES], dtype=dtype).T
out = combine_arrays(data, method='mean', clipping_method='sigclip')
assert out.data.dtype == np.float64
assert out.mask is None
assert out.uncertainty is None
assert np.isclose(out.data[0], 2.2)
assert out.meta['REJMAP'][0] == 1
out = combine_arrays(data,
method='mean',
clipping_method='sigclip',
clipping_limits=(2, 2))
assert np.isclose(out.data[0], 2)
assert out.meta['REJMAP'][0] == 2
out = combine_arrays(data,
method='mean',
clipping_method='sigclip',
clipping_limits=(5, 5))
assert np.isclose(out.data[0], 11.09, atol=1e-2)
assert out.meta['REJMAP'][0] == 0
out = combine_arrays(data,
method='mean',
clipping_method='sigclip',
max_iters=0)
assert np.isclose(out.data[0], 11.09, atol=1e-2)
assert out.meta['REJMAP'][0] == 0
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_combine_nddata(dtype):
data = [NDData(data=np.array([val], dtype=dtype)) for val in TEST_VALUES]
out = combine_arrays(data, method='mean', clipping_method='sigclip')
assert out.data.dtype == np.float64
assert out.mask is None
assert out.uncertainty is None
assert np.isclose(out.data[0], 2.2)
assert out.meta['REJMAP'][0] == 1
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_combine_median(dtype):
data = np.array([TEST_VALUES], dtype=dtype).T
var = np.ones_like(data)
out = combine_arrays(data,
variance=var,
method='median',
clipping_method='sigclip')
assert out.data.dtype == np.float64
assert out.mask is None
assert np.isclose(out.data[0], 2.)
assert np.isclose(out.uncertainty.array[0],
1 / 10 * math.pi / 2) # 10 valid values
assert out.meta['REJMAP'][0] == 1
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_combine_sum(dtype):
data = np.array([TEST_VALUES], dtype=dtype).T
var = np.ones_like(data)
out = combine_arrays(data,
variance=var,
method='sum',
clipping_method='sigclip')
assert out.data.dtype == np.float64
assert out.mask is None
assert np.isclose(out.data[0], 22)
assert np.isclose(out.uncertainty.array[0], 10) # 10 valid values
assert out.meta['REJMAP'][0] == 1
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_combine_no_clipping(dtype):
data = np.array([TEST_VALUES], dtype=dtype).T
out = combine_arrays(data, method='mean', clipping_method='none')
assert np.isclose(out.data[0], 11.09, atol=1e-2)
assert out.meta['REJMAP'][0] == 0
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_combine_array_with_mask(dtype):
data = np.array([TEST_VALUES], dtype=dtype).T
mask = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0]], dtype=bool).T
out = combine_arrays(data,
mask=mask,
method='mean',
clipping_method='sigclip')
assert np.isclose(out.data[0], 2.)
assert out.meta['REJMAP'][0] == 4
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_combine_nddata_with_mask(dtype):
mask_values = [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0]
data = [
NDData(data=np.array([val], dtype=dtype),
mask=np.array([mask], dtype=bool))
for val, mask in zip(TEST_VALUES, mask_values)
]
out = combine_arrays(data, method='mean', clipping_method='sigclip')
assert np.isclose(out.data[0], 2.)
assert out.meta['REJMAP'][0] == 4
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_array_with_variance(dtype):
data = np.array([TEST_VALUES], dtype=dtype).T
var = np.ones_like(data)
out = combine_arrays(data,
variance=var,
method='mean',
clipping_method='sigclip')
assert isinstance(out.uncertainty, VarianceUncertainty)
assert np.isclose(out.data[0], 2.2)
assert np.isclose(out.uncertainty.array[0], 1 / 10) # 10 valid values
assert out.meta['REJMAP'][0] == 1
var = np.random.normal(size=data.shape)
out = combine_arrays(data,
variance=var,
method='mean',
clipping_method='sigclip')
assert np.isclose(out.data[0], 2.2)
assert np.isclose(out.uncertainty.array[0], np.mean(var[:10]) / 10)
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_nddata_with_variance(dtype):
data = [
NDData(data=np.array([val], dtype=dtype),
uncertainty=VarianceUncertainty(np.array([1], dtype=dtype)))
for val in TEST_VALUES
]
out = combine_arrays(data, method='mean', clipping_method='sigclip')
assert isinstance(out.uncertainty, VarianceUncertainty)
assert np.isclose(out.data[0], 2.2)
assert np.isclose(out.uncertainty.array[0], 1 / 10) # 10 valid values
assert out.meta['REJMAP'][0] == 1
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_combine_varclip(dtype):
data = np.array([TEST_VALUES], dtype=dtype).T
var = np.ones_like(data)
var[-1] = 100
out = combine_arrays(data,
variance=var,
method='mean',
clipping_method='varclip')
assert isinstance(out.uncertainty, VarianceUncertainty)
assert np.isclose(out.data[0], 2.2)
assert np.isclose(out.uncertainty.array[0], 1 / 10) # 10 valid values
assert out.meta['REJMAP'][0] == 1
var[-1] = 100_000
out = combine_arrays(data,
variance=var,
method='mean',
clipping_method='varclip')
assert isinstance(out.uncertainty, VarianceUncertainty)
assert np.isclose(out.data[0], 11.09, atol=1e-2)
assert np.isclose(out.uncertainty.array[0], (100000+10) / 11**2)
assert out.meta['REJMAP'][0] == 0
def test_unknown_rejector():
    data = np.array([TEST_VALUES])
import dgl
from mxnet import nd
import numpy as np
def bbox_improve(bbox):
'''bbox encoding'''
area = (bbox[:,2] - bbox[:,0]) * (bbox[:,3] - bbox[:,1])
return nd.concat(bbox, area.expand_dims(1))
def extract_edge_bbox(g):
'''bbox encoding'''
src, dst = g.edges(order='eid')
n = g.number_of_edges()
src_bbox = g.ndata['pred_bbox'][src.asnumpy()]
dst_bbox = g.ndata['pred_bbox'][dst.asnumpy()]
edge_bbox = nd.zeros((n, 4), ctx=g.ndata['pred_bbox'].context)
edge_bbox[:,0] = nd.stack(src_bbox[:,0], dst_bbox[:,0]).min(axis=0)
edge_bbox[:,1] = nd.stack(src_bbox[:,1], dst_bbox[:,1]).min(axis=0)
edge_bbox[:,2] = nd.stack(src_bbox[:,2], dst_bbox[:,2]).max(axis=0)
edge_bbox[:,3] = nd.stack(src_bbox[:,3], dst_bbox[:,3]).max(axis=0)
return edge_bbox
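# Illustrative sketch (hypothetical helper, not used elsewhere): bbox_improve appends
# the box area as a fifth column, so an N x 4 bbox tensor becomes N x 5.
def _example_bbox_improve():
    boxes = nd.array([[0.1, 0.1, 0.5, 0.3]])
    return bbox_improve(boxes)  # shape (1, 5); the appended area is 0.4 * 0.2 = 0.08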
def build_graph_train(g_slice, gt_bbox, img, ids, scores, bbox, feat_ind,
spatial_feat, iou_thresh=0.5,
bbox_improvement=True, scores_top_k=50, overlap=False):
'''given ground truth and predicted bboxes, assign the label to the predicted w.r.t iou_thresh'''
# match and re-factor the graph
img_size = img.shape[2:4]
gt_bbox[:, :, 0] /= img_size[1]
gt_bbox[:, :, 1] /= img_size[0]
gt_bbox[:, :, 2] /= img_size[1]
gt_bbox[:, :, 3] /= img_size[0]
bbox[:, :, 0] /= img_size[1]
bbox[:, :, 1] /= img_size[0]
bbox[:, :, 2] /= img_size[1]
bbox[:, :, 3] /= img_size[0]
n_graph = len(g_slice)
g_pred_batch = []
for gi in range(n_graph):
g = g_slice[gi]
ctx = g.ndata['bbox'].context
inds = np.where(scores[gi, :, 0].asnumpy() > 0)[0].tolist()
if len(inds) == 0:
return None
if len(inds) > scores_top_k:
top_score_inds = scores[gi, inds, 0].asnumpy().argsort()[::-1][0:scores_top_k]
            inds = np.array(inds)
import matplotlib.pyplot as pl; pl.ioff()
import matplotlib.cm as cm
from matplotlib.ticker import MaxNLocator
import scipy.ndimage
import numpy as np
import re
import copy
__all__ = ['TrianglePlot_MCMC','marginalize_2d','marginalize_1d']
def TrianglePlot_MCMC(mcmcresult,plotmag=True,plotnuisance=False):
"""
Script to plot the usual triangle degeneracies.
Inputs:
mcmcresult:
The result of running the LensModelMCMC routine. We
can figure out everything we need from there.
plotmag:
Whether to show the dependence of magnification on the other
parameters (it's derived, not a fit param).
plotnuisance:
Whether to additionally plot various nuisance parameters, like
the absolute location of the lens or dataset amp scalings or
phase shifts.
Returns:
f,axarr:
A matplotlib.pyplot Figure object and array of Axes objects, which
can then be manipulated elsewhere. The goal is to send something
that looks pretty good, but this is useful for fine-tuning.
"""
# List of params we'll call "nuisance"
nuisance = ['xL','yL','ampscale_dset','astromshift_x_dset','astromshift_y_dset']
allcols = list(mcmcresult['chains'].dtype.names)
# Gets rid of mag for unlensed sources, which is always 1.
allcols = [col for col in allcols if not ('mu' in col and np.allclose(mcmcresult['chains'][col],1.))]
if not plotmag: allcols = [x for x in allcols if not 'mu' in x]
if not plotnuisance: allcols = [x for x in allcols if not any([l in x for l in nuisance])]
labelmap = {'xL':'$x_{L}$, arcsec','yL':'$y_{L}$, arcsec','ML':'$M_{L}$, $10^{11} M_\odot$',\
'eL':'$e_{L}$','PAL':'$\\theta_{L}$, deg CCW from E','xoffS':'$\Delta x_{S}$, arcsec','yoffS':'$\Delta y_{S}$, arcsec',\
'fluxS':'$F_{S}$, mJy','widthS':'$\sigma_{S}$, arcsec','majaxS':'$a_{S}$, arcsec',\
'indexS':'$n_{S}$','axisratioS':'$b_{S}/a_{S}$','PAS':'$\phi_{S}$, deg CCW from E',\
'shear':'$\gamma$','shearangle':'$\\theta_\gamma$',
'mu':'$\mu_{}$','ampscale_dset':'$A_{}$',
'astromshift_x_dset':'$\delta x_{}$, arcsec','astromshift_y_dset':'$\delta y_{}$, arcsec'}
f,axarr = pl.subplots(len(allcols),len(allcols),figsize=(len(allcols)*3,len(allcols)*3))
axarr[0,-1].text(-0.8,0.9,'Chain parameters:',fontsize='xx-large',transform=axarr[0,-1].transAxes)
it = 0.
for row,yax in enumerate(allcols):
for col,xax in enumerate(allcols):
x,y = copy.deepcopy(mcmcresult['chains'][xax]), copy.deepcopy(mcmcresult['chains'][yax])
if 'ML' in xax: x /= 1e11 # to 1e11Msun from Msun
if 'ML' in yax: y /= 1e11
if 'fluxS' in xax: x *= 1e3 # to mJy from Jy
if 'fluxS' in yax: y *= 1e3
# Figure out the axis labels...
if xax[-1].isdigit():
digit = re.search(r'\d+$',xax).group()
xlab = (digit+'}$').join(labelmap[xax[:-len(digit)]].split('}$'))
else: xlab = labelmap[xax]
if yax[-1].isdigit():
digit = re.search(r'\d+$',yax).group()
ylab = (digit+'}$').join(labelmap[yax[:-len(digit)]].split('}$'))
else: ylab = labelmap[yax]
# To counter outlying walkers stuck in regions of low likelihood, we use percentiles
# instead of std().
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2.
ystd = np.ediff1d(np.percentile(y,[15.87,84.13]))[0]/2.
xmin,xmax = np.median(x)-8*xstd, np.median(x)+8*xstd
ymin,ymax = np.median(y)-8*ystd, np.median(y)+8*ystd
import collections
import itertools
import logging
import math
import os
import os.path as osp
import gym
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.animation as manimation
from matplotlib.colors import ListedColormap
import pandas as pd
import numpy as np
import scipy.stats
import seaborn as sns
from pirl.envs import jungle_topology
logger = logging.getLogger('analysis.common')
THIS_DIR = osp.join(os.path.dirname(os.path.realpath(__file__)))
def style(name):
return osp.join(THIS_DIR, '{}.mplstyle'.format(name))
def nested_dicts_to_df(ds, idxs, transform):
if len(idxs) == 2:
ds = transform(ds)
df = pd.DataFrame(ds)
df.columns.name = idxs[0]
df.index.name = idxs[1]
else:
ds = {k: nested_dicts_to_df(v, idxs[1:], transform)
for k, v in ds.items()}
ds = {k: v.stack() for k, v in ds.items()}
df = pd.DataFrame(ds)
df.columns.name = idxs[0]
return df
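# Minimal usage sketch (hypothetical data, identity transform): a two-level nested dict
# becomes a DataFrame whose columns are the outer keys and whose index is the inner keys.
_example_nested = {'algoA': {'env1': 1.0, 'env2': 2.0},
                   'algoB': {'env1': 3.0, 'env2': 4.0}}
_example_df = nested_dicts_to_df(_example_nested, ['algo', 'env'], lambda x: x)
# _example_df.columns.name == 'algo' and _example_df.index.name == 'env'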
def extract_value(data):
def unpack_mean_sd_tuple(d):
res = {}
for k, v in d.items():
if v is None:
res[k] = {'mean': None, 'se': None}
else:
res[k] = {'mean': v[0], 'se': v[1]}
return res
idx = ['seed', 'eval', 'env', 'type']
ground_truth = nested_dicts_to_df(data['ground_truth'], idx, unpack_mean_sd_tuple)
ground_truth = ground_truth.stack().unstack('eval')
ground_truth = ground_truth.reorder_levels(['env', 'seed', 'type'])
ground_truth.columns.name = None
idxs = ['seed', 'eval', 'irl', 'env', 'n', 'm', 'type']
values = nested_dicts_to_df(data['values'], idxs, unpack_mean_sd_tuple)
values = values.stack().unstack('irl')
values.columns.name = 'irl'
sorted_idx = ['env', 'n', 'm', 'eval', 'seed', 'type']
if not values.empty:
values = values.reorder_levels(sorted_idx)
idx = values.index
else:
idx = [(env, 0, 0, 'gt', seed, kind)
for env, seed, kind in tuple(ground_truth.index)]
idx = pd.MultiIndex.from_tuples(idx, names=sorted_idx)
def get_gt(k):
env, _, _, _, seed, kind = k
return ground_truth.loc[(env, seed, kind), :]
values_gt = pd.DataFrame(list(map(get_gt, idx)), index=idx)
values = pd.concat([values, values_gt], axis=1)
return values
def load_value(experiment_dir, algo_pattern='(.*)', env_pattern='(.*)', algos=['.*'], dps=2):
fname = osp.join(experiment_dir, 'results.pkl')
data = pd.read_pickle(fname)
value = extract_value(data)
value.columns = value.columns.str.extract(algo_pattern, expand=False)
envs = value.index.levels[0].str.extract(env_pattern, expand=False)
value.index = value.index.set_levels(envs, level=0)
matches = []
mask = pd.Series(False, index=value.columns)
for p in algos:
m = value.columns.str.match(p)
matches += list(value.columns[m & (~mask)])
mask |= m
value = value.loc[:, matches]
value.columns = value.columns.str.split('_').str.join(' ') # so lines wrap
value = value.round(dps)
return value
def _extract_means_ses(values, with_seed=True):
nil_slices = (slice(None),) * (len(values.index.levels) - 1)
means = values.loc[nil_slices + ('mean',), :].copy()
means.index = means.index.droplevel('type')
ses = values.loc[nil_slices + ('se',), :].copy()
ses.index = ses.index.droplevel('type')
return means, ses
def _combine_means_ses(means, ses):
means['type'] = 'mean'
means = means.set_index('type', append=True)
ses['type'] = 'se'
ses = ses.set_index('type', append=True)
return pd.concat([means, ses])
def aggregate_value(values, n=100):
'''Aggregate mean and standard error across seeds. We assume the same number
of samples n are used to calculate the mean and s.e. of each seed.'''
means, ses = _extract_means_ses(values)
# The mean is just the mean across seeds
mean = means.stack().unstack('seed').mean(axis=1).unstack(-1)
# Reconstruct mean-of-squares
squares = (ses * ses * n) + (means * means)
mean_square = squares.stack().unstack('seed').mean(axis=1).unstack(-1)
# Back out standard error
var = mean_square - (mean * mean)
se = np.sqrt(var) / np.sqrt(n)
return _combine_means_ses(mean, se)
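# Rationale (added note): for each seed, se = sd / sqrt(n), so sd**2 = se**2 * n and the
# per-seed mean of squares is E[x**2] = sd**2 + mean**2. Averaging those mean-of-squares
# across seeds and subtracting the square of the pooled mean recovers the pooled variance,
# from which the pooled standard error sqrt(var) / sqrt(n) is computed above.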
def plot_ci(values, dp=3):
mean, se = _extract_means_ses(values)
fstr = '{:.' + str(dp) + 'f}'
return mean.applymap(lambda x: (fstr + ' +/- ').format(x)) + se.applymap(lambda x: fstr.format(1.96 * x))
def _gridworld_heatmap(reward, shape, walls=None, **kwargs):
reward = reward.reshape(shape)
kwargs.setdefault('fmt', '.0f')
kwargs.setdefault('annot', True)
kwargs.setdefault('annot_kws', {'fontsize': 'smaller'})
kwargs.setdefault('cmap', 'YlGnBu')
sns.heatmap(reward, mask=walls, **kwargs)
def _gridworld_heatmaps(reward, shape, env_name, get_axis,
prefix=None, share_scale=True, **kwargs):
env = gym.make(env_name)
try:
walls = env.unwrapped.walls
except AttributeError:
walls = None
gt = env.unwrapped.reward
ax = get_axis('gt')
_gridworld_heatmap(gt, shape, walls, ax=ax, **kwargs)
ax.set_title('Ground Truth')
yield ax
vmin = None
vmax = None
if share_scale:
vmin = min([v.min() for v in reward.values()])
vmax = max([v.max() for v in reward.values()])
i = 0
for n, reward_by_m in reward.items():
for m, r in reward_by_m.items():
r = r[env_name]
r = r - np.mean(r) + np.mean(gt)
ax = get_axis(i)
_gridworld_heatmap(r, shape, vmin=vmin, vmax=vmax, ax=ax)
title = '{}/{}'.format(m, n)
if prefix is not None:
title = '{} ({})'.format(prefix, title)
ax.set_title(title)
yield ax
i += 1
def gridworld_heatmap(reward, shape, num_cols=3, figsize=(11.6, 8.6),
prefix=None, share_scale=False):
envs = list(list(reward.values())[0].values())[0].keys()
num_plots = sum([len(d) for d in reward.values()]) + 1
num_rows = math.ceil(num_plots / num_cols)
for env_name in envs:
fig, axs = plt.subplots(num_rows,
num_cols,
squeeze=False,
figsize=figsize,
sharex=True,
sharey=True)
axs = list(itertools.chain(*axs)) # flatten
def get_ax(n):
if n == 'gt':
return axs[0]
else:
return axs[n + 1]
fig.suptitle(env_name)
it = _gridworld_heatmaps(reward, shape, env_name, get_ax,
prefix=prefix, share_scale=share_scale)
list(it)
yield env_name, fig
plt.close()
def gridworld_heatmap_movie(out_dir, reward, shape,
prefix=None, share_scale=False, fps=1, dpi=300):
envs = list(list(reward.values())[0].values())[0].keys()
get_ax = lambda n: fig.gca()
os.makedirs(out_dir, exist_ok=True)
for env_name in envs:
logger.debug('Generating movie for %s', env_name)
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Reward Heatmap', artist='matplotlib')
writer = FFMpegWriter(fps=fps, metadata=metadata)
fig = plt.figure()
fname = osp.join(out_dir, env_name.replace('/', '_') + '.mp4')
with writer.saving(fig, fname, dpi):
it = _gridworld_heatmaps(reward, shape, env_name, get_ax,
prefix=prefix, share_scale=share_scale)
for i, _v in enumerate(it):
writer.grab_frame()
fig.clf()
logger.debug('%s: written frame %d', fname, i)
plt.close(fig)
def gridworld_ground_truth(envs, shape):
data = {}
rmin = 1e10
rmax = -1e10
for nickname, env_name in envs.items():
env = gym.make(env_name)
reward = env.unwrapped.reward
walls = env.unwrapped.walls
env.close()
data[nickname] = (reward, walls)
rmin = min(rmin, np.min(reward))
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# import matplotlib.mlab as mlab  # deprecated
import scipy.stats
import random
np.random.seed(0)
# 6.2 Understanding the Bernoulli distribution in depth
def pro_test1():
# Binomial distribution example routine
# Toss 5 coins at once and count how many land heads up -- repeat the trial 10 times
print(np.random.binomial(5, 0.5, 10))
# [3 3 3 3 2 3 2 4 4 2]
# Toss 5 coins at once; estimated probability that all 5 land heads -- size=100000 samples
print(sum(np.random.binomial(5, 0.5, size=100000) == 5) / 100000.)
# 0.03123
# Toss 5 coins at once; estimated probability of exactly 4 heads -- size=100000 samples
print(sum(np.random.binomial(5, 0.5, size=100000) == 4) / 100000.)
# Toss 5 coins at once; estimated probability of exactly 3 heads -- size=100000 samples
print(sum(np.random.binomial(5, 0.5, size=100000) == 3) / 100000.)
# Toss 5 coins at once; estimated probability of exactly 2 heads -- size=100000 samples
print(sum(np.random.binomial(5, 0.5, size=100000) == 2) / 100000.)
# Toss 5 coins at once; estimated probability of exactly 1 head -- size=100000 samples
print(sum(np.random.binomial(5, 0.5, size=100000) == 1) / 100000.)
# Toss 5 coins at once; estimated probability of 0 heads -- size=100000 samples
print(sum(np.random.binomial(5, 0.5, size=100000) == 0) / 100000.)
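# For reference (added check, scipy.stats is imported above): the exact binomial pmf
# C(5, k) * 0.5**5 matches the sampled estimates above, e.g. P(5 heads) = 0.03125
# versus the sampled ~0.03123.
print([scipy.stats.binom.pmf(k, 5, 0.5) for k in range(6)])
# [0.03125, 0.15625, 0.3125, 0.3125, 0.15625, 0.03125]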
# 6.3 Understanding the normal distribution in depth
def pro_test2():
# Example routine: plot normal-distribution histograms separately
import matplotlib.pyplot as plt
plt.hist(np.random.normal(loc=-20, scale=10, size=10000), bins=50, density=True, color='g')
print(np.random.normal(loc=-20, scale=10, size=10))
plt.hist(np.random.normal(loc=10, scale=10, size=10000), bins=50, density=True, color='b')
plt.hist(np.random.normal(loc=0, scale=15, size=10000), bins=50, density=True, color='r')
plt.show()
def pro_test3():
# Transforming the standard normal distribution (implementation)
import matplotlib.pyplot as plt
plt.hist(np.random.normal(loc=0, scale=1, size=10000) * 0.5 - 2, bins=50, density=True, color='g')
plt.hist(np.random.normal(loc=0, scale=1, size=10000), bins=50, density=True, color='b')
plt.hist(np.random.normal(loc=0, scale=1, size=10000))
# -*- coding: utf-8 -*-
"""
"""
from typing import Union, Type, Tuple
import ctypes
import sys
import numpy as np
from cslug import CSlug, ptr, anchor, Header
from rockhopper import RequestMeError
NUMPY_REPR = False
BIG_ENDIAN = sys.byteorder == "big"
endians_header = Header(*anchor("src/endians.h", "src/endians.c"),
includes=["<stdbool.h>", '"_endian_typedefs.h"'])
slug = CSlug(anchor(
"_slugs/ragged_array",
"src/ragged_array.c",
"src/ragged_array.h",
"src/endians.c",
), headers=endians_header) # yapf: disable
dtype_like = Union[np.dtype, Type[np.generic]]
def prod(iterable):
"""Equivalent to :func:`math.prod` introduced in Python 3.8. """
out = 1
for i in iterable:
out = out * i
return out
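# e.g. prod(()) == 1 and prod((2, 3, 4)) == 24, matching math.prod() on Python >= 3.8.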
class RaggedArray(object):
"""A 2D array with rows of mixed lengths.
A ragged array consists of three 1D arrays.
* :attr:`flat` contains the flattened contents. i.e. each row joined end to
end without any delimiters or information describing the shape.
* :attr:`starts` and :attr:`ends` determine the shape. Each integer value
in these arrays is the start and stop of a :class:`slice` of
:attr:`flat`. Each slice is a :class:`RaggedArray` row.
A :class:`RaggedArray` is considered *packed* if the end of each row
is the same as the start of the next row.
"""
flat: np.ndarray
starts: np.ndarray
ends: np.ndarray
def __init__(self, flat, starts, ends=None, dtype=None, check=True):
"""The default way to construct a :class:`RaggedArray` is explicitly
from a :attr:`flat` contents array and either row :attr:`starts` and
:attr:`ends` arrays or, more commonly, a *bounds* array.
Args:
flat:
The contents of the array with no structure.
starts:
The index of **flat** where each row starts.
Or if **ends** is unspecified, the start of each row and the
end of the previous row.
ends:
The index of **flat** where each row ends.
dtype:
The :class:`numpy.dtype` of the array. Usually this can be
inferred from **flat** and is therefore not required to be set
explicitly. To indicate that multiple scalars should be
considered as one item, use a :class:`tuple` dtype.
check:
If true (default), verify that **starts** and **ends** are
valid (via :meth:`check`). Please only disable this if you need
to construct a ragged array by first creating an uninitialised
array and then populating it. Invalid arrays can lead to
seg-faults.
.. seealso::
Explicit construction is rarely the most convenient way to build a
:class:`RaggedArray`.
See :meth:`from_nested` to construct from lists of lists.
Or :meth:`from_lengths` to construct from flat data and row lengths.
Or :meth:`group_by` to specify the row number explicitly for each
item.
Examples:
Assuming the setup code::
import numpy as np
from rockhopper import RaggedArray
flat = np.arange(10)
::
>>> bounds = [0, 4, 7, 10]
>>> RaggedArray(flat, bounds)
RaggedArray.from_nested([
[0, 1, 2, 3],
[4, 5, 6],
[7, 8, 9],
])
The **bounds** need not start at the beginning nor end at the end.
Note however that the leading and trailing items in **flat** are not
represented in the repr. ::
>>> bounds = [2, 4, 4, 5, 9]
>>> RaggedArray(flat, bounds)
RaggedArray.from_nested([
[2, 3],
[],
[4],
[5, 6, 7, 8],
])
To be able to have gaps between rows or overlapping rows set both
**starts** and **ends**. ::
>>> starts = [0, 3, 1]
>>> ends = [6, 6, 5]
>>> RaggedArray(flat, starts, ends)
RaggedArray.from_nested([
[0, 1, 2, 3, 4, 5], # flat[0:6]
[3, 4, 5], # flat[3:6]
[1, 2, 3, 4], # flat[1:5]
])
This form is typically not very useful but is given more to explain
how the :class:`RaggedArray` works internally.
Copy-less slicing uses this form heavily.
"""
self.flat = np.asarray(flat, dtype=dtype, order="C")
if len(self.flat) >= (1 << 31): # pragma: 64bit
# Supporting large arrays would require promoting all ints in the C
# code to int64_t. Given that it takes at least 2GB of memory to get
# an array this big, I doubt that this would be useful but I could
# be wrong...
raise RequestMeError(
"Flat lengths >= 2^31 are disabled at compile time to save "
"memory at runtime.")
if ends is None:
bounds = np.asarray(starts, dtype=np.intc, order="C")
self.starts = bounds[:-1]
self.ends = bounds[1:]
else:
self.starts = np.asarray(starts, dtype=np.intc, order="C")
self.ends = np.asarray(ends, dtype=np.intc, order="C")
self._c_struct = slug.dll.RaggedArray(
ptr(self.flat),
self.itemsize,
len(self),
ptr(self.starts),
ptr(self.ends),
)
if check:
self.check()
def check(self):
"""Verify that this array has valid shape.
Raises:
ValueError:
If :attr:`starts` and :attr:`ends` are not of the same length.
ValueError:
If any row has a negative length. (0 length rows are ok.)
IndexError:
If any row starts (:attr:`starts`) are negative.
IndexError:
If any row ends (:attr:`ends`) are out of bounds (>= len(flat)).
"""
if len(self.starts) != len(self.ends):
raise ValueError(f"The lengths of starts ({len(self.starts)}) and "
f"ends ({len(self.ends)}) do not match.")
for index in _violates(self.starts > self.ends):
raise ValueError(f"Row {index}, "
f"starting at flat[{self.starts[index]}] "
f"and ending at flat[{self.ends[index]}], "
f"has a negative length "
f"({self.ends[index] - self.starts[index]}).")
for index in _violates(self.starts < 0):
raise IndexError(f"Invalid value in `starts` attribute: "
f"starts[{index}] = {self.starts[index]} < 0")
for index in _violates(self.ends > len(self.flat)):
raise IndexError(f"Invalid value in `ends` attribute: "
f"ends[{index}] = {self.ends[index]} >= "
f"len(flat) = {len(self.flat)}")
@property
def dtype(self):
"""The data type of the contents of this array.
Returns:
numpy.dtype: :py:`self.flat.dtype`.
"""
return self.flat.dtype
@property
def itemshape(self):
"""The shape of an individual element from :attr:`flat`.
Returns:
tuple: :py:`self.flat.shape[1:]`.
Assuming :attr:`flat` is not empty, this is equivalent to
:py:`self.flat[0].shape`. For a 2D ragged array, this is always simply
:py:`()`.
"""
return self.flat.shape[1:]
@property
def itemsize(self):
"""The size in bytes of an individual element from :attr:`flat`.
Returns:
int: Size of one element.
Assuming :attr:`flat` is not empty, this is equivalent to
:py:`len(self.flat[0].tobytes())`.
"""
return prod(self.itemshape) * self.dtype.itemsize
def astype(self, dtype):
"""Cast the contents to a given **dtype**. Analogous to
:meth:`numpy.ndarray.astype`.
Args:
dtype (Union[numpy.dtype, Type[numpy.generic]]):
Desired data type for the :attr:`flat` attribute.
Returns:
RaggedArray: A modified copy with :py:`copy.flat.dtype == dtype`.
Only the :attr:`flat` property is cast - :attr:`starts` and :attr:`ends`
remain unchanged.
The :attr:`flat` attribute is a copy if :meth:`numpy.ndarray.astype`
chooses to copy it. The :attr:`starts` and :attr:`ends` are never
copied.
>>> ragged = RaggedArray.from_nested([[1, 2], [3]], dtype=np.int32)
>>> ragged.astype(np.int32).flat is ragged.flat
False
>>> ragged.astype(np.int16).starts is ragged.starts
True
"""
return type(self)(self.flat.astype(dtype), self.starts, self.ends)
def byteswap(self, inplace=False):
"""Swap endian. Analogous to :meth:`numpy.ndarray.byteswap`.
Args:
inplace:
If true, modify this array. Otherwise create a new one.
Returns:
Either this array or a new ragged array with opposite byte order.
The byteorder of the :attr:`starts` and :attr:`ends` arrays are not
touched.
"""
if inplace:
self.flat.byteswap(inplace=inplace)
return self
return type(self)(self.flat.byteswap(), self.starts, self.ends,
self.dtype)
@classmethod
def from_lengths(cls, flat, lengths, dtype=None):
bounds = np.empty(len(lengths) + 1, dtype=np.intc)
bounds[0] = 0
np.cumsum(lengths, out=bounds[1:])
return cls(flat, bounds, dtype=dtype)
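# Example (added note): RaggedArray.from_lengths(np.arange(6), [2, 1, 3]) builds the
# packed ragged array [[0, 1], [2], [3, 4, 5]] via bounds = [0, 2, 3, 6].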
@classmethod
def from_nested(cls, nested, dtype=None):
_nested = [np.asarray(i, dtype=dtype) for i in nested if len(i)]
if _nested:
flat = np.concatenate(_nested)
else:
flat = np.empty(0, dtype=dtype)
lengths = [len(i) for i in nested]
return cls.from_lengths(flat, lengths, dtype=dtype)
def __getitem__(self, item) -> Union['RaggedArray', np.ndarray]:
index = self.__index_item__(item)
if isinstance(index, type(self)):
return index
return self.flat[index]
def __setitem__(self, key, value):
index = self.__index_item__(key)
if isinstance(index, type(self)):
raise RequestMeError
self.flat[index] = value
def __index_item__(self, item):
"""The brain behind __getitem__() and __setitem__().
To avoid having to write everything twice (for set and get item), this
function returns :attr:`flat` indices which may then be used as
``return flat[indices]`` or ``flat[indices] = value``.
Unfortunately, there are a lot of permutations of possible input types.
Some of these permutations return another RaggedArray which should be
returned directly by getitem and (once I've implemented vectorisation)
written to directly by setitem.
"""
# 2D indexing i.e. ragged[rows, columns]
if isinstance(item, tuple) and len(item) == 2:
rows, columns = item
if isinstance(columns, slice):
if _null_slice(columns):
# Case self[rows, :] should be simplified to ragged[rows]
return self.__index_item__(rows)
# Case self[rows, slice]
return self.__index_item_number_slice__(rows, columns)
# Case self[rows, numerical column numbers]
return self.__index_item_any_number__(rows, columns)
# 3+D indexing.
if isinstance(item, tuple) and len(item) > 2:
if self.itemshape:
# Convert to self[2D index, *other indices].
indices = self.__index_item__(item[:2])
if isinstance(indices, type(self)):
raise RequestMeError("Returning ragged arrays from >2D "
"indices is not implemented.")
return (indices, *item[2:])
raise IndexError(
f"Too many indices for ragged array: maximum allowed is 2 but "
f"{len(item)} were given.")
# 1D indexing (ragged[rows]).
if np.isscalar(item):
# A single row number implies just a regular array output.
return slice(self.starts[item], self.ends[item])
# Whereas any of slicing, bool masks, arrays of row numbers, ...
# return another ragged array.
return type(self)(self.flat, self.starts[item], self.ends[item])
def __index_item_any_number__(self, rows, columns):
"""Indices for self[rows, columns] where **columns** is numeric (not a
slice or bool mask)."""
rows_is_array_like = not (isinstance(rows, slice) or rows is None)
if rows_is_array_like:
rows = np.asarray(rows)
assert rows.dtype != object
columns = np.asarray(columns)
assert columns.dtype != object
starts = self.starts[rows]
ends = self.ends[rows]
if not rows_is_array_like:
columns = columns[np.newaxis]
while starts.ndim < columns.ndim:
starts = starts[..., np.newaxis]
ends = ends[..., np.newaxis]
lengths = ends - starts
out_of_bounds = (columns < -lengths) | (columns >= lengths)
for index in _violates(out_of_bounds):
rows = np.arange(len(self))[rows]
while rows.ndim < columns.ndim:
rows = rows[..., np.newaxis]
rows, columns, lengths = np.broadcast_arrays(rows, columns, lengths)
raise IndexError(f"Index {columns[index]} is out of bounds for row "
f"{rows[index]} with size {lengths[index]}")
columns = np.where(columns < 0, columns + lengths, columns)
return starts + columns
def __index_item_number_slice__(self, rows, columns: slice):
"""Indices for self[rows, columns] where **columns** is a slice."""
if columns.step not in (1, None):
raise RequestMeError(
"A stepped columns index ragged[x, ::step] is not implemented "
"as it would require strided ragged arrays (which are also not "
"implemented).")
starts = self.starts[rows]
ends = self.ends[rows]
lengths = ends - starts
if columns.start is None:
new_starts = starts
else:
new_starts = starts + _wrap_negative(columns.start, lengths)
new_starts.clip(starts, ends, out=new_starts)
if columns.stop is None:
new_ends = ends
else:
new_ends = starts + _wrap_negative(columns.stop, lengths)
new_ends.clip(starts, ends, out=new_ends)
new_ends.clip(new_starts, out=new_ends)
return type(self)(self.flat, *np.broadcast_arrays(new_starts, new_ends))
def __len__(self):
return len(self.starts)
def __iter__(self):
return (self[i] for i in range(len(self)))
def _to_string(self, prefix, separator):
"""Convert to :class:`str`. A loose ragged equivalent of
:func:`numpy.array2string()`.
Args:
prefix (str):
How far to indent. See the **prefix** option for
:func:`numpy.array2string()`.
separator (str):
The deliminator to be put between elements.
Returns:
str: Something stringy.
"""
# TODO: Maybe expand and make this method public.
_str = lambda x: np.array2string(x, prefix=prefix, separator=separator)
if len(self) > np.get_printoptions()['threshold']:
# Very long arrays should be summarised as [a, b, c, ..., x, y, z].
edge_items = np.get_printoptions()["edgeitems"]
rows = [_str(i) for i in self[:edge_items]]
rows.append("...")
rows += [_str(i) for i in self[-edge_items:]]
else:
rows = [_str(i) for i in self]
# A downside of doing everything per row is that each row gets formatted
# differently. NumPy doesn't expose any of its fancy dragon4 algorithm
# functionality for choosing format options so I don't see any practical
# way of changing this.
return (separator.rstrip() + "\n" + " " * len(prefix)).join(rows)
def __repr__(self):
prefix = type(self).__name__ + ".from_nested("
# I might make this a proper option in future.
if NUMPY_REPR: # pragma: no cover
# Old school NumPy style formatting.
return prefix + "[" + self._to_string(prefix + "[", ", ") + "])"
# More trendy trailing comma formatting for `black` fanatics.
return prefix + "[\n " + self._to_string(" ", ", ") + ",\n])"
def __str__(self):
return "[" + self._to_string(" ", " ") + "]"
def repacked(self):
length = (self.ends - self.starts).sum()
flat = np.empty((length,) + self.flat.shape[1:], self.flat.dtype)
bounds = np.empty(len(self.starts) + 1, np.intc)
new = type(self)(flat, bounds[:-1], bounds[1:], self.dtype, check=False)
slug.dll.repack(self._c_struct._ptr, new._c_struct._ptr)
return new
def dumps(self, ldtype=np.intc):
"""Serialise into a :class:`memoryview`.
Args:
ldtype (Union[numpy.dtype, Type[numpy.generic]]):
Integer type for the row lengths.
Returns:
memoryview:
A bytes-like binary blob.
The binary format is an undelimited sequence of ``(len(row), row)``
pairs. A pure Python approximation would be::
b"".join((len(row).tobytes() + row.tobytes() for row in ragged_array))
The integer types of the row lengths can be controlled by the
**ldtype** parameter. To change the type or byteorder of the data
itself, cast to that type with :meth:`astype` then call this function.
"""
ldtype = np.dtype(ldtype)
# --- Work out how many bytes the output will need. ---
# The total length of the flat data. Note, `self.flat.size` would not be
# a safe shortcut unless `self.repacked()` has been called 1st.
length = (self.ends - self.starts).sum() * self.itemsize
# And the lengths of the lengths...
length += len(self) * ldtype.itemsize
# Allocate `length` bytes to write to. `numpy.empty()` seems to be one
# of the only ways to create a lump of memory in Python without wasting
# time initialising it.
out = np.empty(length, dtype=np.byte)
failed_row = slug.dll.dump(self._c_struct._ptr, ptr(out),
_2_power(ldtype), _big_endian(ldtype))
if failed_row != -1:
raise OverflowError(
f"Row {failed_row} with length {len(self[failed_row])} "
f"is too long to write with an {ldtype.name} integer.")
return out.data
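# Round-trip sketch (added note; values assumed, requires the compiled C extension):
#   ra = RaggedArray.from_nested([[1, 2], [3, 4, 5]], dtype=np.int32)
#   blob = ra.dumps()                                   # (len(row), row) pairs
#   ra2, consumed = RaggedArray.loads(bytes(blob), dtype=np.int32)
#   ra2.tolist() == [[1, 2], [3, 4, 5]] and consumed == len(bytes(blob))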
@classmethod
def loads(cls, bin, dtype, rows=-1,
ldtype=np.intc) -> Tuple['RaggedArray', int]:
"""Deserialize a ragged array. This is the reciprocal of :meth:`dumps`.
Args:
bin (bytes):
Raw data to unpack.
dtype (Union[numpy.dtype, Type[numpy.generic]]):
Data type of the row contents in **bin**.
rows (int):
Number of rows to parse. Defaults to :py:`-1` for unknown.
ldtype (Union[numpy.dtype, Type[numpy.generic]]):
Integer type of the row lengths in **bin**.
Returns:
RaggedArray:
The deserialised ragged array.
int:
The number of bytes from **bin** consumed.
Raises:
ValueError:
If **bin** ends prematurely or in the middle of a row. This is
indicative of either data corruption or, more likely, muddling
of dtypes.
"""
dtype = np.dtype(dtype)
ldtype = np.dtype(ldtype)
# We need to know how many rows there will be in this new ragged array
# before creating and populating it.
if rows == -1:
# If it's not already known then it has to be counted.
rows = slug.dll.count_rows(ptr(bin), len(bin), _2_power(ldtype),
_big_endian(ldtype), dtype.itemsize)
if rows == -1:
# `count_rows()` returns -1 on error.
raise ValueError(
"Raw `bin` data ended mid way through a row. Either this "
"data is corrupt or the dtype(s) given are incorrect.")
# Run again with known number of `rows`.
return cls.loads(bin, dtype, rows, ldtype)
free = len(bin) - rows * ldtype.itemsize
items = free // dtype.itemsize
if items < 0:
raise ValueError(
f"With `bin` of length {len(bin)}, {rows} rows of "
f"{ldtype.itemsize} byte lengths leaves {free} bytes "
f"for the flat data. Perhaps your data types are wrong?")
self = cls(np.empty(items, dtype=dtype), np.empty(rows + 1, np.intc),
check=False)
bin_consumed = ctypes.c_size_t(0)
_rows = slug.dll.load(self._c_struct._ptr, ptr(bin), len(bin),
ctypes.byref(bin_consumed), rows,
_2_power(ldtype), _big_endian(ldtype))
if _rows < rows:
raise ValueError(
f"Raw `bin` data ended too soon. "
f"Only {_rows} out of the requested {rows} rows were read. "
f"Either this data is corrupt or the dtype(s) given are "
"incorrect.")
return self, bin_consumed.value
def _rectangular_slice(self, start, end):
"""Slice ``self`` but convert the output to a regular rectangular array.
This requires that this array is packed, hence its being private.
"""
width = self.ends[start] - self.starts[start]
if end >= len(self):
flat = self.flat[self.starts[start]:]
return flat.reshape((len(self) - start, width) + self.itemshape)
flat = self.flat[self.starts[start]:self.starts[end]]
return flat.reshape((end - start, width) + self.itemshape)
def to_rectangular_arrays(self, reorder=False):
"""Convert to a :class:`list` of regular :class:`numpy.ndarray`\\ s.
Args:
reorder (bool):
If true, pre-sort into order of ascending lengths to minimise
divisions needed. Use if the row order is unimportant.
Returns:
Union[tuple, list]:
list[numpy.ndarray]:
If **reorder** is false.
numpy.ndarray, list[numpy.ndarray]:
If **reorder** is true. The first argument is the args (from
:func:`numpy.argsort`) used to pre-sort.
The :class:`RaggedArray` is divided into chunks of consecutive rows
which have the same length. Each chunk is then converted to a plain 2D
:class:`numpy.ndarray`. These 2D arrays are returned in a :class:`list`.
::
>>> ragged_array([
... [1, 2],
... [3, 4],
... [5, 6, 7],
... [8, 9, 10],
... ]).to_rectangular_arrays()
[array([[1, 2], [3, 4]]), array([[ 5, 6, 7], [ 8, 9, 10]])]
"""
if reorder:
args = np.argsort(self.ends - self.starts)
return args, self[args].to_rectangular_arrays()
# The empty case requires special handling or it hits index errors
# further on.
if len(self) == 0:
return []
# This function uses slices on raw ``self.flat`` and thereby assumes
# that consecutive rows are consecutive in ``self.flat``. To enforce
# this case:
self = self.repacked()
lengths = self.ends - self.starts
out = []
start = 0
# For every row number that isn't the same length as its next row:
for end in np.nonzero(lengths[1:] != lengths[:-1])[0]:
end += 1
# slice from the last slice end to this one.
out.append(self._rectangular_slice(start, end))
start = end
# The above catches everything before a change in row length but not the
# final chunk after the last change. Add it.
out.append(self._rectangular_slice(start, len(self)))
return out
def tolist(self):
"""Convert to a list of lists. This is analogous to
:meth:`numpy.ndarray.tolist` and is the reciprocal of
:meth:`from_nested`."""
return sum(map(np.ndarray.tolist, self.to_rectangular_arrays()), [])
@classmethod
def group_by(cls, data, ids, id_max=None, check_ids=True):
"""Group **data** by **ids**.
Args:
data (numpy.ndarray):
Arbitrary values to be grouped. **data** can be of any dtype and
be multidimensional.
ids (numpy.ndarray):
Integer array with the same dimensions as **data**.
id_max (int):
:py:`np.max(ids) + 1`. If already known, providing this value
prevents it from being redundantly recalculated.
check_ids (bool):
If true, verify that each ID in **ids** is in bounds
(:py:`0 <= ID < id_max`). Disable with caution - an uncaught
out of bounds ID can lead to a seg-fault.
Returns:
RaggedArray:
For each value in **data**, its corresponding ID in **ids** determines
in which row the data value is placed. The order of data within rows is
consistent with the order they appear in **data**.
This method is similar to :meth:`pandas.DataFrame.groupby`. However, it
will not uniquify and enumerate the property to group by.
"""
# Just run ``groups_by()`` but with only one ``datas``.
return next(cls.groups_by(ids, data, id_max=id_max,
check_ids=check_ids))
@classmethod
def groups_by(cls, ids, *datas, id_max=None, check_ids=True):
"""Group each data from **datas** by **ids**.
This function is equivalent to, but faster than, calling
:meth:`group_by` multiple times with the same **ids**.
"""
# Type normalisation and sanity checks.
ids = np.asarray(ids)
datas = [np.asarray(i) for i in datas]
if id_max is None:
id_max = np.max(ids) + 1
elif check_ids and np.any(ids >= id_max):
max = ids.argmax()
raise IndexError(f"All ids must be < id_max but "
f"ids[{max}] = {ids[max]} >= {id_max}.")
if check_ids and np.any(ids < 0):
min = ids.argmin()
raise IndexError(
f"All ids must be >= 0 but ids[{min}] = {ids[min]}.")
counts, sub_ids = sub_enumerate(ids, id_max)
# The ``counts`` determine how long each row should be.
# From there we can work out the start and end point for each row.
bounds = np.empty(id_max + 1, np.intc)
counts.cumsum(out=bounds[1:])
bounds[0] = 0
# The ``sub_ids`` are the position along the row for each element.
# Without ``sub_ids``, elements from the same group will all write to
# the beginning of their row, and thus overwrite each other.
unique_ids = bounds[ids] + sub_ids
# ``unique_ids`` should contain exactly one of each element in
# ``range(len(ids))``.
for data in datas:
flat = np.empty(data.shape, data.dtype, order="C")
flat[unique_ids] = data
yield cls(flat, bounds)
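# Usage sketch (added note; values assumed, requires the compiled C extension):
#   data = np.array([10, 11, 12, 13, 14])
#   ids = np.array([0, 2, 0, 1, 2])
#   RaggedArray.group_by(data, ids).tolist()  ->  [[10, 12], [13], [11, 14]]
# i.e. each value lands in the row named by its ID, keeping its original order within the row.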
# For pickle.
def __getstate__(self):
# Unfortunately this will lose the memory efficiency of letting starts
# and ends overlap.
# I'm choosing to version this pickle function so that, if I fix the
# above, then I can avoid version mismatch chaos.
from rockhopper import __version__
return 0, __version__, self.flat, self.starts, self.ends
def __setstate__(self, state):
pickle_version, rockhopper_version, *state = state
if pickle_version > 0:
import pickle
raise pickle.UnpicklingError(
"This ragged array was pickled with a newer version of "
f"rockhopper ({rockhopper_version}) which wrote its pickles "
f'differently. Running:\n pip install '
f'"rockhopper >= {rockhopper_version}"\nshould fix this.')
self.__init__(*state)
ragged_array = RaggedArray.from_nested
def _2_power(dtype):
"""Convert an integer dtype to an enumerate used throughout the C code."""
# Functionally this is equivalent to ``int(math.log2(dtype.itemsize))``.
itemsize = np.dtype(dtype).itemsize
return next(i for i in range(8) if (1 << i) == itemsize)
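# e.g. _2_power(np.int8) == 0, _2_power(np.int32) == 2 and _2_power(np.int64) == 3.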
def _big_endian(dtype):
"""Is **dtype** big endian?"""
byteorder = np.dtype(dtype).byteorder
if byteorder == "<":
return False
if byteorder == ">":
return True
# `byteorder` can also be '=' for native (i.e. follow ``sys.byteorder``) or "|" for
# not applicable (for string types - which we shouldn't need anyway - or
# single byte types).
return BIG_ENDIAN
def sub_enumerate(ids, id_max):
"""Wrapper of :c:`sub_enumerate()` from src/ragged_array.c
Args:
ids (numpy.ndarray):
A group number for each element.
id_max (int):
A strict upper bound for the **ids**.
Returns:
counts (numpy.ndarray):
:py:`counts[x] := ids.count(x)`.
sub_ids (numpy.ndarray):
:py:`sub_ids[i] := ids[:i].count(ids[i])`.
Raises:
IndexError:
If either :py:`(0 <= ids).all()` or :py:`(ids < id_max).all()`
are not satisfied.
"""
ids = np.ascontiguousarray(ids, dtype=np.intc)
counts = np.zeros(int(id_max), np.intc)
sub_ids = np.empty_like(ids)
slug.dll.sub_enumerate(ptr(ids), ids.size, ptr(counts), ptr(sub_ids))
return counts, sub_ids
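# Example (added note, values assumed): for ids = [0, 2, 0, 1, 2] and id_max = 3,
# counts == [2, 1, 2] (occurrences of each ID) and sub_ids == [0, 0, 1, 0, 1]
# (how many times each ID has already appeared before that position).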
def _violates(mask: np.ndarray):
"""Yield the index of the first true element, if any, of the boolean array
**mask**. Otherwise don't yield at all."""
if np.any(mask):
index = np.argmax(mask)
yield np.unravel_index(index, mask.shape) if mask.ndim != 1 else index
def _null_slice(s: slice):
"""Return true if a slice does nothing e.g. list[:]"""
return s.start is s.step is s.stop is None
def _wrap_negative(indices, lengths):
"""Add **lengths** to **indices** which are negative. Mimics Python's usual
list[-1] => list[len(list) - 1] behaviour."""
return np.where(indices < 0, indices + lengths, indices)
import numpy as np
from holoviews.core.data import Dataset
from holoviews.core.options import Cycle
from holoviews.core.spaces import HoloMap
from holoviews.core.util import LooseVersion
from holoviews.element import Graph, Nodes, TriMesh, Chord, circular_layout
from holoviews.util.transform import dim
from matplotlib.collections import LineCollection, PolyCollection
from .test_plot import TestMPLPlot, mpl_renderer
class TestMplGraphPlot(TestMPLPlot):
def setUp(self):
super().setUp()
N = 8
self.nodes = circular_layout(np.arange(N, dtype=np.int32))
self.source = np.arange(N, dtype=np.int32)
self.target = np.zeros(N, dtype=np.int32)
self.weights = np.random.rand(N)
self.graph = Graph(((self.source, self.target),))
self.node_info = Dataset(['Output']+['Input']*(N-1), vdims=['Label'])
self.node_info2 = Dataset(self.weights, vdims='Weight')
self.graph2 = Graph(((self.source, self.target), self.node_info))
self.graph3 = Graph(((self.source, self.target), self.node_info2))
self.graph4 = Graph(((self.source, self.target, self.weights),), vdims='Weight')
def test_plot_simple_graph(self):
plot = mpl_renderer.get_plot(self.graph)
nodes = plot.handles['nodes']
edges = plot.handles['edges']
self.assertEqual(np.asarray(nodes.get_offsets()), self.graph.nodes.array([0, 1]))
self.assertEqual([p.vertices for p in edges.get_paths()],
[p.array() for p in self.graph.edgepaths.split()])
def test_plot_graph_categorical_colored_nodes(self):
g = self.graph2.opts(plot=dict(color_index='Label'), style=dict(cmap='Set1'))
plot = mpl_renderer.get_plot(g)
nodes = plot.handles['nodes']
facecolors = np.array([[0.89411765, 0.10196078, 0.10980392, 1.],
[0.6 , 0.6 , 0.6 , 1.],
[0.6 , 0.6 , 0.6 , 1.],
[0.6 , 0.6 , 0.6 , 1.],
[0.6 , 0.6 , 0.6 , 1.],
[0.6 , 0.6 , 0.6 , 1.],
[0.6 , 0.6 , 0.6 , 1.],
[0.6 , 0.6 , 0.6 , 1.]])
self.assertEqual(nodes.get_facecolors(), facecolors)
def test_plot_graph_numerically_colored_nodes(self):
g = self.graph3.opts(plot=dict(color_index='Weight'), style=dict(cmap='viridis'))
plot = mpl_renderer.get_plot(g)
nodes = plot.handles['nodes']
self.assertEqual(np.asarray(nodes.get_array()), self.weights)
self.assertEqual(nodes.get_clim(), (self.weights.min(), self.weights.max()))
def test_plot_graph_categorical_colored_edges(self):
g = self.graph3.opts(plot=dict(edge_color_index='start'),
style=dict(edge_cmap=['#FFFFFF', '#000000']))
plot = mpl_renderer.get_plot(g)
edges = plot.handles['edges']
colors = np.array([[1., 1., 1., 1.],
[0., 0., 0., 1.],
[1., 1., 1., 1.],
[0., 0., 0., 1.],
[1., 1., 1., 1.],
[0., 0., 0., 1.],
[1., 1., 1., 1.],
[0., 0., 0., 1.]])
self.assertEqual(edges.get_colors(), colors)
def test_plot_graph_numerically_colored_edges(self):
g = self.graph4.opts(plot=dict(edge_color_index='Weight'),
style=dict(edge_cmap=['#FFFFFF', '#000000']))
plot = mpl_renderer.get_plot(g)
edges = plot.handles['edges']
self.assertEqual(np.asarray(edges.get_array()), self.weights)
self.assertEqual(edges.get_clim(), (self.weights.min(), self.weights.max()))
###########################
# Styling mapping #
###########################
def test_graph_op_node_color(self):
edges = [(0, 1), (0, 2)]
nodes = Nodes([(0, 0, 0, '#000000'), (0, 1, 1, '#FF0000'), (1, 1, 2, '#00FF00')],
vdims='color')
graph = Graph((edges, nodes)).options(node_color='color')
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(artist.get_facecolors(),
np.array([[0, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1]]))
def test_graph_op_node_color_update(self):
edges = [(0, 1), (0, 2)]
def get_graph(i):
c1, c2, c3 = {0: ('#00FF00', '#0000FF', '#FF0000'),
1: ('#FF0000', '#00FF00', '#0000FF')}[i]
nodes = Nodes([(0, 0, 0, c1), (0, 1, 1, c2), (1, 1, 2, c3)],
vdims='color')
return Graph((edges, nodes))
graph = HoloMap({0: get_graph(0), 1: get_graph(1)}).options(node_color='color')
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(artist.get_facecolors(),
np.array([[0, 1, 0, 1], [0, 0, 1, 1], [1, 0, 0, 1]]))
plot.update((1,))
self.assertEqual(artist.get_facecolors(),
np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]))
def test_graph_op_node_color_linear(self):
edges = [(0, 1), (0, 2)]
nodes = Nodes([(0, 0, 0, 0.5), (0, 1, 1, 1.5), (1, 1, 2, 2.5)],
vdims='color')
graph = Graph((edges, nodes)).options(node_color='color')
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(np.asarray(artist.get_array()), np.array([0.5, 1.5, 2.5]))
self.assertEqual(artist.get_clim(), (0.5, 2.5))
def test_graph_op_node_color_linear_update(self):
edges = [(0, 1), (0, 2)]
def get_graph(i):
c1, c2, c3 = {0: (0.5, 1.5, 2.5),
1: (3, 2, 1)}[i]
nodes = Nodes([(0, 0, 0, c1), (0, 1, 1, c2), (1, 1, 2, c3)],
vdims='color')
return Graph((edges, nodes))
graph = HoloMap({0: get_graph(0), 1: get_graph(1)}).options(node_color='color', framewise=True)
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(np.asarray(artist.get_array()), np.array([0.5, 1.5, 2.5]))
self.assertEqual(artist.get_clim(), (0.5, 2.5))
plot.update((1,))
self.assertEqual(np.asarray(artist.get_array()), np.array([3, 2, 1]))
self.assertEqual(artist.get_clim(), (1, 3))
def test_graph_op_node_color_categorical(self):
edges = [(0, 1), (0, 2)]
nodes = Nodes([(0, 0, 0, 'A'), (0, 1, 1, 'B'), (1, 1, 2, 'A')],
vdims='color')
graph = Graph((edges, nodes)).options(node_color='color')
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(np.asarray(artist.get_array()), np.array([0, 1, 0]))
def test_graph_op_node_size(self):
edges = [(0, 1), (0, 2)]
nodes = Nodes([(0, 0, 0, 2), (0, 1, 1, 4), (1, 1, 2, 6)],
vdims='size')
graph = Graph((edges, nodes)).options(node_size='size')
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(artist.get_sizes(), np.array([4, 16, 36]))
def test_graph_op_node_size_update(self):
edges = [(0, 1), (0, 2)]
def get_graph(i):
c1, c2, c3 = {0: (2, 4, 6),
1: (12, 3, 5)}[i]
nodes = Nodes([(0, 0, 0, c1), (0, 1, 1, c2), (1, 1, 2, c3)],
vdims='size')
return Graph((edges, nodes))
graph = HoloMap({0: get_graph(0), 1: get_graph(1)}).options(node_size='size')
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(artist.get_sizes(), np.array([4, 16, 36]))
plot.update((1,))
self.assertEqual(artist.get_sizes(), np.array([144, 9, 25]))
def test_graph_op_node_linewidth(self):
edges = [(0, 1), (0, 2)]
nodes = Nodes([(0, 0, 0, 2), (0, 1, 1, 4), (1, 1, 2, 3.5)], vdims='line_width')
graph = Graph((edges, nodes)).options(node_linewidth='line_width')
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(artist.get_linewidths(), [2, 4, 3.5])
def test_graph_op_node_linewidth_update(self):
edges = [(0, 1), (0, 2)]
def get_graph(i):
c1, c2, c3 = {0: (2, 4, 6),
1: (12, 3, 5)}[i]
nodes = Nodes([(0, 0, 0, c1), (0, 1, 1, c2), (1, 1, 2, c3)],
vdims='line_width')
return Graph((edges, nodes))
graph = HoloMap({0: get_graph(0), 1: get_graph(1)}).options(node_linewidth='line_width')
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(artist.get_linewidths(), [2, 4, 6])
plot.update((1,))
self.assertEqual(artist.get_linewidths(), [12, 3, 5])
def test_graph_op_node_alpha(self):
import matplotlib as mpl
edges = [(0, 1), (0, 2)]
nodes = Nodes([(0, 0, 0, 0.2), (0, 1, 1, 0.6), (1, 1, 2, 1)], vdims='alpha')
graph = Graph((edges, nodes)).options(node_alpha='alpha')
if LooseVersion(mpl.__version__) < LooseVersion("3.4.0"):
# Python 3.6 only support up to matplotlib 3.3
with self.assertRaises(Exception):
mpl_renderer.get_plot(graph)
else:
plot = mpl_renderer.get_plot(graph)
artist = plot.handles['nodes']
self.assertEqual(artist.get_alpha(), np.array([0.2, 0.6, 1]))
def test_graph_op_edge_color(self):
edges = [(0, 1, 'red'), (0, 2, 'green'), (1, 3, 'blue')]
graph = Graph(edges, vdims='color').options(edge_color='color')
plot = mpl_renderer.get_plot(graph)
edges = plot.handles['edges']
self.assertEqual(edges.get_edgecolors(), np.array([
[1. , 0. , 0. , 1. ], [0. , 0.50196078, 0. , 1. ],
[0. , 0. , 1. , 1. ]]
))
def test_graph_op_edge_color_update(self):
graph = HoloMap({
0: Graph([(0, 1, 'red'), (0, 2, 'green'), (1, 3, 'blue')],
vdims='color'),
1: Graph([(0, 1, 'green'), (0, 2, 'blue'), (1, 3, 'red')],
vdims='color')}).options(edge_color='color')
plot = mpl_renderer.get_plot(graph)
edges = plot.handles['edges']
self.assertEqual(edges.get_edgecolors(), np.array([
[1. , 0. , 0. , 1. ], [0. , 0.50196078, 0. , 1. ],
[0. , 0. , 1. , 1. ]]
))
plot.update((1,))
self.assertEqual(edges.get_edgecolors(), np.array([
[0. , 0.50196078, 0. , 1. ], [0. , 0. , 1. , 1. ],
[1. , 0. , 0. , 1. ]]
))
def test_graph_op_edge_color_linear(self):
edges = [(0, 1, 2), (0, 2, 0.5), (1, 3, 3)]
graph = Graph(edges, vdims='color').options(edge_color='color')
plot = mpl_renderer.get_plot(graph)
edges = plot.handles['edges']
self.assertEqual(np.asarray(edges.get_array()), np.array([2, 0.5, 3]))
self.assertEqual(edges.get_clim(), (0.5, 3))
def test_graph_op_edge_color_linear_update(self):
graph = HoloMap({
0: Graph([(0, 1, 2), (0, 2, 0.5), (1, 3, 3)],
vdims='color'),
1: Graph([(0, 1, 4.3), (0, 2, 1.4), (1, 3, 2.6)],
vdims='color')}).options(edge_color='color', framewise=True)
plot = mpl_renderer.get_plot(graph)
edges = plot.handles['edges']
self.assertEqual(np.asarray(edges.get_array()), np.array([2, 0.5, 3]))
self.assertEqual(edges.get_clim(), (0.5, 3))
plot.update((1,))
self.assertEqual(np.asarray(edges.get_array()), np.array([4.3, 1.4, 2.6]))
self.assertEqual(edges.get_clim(), (1.4, 4.3))
def test_graph_op_edge_color_categorical(self):
edges = [(0, 1, 'C'), (0, 2, 'B'), (1, 3, 'A')]
graph = Graph(edges, vdims='color').options(edge_color='color')
plot = mpl_renderer.get_plot(graph)
edges = plot.handles['edges']
self.assertEqual(np.asarray(edges.get_array()), np.array([0, 1, 2]))
self.assertEqual(edges.get_clim(), (0, 2))
def test_graph_op_edge_alpha(self):
edges = [(0, 1, 0.1), (0, 2, 0.5), (1, 3, 0.3)]
graph = Graph(edges, vdims='alpha').options(edge_alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(graph)
def test_graph_op_edge_linewidth(self):
edges = [(0, 1, 2), (0, 2, 10), (1, 3, 6)]
graph = Graph(edges, vdims='line_width').options(edge_linewidth='line_width')
plot = mpl_renderer.get_plot(graph)
edges = plot.handles['edges']
self.assertEqual(edges.get_linewidths(), [2, 10, 6])
def test_graph_op_edge_line_width_update(self):
graph = HoloMap({
0: Graph([(0, 1, 2), (0, 2, 0.5), (1, 3, 3)],
vdims='line_width'),
1: Graph([(0, 1, 4.3), (0, 2, 1.4), (1, 3, 2.6)],
vdims='line_width')}).options(edge_linewidth='line_width')
plot = mpl_renderer.get_plot(graph)
edges = plot.handles['edges']
self.assertEqual(edges.get_linewidths(), [2, 0.5, 3])
plot.update((1,))
self.assertEqual(edges.get_linewidths(), [4.3, 1.4, 2.6])
class TestMplTriMeshPlot(TestMPLPlot):
def setUp(self):
super().setUp()
self.nodes = [(0, 0, 0), (0.5, 1, 1), (1., 0, 2), (1.5, 1, 3)]
self.simplices = [(0, 1, 2, 0), (1, 2, 3, 1)]
self.trimesh = TriMesh((self.simplices, self.nodes))
self.trimesh_weighted = TriMesh((self.simplices, self.nodes), vdims='weight')
def test_plot_simple_trimesh(self):
plot = mpl_renderer.get_plot(self.trimesh)
nodes = plot.handles['nodes']
edges = plot.handles['edges']
self.assertIsInstance(edges, LineCollection)
self.assertEqual(np.asarray(nodes.get_offsets()), self.trimesh.nodes.array([0, 1]))
self.assertEqual([p.vertices for p in edges.get_paths()],
[p.array() for p in self.trimesh._split_edgepaths.split()])
def test_plot_simple_trimesh_filled(self):
plot = mpl_renderer.get_plot(self.trimesh.opts(plot=dict(filled=True)))
nodes = plot.handles['nodes']
edges = plot.handles['edges']
self.assertIsInstance(edges, PolyCollection)
self.assertEqual(np.asarray(nodes.get_offsets()), self.trimesh.nodes.array([0, 1]))
paths = self.trimesh._split_edgepaths.split(datatype='array')
self.assertEqual([p.vertices[:4] for p in edges.get_paths()],
paths)
def test_plot_trimesh_colored_edges(self):
opts = dict(plot=dict(edge_color_index='weight'), style=dict(edge_cmap='Greys'))
plot = mpl_renderer.get_plot(self.trimesh_weighted.opts(**opts))
edges = plot.handles['edges']
colors = np.array([[ 1., 1., 1., 1.],
[ 0., 0., 0., 1.]])
self.assertEqual(edges.get_edgecolors(), colors)
def test_plot_trimesh_categorically_colored_edges(self):
opts = dict(plot=dict(edge_color_index='node1'), style=dict(edge_color=Cycle('Set1')))
plot = mpl_renderer.get_plot(self.trimesh_weighted.opts(**opts))
edges = plot.handles['edges']
colors = np.array([[0.894118, 0.101961, 0.109804, 1.],
[0.215686, 0.494118, 0.721569, 1.]])
self.assertEqual(edges.get_edgecolors(), colors)
def test_plot_trimesh_categorically_colored_edges_filled(self):
opts = dict(plot=dict(edge_color_index='node1', filled=True),
style=dict(edge_color=Cycle('Set1')))
plot = mpl_renderer.get_plot(self.trimesh_weighted.opts(**opts))
edges = plot.handles['edges']
colors = np.array([[0.894118, 0.101961, 0.109804, 1.],
[0.215686, 0.494118, 0.721569, 1.]])
self.assertEqual(edges.get_facecolors(), colors)
###########################
# Styling mapping #
###########################
def test_trimesh_op_node_color(self):
edges = [(0, 1, 2), (1, 2, 3)]
nodes = [(-1, -1, 0, 'red'), (0, 0, 1, 'green'), (0, 1, 2, 'blue'), (1, 0, 3, 'black')]
trimesh = TriMesh((edges, Nodes(nodes, vdims='color'))).options(node_color='color')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['nodes']
self.assertEqual(artist.get_facecolors(),
np.array([[1, 0, 0, 1], [0, 0.501961, 0, 1], [0, 0, 1, 1], [0, 0, 0, 1]]))
def test_trimesh_op_node_color_linear(self):
edges = [(0, 1, 2), (1, 2, 3)]
nodes = [(-1, -1, 0, 2), (0, 0, 1, 1), (0, 1, 2, 3), (1, 0, 3, 4)]
trimesh = TriMesh((edges, Nodes(nodes, vdims='color'))).options(node_color='color')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['nodes']
self.assertEqual(np.asarray(artist.get_array()), np.array([2, 1, 3, 4]))
self.assertEqual(artist.get_clim(), (1, 4))
def test_trimesh_op_node_color_categorical(self):
edges = [(0, 1, 2), (1, 2, 3)]
nodes = [(-1, -1, 0, 'B'), (0, 0, 1, 'C'), (0, 1, 2, 'A'), (1, 0, 3, 'B')]
trimesh = TriMesh((edges, Nodes(nodes, vdims='color'))).options(node_color='color')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['nodes']
self.assertEqual(np.asarray(artist.get_array()), np.array([0, 1, 2, 0]))
self.assertEqual(artist.get_clim(), (0, 2))
def test_trimesh_op_node_size(self):
edges = [(0, 1, 2), (1, 2, 3)]
nodes = [(-1, -1, 0, 3), (0, 0, 1, 2), (0, 1, 2, 8), (1, 0, 3, 4)]
trimesh = TriMesh((edges, Nodes(nodes, vdims='size'))).options(node_size='size')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['nodes']
self.assertEqual(artist.get_sizes(), np.array([9, 4, 64, 16]))
def test_trimesh_op_node_alpha(self):
import matplotlib as mpl
edges = [(0, 1, 2), (1, 2, 3)]
nodes = [(-1, -1, 0, 0.2), (0, 0, 1, 0.6), (0, 1, 2, 1), (1, 0, 3, 0.3)]
trimesh = TriMesh((edges, Nodes(nodes, vdims='alpha'))).options(node_alpha='alpha')
if LooseVersion(mpl.__version__) < LooseVersion("3.4.0"):
# Python 3.6 only support up to matplotlib 3.3
with self.assertRaises(Exception):
mpl_renderer.get_plot(trimesh)
else:
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['nodes']
self.assertEqual(artist.get_alpha(), np.array([0.2, 0.6, 1, 0.3]))
def test_trimesh_op_node_line_width(self):
edges = [(0, 1, 2), (1, 2, 3)]
nodes = [(-1, -1, 0, 0.2), (0, 0, 1, 0.6), (0, 1, 2, 1), (1, 0, 3, 0.3)]
trimesh = TriMesh((edges, Nodes(nodes, vdims='line_width'))).options(node_linewidth='line_width')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['nodes']
self.assertEqual(artist.get_linewidths(), [0.2, 0.6, 1, 0.3])
def test_trimesh_op_edge_color_linear_mean_node(self):
edges = [(0, 1, 2), (1, 2, 3)]
nodes = [(-1, -1, 0, 2), (0, 0, 1, 1), (0, 1, 2, 3), (1, 0, 3, 4)]
trimesh = TriMesh((edges, Nodes(nodes, vdims='color'))).options(edge_color='color')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['edges']
self.assertEqual(np.asarray(artist.get_array()), np.array([2, 8/3.]))
self.assertEqual(artist.get_clim(), (1, 4))
def test_trimesh_op_edge_color(self):
edges = [(0, 1, 2, 'red'), (1, 2, 3, 'blue')]
nodes = [(-1, -1, 0), (0, 0, 1), (0, 1, 2), (1, 0, 3)]
trimesh = TriMesh((edges, nodes), vdims='color').options(edge_color='color')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['edges']
self.assertEqual(artist.get_edgecolors(), np.array([
[1, 0, 0, 1], [0, 0, 1, 1]]))
def test_trimesh_op_edge_color_linear(self):
edges = [(0, 1, 2, 2.4), (1, 2, 3, 3.6)]
nodes = [(-1, -1, 0), (0, 0, 1), (0, 1, 2), (1, 0, 3)]
trimesh = TriMesh((edges, nodes), vdims='color').options(edge_color='color')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['edges']
self.assertEqual(np.asarray(artist.get_array()), np.array([2.4, 3.6]))
self.assertEqual(artist.get_clim(), (2.4, 3.6))
def test_trimesh_op_edge_color_categorical(self):
edges = [(0, 1, 2, 'A'), (1, 2, 3, 'B')]
nodes = [(-1, -1, 0), (0, 0, 1), (0, 1, 2), (1, 0, 3)]
trimesh = TriMesh((edges, nodes), vdims='color').options(edge_color='color')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['edges']
self.assertEqual(np.asarray(artist.get_array()), np.array([0, 1]))
self.assertEqual(artist.get_clim(), (0, 1))
def test_trimesh_op_edge_alpha(self):
edges = [(0, 1, 2, 0.7), (1, 2, 3, 0.3)]
nodes = [(-1, -1, 0), (0, 0, 1), (0, 1, 2), (1, 0, 3)]
trimesh = TriMesh((edges, nodes), vdims='alpha').options(edge_alpha='alpha')
with self.assertRaises(Exception):
mpl_renderer.get_plot(trimesh)
def test_trimesh_op_edge_line_width(self):
edges = [(0, 1, 2, 7), (1, 2, 3, 3)]
nodes = [(-1, -1, 0), (0, 0, 1), (0, 1, 2), (1, 0, 3)]
trimesh = TriMesh((edges, nodes), vdims='line_width').options(edge_linewidth='line_width')
plot = mpl_renderer.get_plot(trimesh)
artist = plot.handles['edges']
self.assertEqual(artist.get_linewidths(), [7, 3])
class TestMplChordPlot(TestMPLPlot):
def setUp(self):
super().setUp()
self.edges = [(0, 1, 1), (0, 2, 2), (1, 2, 3)]
self.nodes = Dataset([(0, 'A'), (1, 'B'), (2, 'C')], 'index', 'Label')
self.chord = Chord((self.edges, self.nodes))
def make_chord(self, i):
edges = [(0, 1, 1+i), (0, 2, 2+i), (1, 2, 3+i)]
nodes = Dataset([(0, 0+i), (1, 1+i), (2, 2+i)], 'index', 'Label')
return Chord((edges, nodes), vdims='weight')
def test_chord_nodes_label_text(self):
g = self.chord.opts(plot=dict(label_index='Label'))
plot = mpl_renderer.get_plot(g)
labels = plot.handles['labels']
self.assertEqual([l.get_text() for l in labels], ['A', 'B', 'C'])
def test_chord_nodes_labels_mapping(self):
g = self.chord.opts(plot=dict(labels='Label'))
plot = mpl_renderer.get_plot(g)
labels = plot.handles['labels']
self.assertEqual([l.get_text() for l in labels], ['A', 'B', 'C'])
def test_chord_nodes_categorically_colormapped(self):
g = self.chord.opts(plot=dict(color_index='Label'),
style=dict(cmap=['#FFFFFF', '#CCCCCC', '#000000']))
plot = mpl_renderer.get_plot(g)
arcs = plot.handles['arcs']
nodes = plot.handles['nodes']
colors = np.array([[ 1., 1., 1., 1. ],
[ 0.8, 0.8, 0.8, 1. ],
[ 0., 0., 0., 1. ]])
self.assertEqual(arcs.get_colors(), colors)
self.assertEqual(nodes.get_facecolors(), colors)
def test_chord_node_color_style_mapping(self):
g = self.chord.opts(style=dict(node_color='Label', cmap=['#FFFFFF', '#CCCCCC', '#000000']))
plot = mpl_renderer.get_plot(g)
arcs = plot.handles['arcs']
nodes = plot.handles['nodes']
self.assertEqual(np.asarray(nodes.get_array()), np.array([0, 1, 2]))
self.assertEqual(np.asarray(arcs.get_array()), np.array([0, 1, 2]))
# coding: utf-8
# In[ ]:
'''Compute the posterior estimates of spectral index, S1.4GHz, and P1.4GHz
as well as the posterior estimates of measured fluxes (S_i) using the Metropolis Hastings algorithm.
We assume priors: Gaussian measured fluxes, uniform spectral index, uniform S1.4, and uniform P1.4.
Detection is defined as 5*sigma_rms.
The detection mask can be defined to include nondetection measurements (a valid assumption for point sources).
The posterior density is then: prior x Likelihood (with priors described above).
The likelihood is an L2 on spectral index and S1.4 due to the Gaussian prior on observables.
Likelihood = exp(-1/2 * Sum (S_obs - g(alpha_i,S1.4))**2 / (Cd_i + Ct_i))
where S_obs are the measured fluxes
g(alpha_i,S1.4) gives model S_i
Cd_i is the measurement variance S_i
Ct_i is a systematic for g(...) taken to be (0.15*S_obs)**2
assuming z ~ 0.516 +- 0.002 we use the sampling of alpha and S14 to monte carlo compute the mean and variances of
posterior S_i and P14 in lognormal as suggested by their posterior plots.
We find that the posterior distributions for:
alpha is Gaussian
S1.4 is lognormal
P1.4 is lognormal
S_i is lognormal
'''
import numpy as np
import pylab as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
def g(alpha,S14,nu):
'''Forward equation, evaluate model at given nu array'''
out = S14*(nu/1400e6)**alpha
return out
def L(Sobs,alpha,S14,nu,CdCt):
'''Likelihood for alpha and S14'''
#only as nu_obs
d = g(alpha,S14,nu)
L2 = np.sum((Sobs - d)**2/CdCt)
#L1 = np.sum(np.abs(Sobs - d)/np.sqrt(CdCt))
return np.exp(-L2/2.)
def P(nu,z,alpha,S14):
c = 3e8
h0 = 0.7
ch = 1.32151838
q0 = 0.5
D = ch*z*(1+z*(1-q0)/(np.sqrt(1+2*q0*z) + 1 + q0*z))
S = S14*(nu/1400e6)
out = 4*np.pi*S*D**2 / (1+z)**(1+alpha) * 1e26
return out/1e24
def MHSolveSpectealIndex(nu,S,Cd,Ct,name,z,dz,nuModel=None,plot=False,plotDir=None):
'''Assumes S in mJy'''
if nuModel is None:
nuModel = nu
if plotDir is not None:
import os
try:
os.makedirs(plotDir)
except:
pass
N = int(1e6)
alpha_ = np.zeros(N,dtype=np.double)
S14_ = np.zeros(N,dtype=np.double)
alpha_[0] = -0.8
S14_[0] = S[0]*(1400e6/nu[0])**-0.8
print("Working on source {}".format(name))
# mask = detectionMask[idx, :]  # leftover from an earlier version: detectionMask/idx are not defined in this scope
CdCt = Cd + Ct
Li = L(S,alpha_[0],S14_[0],nu,CdCt)
print("Initial L: {}".format(Li))
maxL = Li
alphaMAP = alpha_[0]
S14MAP = S14_[0]
accepted = 0
binning = 50
i = 1
while accepted < binning*binning and i < N:
#sample priors in uniform steps
alpha_j = np.random.uniform(low=alpha_[i-1] - 0.5,high=alpha_[i-1] + 0.5)
S14_j = 10**(np.random.uniform(low = np.log10(S14_[i-1]/100),high=np.log10(S14_[i-1]*100)))
Lj = L(S,alpha_j,S14_j,nu,CdCt)
if np.random.uniform() < Lj/Li:
alpha_[i] = alpha_j
S14_[i] = S14_j
Li = Lj
accepted += 1
else:
alpha_[i] = alpha_[i-1]
S14_[i] = S14_[i-1]
if Lj > maxL:
maxL = Lj
alphaMAP = alpha_j
S14MAP = S14_j
i += 1
if accepted == binning**2:
print("Converged in {} steps".format(i))
print("Acceptance: {}, rate : {}".format(accepted,float(accepted)/i))
else:
print("Acceptance: {}, rate : {}".format(accepted,float(accepted)/i))
alpha_ = alpha_[:i]
S14_ = S14_[:i]
    # integrate out uncertainty using MC integration
logS_int = np.zeros([len(alpha_),len(nuModel)],dtype=np.double)
logP14_int = np.zeros(len(alpha_),dtype=np.double)
i = 0
while i < len(alpha_):
logS_int[i,:] = np.log(g(alpha_[i],S14_[i],nuModel))
logP14_int[i] = np.log(P(1400e6,np.random.normal(loc=z,scale=dz),alpha_[i],S14_[i]/1e3))
i += 1
logS_mu = np.mean(logS_int,axis=0)
logS_std = np.sqrt(np.mean(logS_int**2,axis=0) - logS_mu**2)
logP14_mu = np.mean(logP14_int)
logP14_std = np.sqrt(np.mean(logP14_int**2) - logP14_mu**2)
S_post_mu = np.exp(logS_mu)
S_post_up = np.exp(logS_mu + logS_std) - S_post_mu
S_post_low = S_post_mu - np.exp(logS_mu - logS_std)
P14_post_mu = np.exp(logP14_mu)
P14_post_up =
|
np.exp(logP14_mu + logP14_std)
|
numpy.exp
|
"""
[Adapted and refactored from Firedrake test suite by <NAME>]
[Not working! Needs to be updated to replace the deprecated C expressions]
This demo program solves Helmholtz's equation
- div D(u) grad u(x, y) + kappa u(x,y) = f(x, y)
with
D(u) = 1 + alpha * u**2
alpha = 0.1
kappa = 1
on the unit square with source f given by
f(x, y) = -8*pi^2*alpha*cos(2*pi*x)*cos(2*pi*y)^3*sin(2*pi*x)^2
- 8*pi^2*alpha*cos(2*pi*x)^3*cos(2*pi*y)*sin(2*pi*y)^2
+ 8*pi^2*(alpha*cos(2*pi*x)^2*cos(2*pi*y)^2 + 1)
*cos(2*pi*x)*cos(2*pi*y)
+ kappa*cos(2*pi*x)*cos(2*pi*y)
and the analytical solution
u(x, y) = cos(x*2*pi)*cos(y*2*pi)
"""
from firedrake import *
import matplotlib.pyplot as plt  # used below when plotting the matrix sparsity pattern
def create_mesh_and_function_space(numel_x, numel_y, degree=1, quadrilateral=False):
# Create mesh and define function space
mesh = UnitSquareMesh(numel_x, numel_y, quadrilateral=quadrilateral)
V = FunctionSpace(mesh, "CG", degree)
return mesh, V
def helmholtz(
V,
kappa,
alpha,
parameters={},
source=Constant(0.0)
):
# Define variational problem
u = Function(V)
v = TestFunction(V)
f = Function(V)
D = 1 + alpha * u * u
f.project(source)
a = (dot(grad(v), D * grad(u)) + kappa * v * u) * dx
L = f * v * dx
    # Assemble the Jacobian of the residual for inspection (a is a rank-1
    # residual form here since u is a Function, so the matrix is its derivative)
    A = assemble(derivative(a - L, u), mat_type='aij')
    # Extract the matrix entries and plot the sparsity pattern
    A_entries = A.M.values
plt.spy(A_entries)
plt.show()
solve(a - L == 0, u, solver_parameters=parameters)
return u
def plot_result(u):
try:
import matplotlib.pyplot as plt
from matplotlib import rc
plt.rc('text', usetex=True)
plt.rc('font', size=14)
# Setting up the figure object
plt.figure(dpi=300, figsize=(8, 6))
plot(u)
plt.show()
return True
except Exception as exception:
print(exception)
return False
def compute_errors(u, u_exact, space):
f = Function(space)
f.interpolate(u_exact)
return sqrt(assemble(dot(u - f, u - f) * dx))
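# Illustrative driver sketch (added): builds the manufactured source from the
# docstring with UFL expressions (in place of the deprecated C expressions noted
# above) and measures the L2 error of a single solve. Mesh size and degree are
# arbitrary choices for illustration.
def example_single_solve():
    mesh, V = create_mesh_and_function_space(32, 32, degree=1)
    x, y = SpatialCoordinate(mesh)
    alpha, kappa = 0.1, 1.0
    source = (-8*pi**2*alpha*cos(2*pi*x)*cos(2*pi*y)**3*sin(2*pi*x)**2
              - 8*pi**2*alpha*cos(2*pi*x)**3*cos(2*pi*y)*sin(2*pi*y)**2
              + 8*pi**2*(alpha*cos(2*pi*x)**2*cos(2*pi*y)**2 + 1)*cos(2*pi*x)*cos(2*pi*y)
              + kappa*cos(2*pi*x)*cos(2*pi*y))
    u_h = helmholtz(V, kappa, alpha, source=source)
    u_exact = cos(2*pi*x)*cos(2*pi*y)
    return compute_errors(u_h, u_exact, V)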
def run_convergence_test(
u_exact,
kappa,
alpha,
source,
degree=1,
exponent_min=4,
exponent_max=8,
quadrilateral=False,
parameters={}
):
import numpy as np
from scipy.stats import linregress
errors = np.array([])
mesh_size = np.array([])
for exponent in range(exponent_min, exponent_max):
        mesh, V = create_mesh_and_function_space(2 ** exponent, 2 ** exponent, degree=degree, quadrilateral=quadrilateral)
u_h = helmholtz(V, kappa, alpha, source=source, parameters=parameters)
mesh_size =
|
np.append(mesh_size, 2 ** exponent)
|
numpy.append
|
import chess
import numpy as np
import time
import os
import random
class TrainingDataGenerator:
"""
Generates MLP/CNN input data from a dictionary of {FEN String:CP Score}
"""
def __init__(self,
score_dict_file,
num_classes=2,
flatten=True,
min_move_num=0,
max_move_num=500):
np.random.seed(42)
if not str(score_dict_file).endswith('.npy'):
score_dict_file += '.npy'
self.score_dict = self.import_score_dict(score_dict_file)
self.num_classes = num_classes
self.flatten = flatten
self.min_move_num = min_move_num
self.max_move_num = max_move_num
self.SquareSet = chess.SquareSet(
chess.BB_A1 | chess.BB_A2 | chess.BB_A3 | chess.BB_A4 | chess.BB_A5 |
chess.BB_A6 | chess.BB_A7 | chess.BB_A8 |
chess.BB_B1 | chess.BB_B2 | chess.BB_B3 | chess.BB_B4 | chess.BB_B5 |
chess.BB_B6 | chess.BB_B7 | chess.BB_B8 |
chess.BB_C1 | chess.BB_C2 | chess.BB_C3 | chess.BB_C4 | chess.BB_C5 |
chess.BB_C6 | chess.BB_C7 | chess.BB_C8 |
chess.BB_D1 | chess.BB_D2 | chess.BB_D3 | chess.BB_D4 | chess.BB_D5 |
chess.BB_D6 | chess.BB_D7 | chess.BB_D8 |
chess.BB_E1 | chess.BB_E2 | chess.BB_E3 | chess.BB_E4 | chess.BB_E5 |
chess.BB_E6 | chess.BB_E7 | chess.BB_E8 |
chess.BB_F1 | chess.BB_F2 | chess.BB_F3 | chess.BB_F4 | chess.BB_F5 |
chess.BB_F6 | chess.BB_F7 | chess.BB_F8 |
chess.BB_G1 | chess.BB_G2 | chess.BB_G3 | chess.BB_G4 | chess.BB_G5 |
chess.BB_G6 | chess.BB_G7 | chess.BB_G8 |
chess.BB_H1 | chess.BB_H2 | chess.BB_H3 | chess.BB_H4 | chess.BB_H5 |
chess.BB_H6 | chess.BB_H7 | chess.BB_H8
)
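        # Note (added): the set above is simply every square on the board;
        # chess.SquareSet(chess.BB_ALL) would be an equivalent shorthand.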
self.ImportantSquareSet = chess.SquareSet(
chess.BB_D4 | chess.BB_D5 |
chess.BB_C4 | chess.BB_C5 |
chess.BB_E4 | chess.BB_E5 |
chess.BB_F2 | chess.BB_F7 |
chess.BB_H2 | chess.BB_H7
)
def import_score_dict(self, file):
score_dict = {}
# Attempt to load already processed boards
if os.path.isfile(file):
            score_dict = np.load(file, allow_pickle=True).item()  # stored dict needs allow_pickle=True with newer NumPy
else:
print('No hash table found.')
return score_dict
def parse_FEN_dict(self, num_boards=-1):
boards = []
scores = []
start = time.time()
print('Generating training data for ' + str(len(self.score_dict)) + " boards.")
items = list(self.score_dict.items())
random.shuffle(items)
counter = 0
# MLP:
if self.flatten:
for FEN, score in items:
if 0 < num_boards < counter:
break
board, parsed_score, turn = self.parse_FEN(FEN, score)
if board is not None and parsed_score is not None and turn is not None:
boards.append(board)
if self.num_classes == 2:
scores.append(parsed_score)
else:
scores.append(self.score_to_ternary(score))
counter += 1
# CNN:
else:
for FEN, score in items:
if 0 < num_boards < counter:
break
board, parsed_score, turn = self.parse_FEN_3D(FEN, score)
                if board is not None and parsed_score is not None:
boards.append(board)
if self.num_classes == 2:
scores.append(parsed_score)
else:
scores.append(self.score_to_ternary(score))
counter += 1
end = time.time()
boards = np.asarray(boards)
scores = np.asarray(scores)
print('Elapsed time: ' + str(end - start))
return boards, scores
# FEN to MLP input
def parse_FEN(self, FEN, score):
board = chess.Board(FEN)
turn = board.turn
if board.fullmove_number < self.min_move_num \
or board.fullmove_number > self.max_move_num:
return None, None, None
# Mirror board on blacks turn, and negate score
# if not board.turn:
# board = board.mirror()
# score = score * -1
if score >= 0:
binary_score = 1
else:
binary_score = 0
w_pawn = np.asarray(board.pieces(chess.PAWN, chess.WHITE).tolist()).astype(int)
w_rook = np.asarray(board.pieces(chess.ROOK, chess.WHITE).tolist()).astype(int)
w_knight = np.asarray(board.pieces(chess.KNIGHT, chess.WHITE).tolist()).astype(int)
w_bishop = np.asarray(board.pieces(chess.BISHOP, chess.WHITE).tolist()).astype(int)
w_queen = np.asarray(board.pieces(chess.QUEEN, chess.WHITE).tolist()).astype(int)
w_king = np.asarray(board.pieces(chess.KING, chess.WHITE).tolist()).astype(int)
b_pawn = (np.asarray(board.pieces(chess.PAWN, chess.BLACK).tolist()) * -1).astype(int)
b_rook = (np.asarray(board.pieces(chess.ROOK, chess.BLACK).tolist()) * -1).astype(int)
b_knight = (np.asarray(board.pieces(chess.KNIGHT, chess.BLACK).tolist()) * -1).astype(int)
b_bishop = (np.asarray(board.pieces(chess.BISHOP, chess.BLACK).tolist()) * -1).astype(int)
b_queen = (np.asarray(board.pieces(chess.QUEEN, chess.BLACK).tolist()) * -1).astype(int)
b_king = (np.asarray(board.pieces(chess.KING, chess.BLACK).tolist()) * -1).astype(int)
# White/Black check, or no check
if board.is_check() and board.turn is True:
white_checked = 1
black_checked = 0
elif board.is_check() and board.turn is False:
white_checked = 0
black_checked = 1
else:
white_checked = 0
black_checked = 0
# [turn, white check, black check] bits
turn_check_bits = np.asarray([turn, white_checked, black_checked], dtype=int)
binary_board = np.concatenate((w_pawn, w_rook, w_knight, w_bishop, w_queen, w_king,
b_pawn, b_rook, b_knight, b_bishop, b_queen, b_king,
turn_check_bits))
return binary_board, binary_score, turn
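    # Usage sketch (added; the file name below is hypothetical):
    #   gen = TrainingDataGenerator('score_dict.npy', num_classes=2, flatten=True)
    #   X, y = gen.parse_FEN_dict(num_boards=10000)
    # Each MLP sample returned by parse_FEN is a length-771 vector: 12 piece
    # bitboards of 64 bits each (white +1, black -1) plus the
    # [turn, white_checked, black_checked] bits.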
# FEN to CNN input
def parse_FEN_3D(self, FEN, score):
board = chess.Board(FEN)
turn = board.turn
if board.fullmove_number < self.min_move_num \
or board.fullmove_number > self.max_move_num:
            return None, None, None
# Mirror board on blacks turn, and negate score
# if not board.turn:
# board = board.mirror()
# score = score * -1
if score >= 0:
score = 1
else:
score = 0
w_pawn = np.reshape(board.pieces(chess.PAWN, chess.WHITE).tolist(), (-1, 8)).astype(int)
w_rook = np.reshape(board.pieces(chess.ROOK, chess.WHITE).tolist(), (-1, 8)).astype(int)
w_knight = np.reshape(board.pieces(chess.KNIGHT, chess.WHITE).tolist(), (-1, 8)).astype(int)
w_bishop = np.reshape(board.pieces(chess.BISHOP, chess.WHITE).tolist(), (-1, 8)).astype(int)
w_queen = np.reshape(board.pieces(chess.QUEEN, chess.WHITE).tolist(), (-1, 8)).astype(int)
w_king = np.reshape(board.pieces(chess.KING, chess.WHITE).tolist(), (-1, 8)).astype(int)
b_pawn = (np.reshape(board.pieces(chess.PAWN, chess.BLACK).tolist(), (-1, 8)) * -1).astype(int)
b_rook = (np.reshape(board.pieces(chess.ROOK, chess.BLACK).tolist(), (-1, 8)) * -1).astype(int)
b_knight = (np.reshape(board.pieces(chess.KNIGHT, chess.BLACK).tolist(), (-1, 8)) * -1).astype(int)
b_bishop = (np.reshape(board.pieces(chess.BISHOP, chess.BLACK).tolist(), (-1, 8)) * -1).astype(int)
b_queen = (np.reshape(board.pieces(chess.QUEEN, chess.BLACK).tolist(), (-1, 8)) * -1).astype(int)
b_king = (np.reshape(board.pieces(chess.KING, chess.BLACK).tolist(), (-1, 8)) * -1).astype(int)
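        # The CNN input is assembled as a stack of 8x8 planes: one plane per piece
        # type and colour (white +1, black -1) plus auxiliary planes for turn,
        # check state, attacked squares, pins and a set of important squares.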
checked_info = []
if board.turn is True:
turn = [1] * 64
else:
turn = [0] * 64
if board.is_check() and board.turn is True:
checked_info = [-1] * 64
elif board.is_check() and board.turn is False:
checked_info = [1] * 64
elif not board.is_check():
checked_info = [0] * 64
square_attackers = []
pinned_squares = []
important_attackers_features = []
for square in self.SquareSet:
if board.is_attacked_by(chess.WHITE, square):
square_attackers.append(1)
elif board.is_attacked_by(chess.BLACK, square):
square_attackers.append(-1)
else:
square_attackers.append(0)
if board.is_pinned(chess.WHITE, square):
pinned_squares.append(1)
elif board.is_pinned(chess.BLACK, square):
pinned_squares.append(-1)
else:
pinned_squares.append(0)
for ImportantSquare in self.ImportantSquareSet:
WhiteAttackers = board.attackers(chess.WHITE, ImportantSquare)
BlackAttackers = board.attackers(chess.BLACK, ImportantSquare)
if len(WhiteAttackers) > len(BlackAttackers):
important_attackers_features = [1] * 64
elif len(WhiteAttackers) < len(BlackAttackers):
important_attackers_features = [-1] * 64
else:
important_attackers_features = [0] * 64
turn = np.asarray(turn)
checked_info = np.asarray(checked_info)
square_attackers = np.asarray(square_attackers)
pinned_squares = np.asarray(pinned_squares)
important_attackers_features = np.asarray(important_attackers_features)
turn =
|
np.reshape(turn, (-1, 8))
|
numpy.reshape
|
# Author: <NAME>
###############################################
# Quantum State
###############################################
import numpy as np
from numba import njit
from const import U0, L, M, H, G
import mathieu_functions as mf
from scipy.integrate import odeint
from scipy.interpolate import UnivariateSpline
def energy(val: float):
return (H ** 2 / (8 * M * L ** 2)) * val + U0
@njit
def energy_numba(val: float):
return (H ** 2 / (8 * M * L ** 2)) * val + U0
def energy_crit(vals: list):
# for indx, val in enumerate(vals):
# if energy(val) > 2 * U0:
# n_crit = indx - 1
# break
# return n_crit, energy(vals[n_crit])
n_crit = np.argmax(energy(vals) > 2 * U0) - 1
return n_crit, energy(vals[n_crit])
# Time evolution operator
def time_opr(val: float, t: float):
return np.exp(1j * energy(val) * t / H)
# Gaussian coefficients that describe the eigenstate distribution
# used to create the state of the quantum pendulum
def gauss_coeff(nbar: int, n: int, sigma: float):
# return ((-1) ** np.random.randint(2)) * np.exp(-((n - nbar) ** 2) / (2 * sigma))
return np.exp(-((n - nbar) ** 2) / (2 * sigma ** 2))
@njit
def gauss_coeff_numba(nbar: int, n: int, sigma: float):
return np.exp(-((n - nbar) ** 2) / (2 * sigma ** 2))
# Normalization factor for the quantum state
def norm(nbar: int, n_max: int, sigma: float):
# summation = np.zeros(n_max)
# for i in range(len(summation)):
# summation[i] = gauss_coeff(nbar, i, sigma)
# return 1 / np.sqrt(np.sum(summation ** 2))
summation = np.array([gauss_coeff(nbar, i, sigma) for i in range(n_max)])
return 1 / np.sqrt(np.sum(summation ** 2))
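# Quick illustration (added; parameter values are arbitrary): with the factor
# returned by norm(), the weights c_n = norm(...) * gauss_coeff(...) sum to one
# in quadrature, i.e. sum_n c_n**2 == 1 up to rounding.
def _check_normalisation(nbar=5, n_max=20, sigma=2.0):
    c = norm(nbar, n_max, sigma) * np.array([gauss_coeff(nbar, n, sigma) for n in range(n_max)])
    return np.sum(c ** 2)  # ~1.0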
@njit
def norm_numba(nbar: int, n_max: int, sigma: float):
# sum = 0
# for i in range(n_max):
# sum += gauss_coeff_numba(nbar, i, sigma) ** 2
# return 1 / np.sqrt(sum)
summation = np.zeros(n_max)
for i in range(n_max):
summation[i] = gauss_coeff_numba(nbar, i, sigma)
return 1 / np.sqrt(np.sum(summation ** 2))
# Eigen states selection function.
def eigen_state(n: int, x: np.ndarray, vects: np.ndarray):
norm_factor = 1 / np.sqrt(np.pi)
# Select Ce or Se depending on n.
if n % 2 == 0:
# Select a_i vectors
return mf.ce_even(int(n / 2), x, vects[0::2]) * norm_factor
else:
        # Select b_i vectors
return mf.se_even(int((n - 1) / 2), x, vects[1::2]) * norm_factor
@njit
def eigen_state_numba(n: int, x: list, vects: list):
norm_factor = 1 / np.sqrt(np.pi)
# Adjust n for ce & se, respectively
if n % 2 == 0:
# Select a_i vectors
return mf.ce_even_numba(int(n / 2), x, vects[0::2]) * norm_factor
else:
        # Select b_i vectors
return mf.se_even_numba(int((n - 1) / 2), x, vects[1::2]) * norm_factor
# Quantum state
def state(nbar: int, sigma: float, x: np.ndarray, vects: np.ndarray):
# n_max = len(vects)
# summation = [gauss_coeff(nbar, i, sigma) * eigen_state(i, x, vects) for i in range(n_max)]
# return norm(nbar, n_max, sigma) * np.sum(summation, axis=0)
n_max = len(vects)
summation = np.zeros((n_max, len(x)))
for i in range(n_max):
summation[i] = gauss_coeff(nbar, i, sigma) * eigen_state(i, x, vects)
return norm(nbar, n_max, sigma) *
|
np.sum(summation, axis=0)
|
numpy.sum
|
"""
PROJECT: POLARIZATION OF THE CMB BY FOREGROUNDS
"""
import numpy as np
import itertools
from math import atan2, pi, acos
from matplotlib import pyplot as plt
from matplotlib import ticker
import cmfg
from Parser import Parser
from sys import argv
import healpy as hp
from scipy.spatial.transform import Rotation as R
from sklearn.neighbors import NearestNeighbors
import pickle
import multiprocessing
from joblib import Parallel, delayed
import tqdm
import time
start_time = time.time()
# LOAD config
if len(argv) > 1:
config = Parser(argv[1])
else:
config = Parser()
X = cmfg.profile2d(config)
X.load_centers()
X.select_subsample_centers()
# CMB MAP
nside = int(config['cmb']['filedata_cmb_nside'])
filedata = (f'{config.filenames.datadir_cmb}'
f'{config.filenames.filedata_cmb_mapa}')
T = hp.read_map(filedata, field=0, h=False, dtype=float)
Q = hp.read_map(filedata, field=1, h=False, dtype=float)
U = hp.read_map(filedata, field=2, h=False, dtype=float)
mask = hp.read_map(filedata, field=4, h=False, dtype=float)
# main computations
def f(x, N, rmax, rmax_deg, xr, yr, idxs, G):
"""
Funcion que calcula la contribucion de un centro
a los mapas de temperatura y polarización.
"""
center = x[1]
neigh = NearestNeighbors(n_neighbors=3, radius=0.01)
Zt = np.zeros((N,N))
Zq = np.zeros((N,N))
Zu = np.zeros((N,N))
Zqr = np.zeros((N,N))
Zur = np.zeros((N,N))
Msk = np.zeros((N,N), dtype=int)
# compute rotation matrix
phi = float(center.phi)
theta = float(center.theta)
pa = float(center.pa)
vector = hp.ang2vec(theta, phi)
rotate_pa = R.from_euler('zyz', [-phi, -theta, pa])
listpixs = hp.query_disc(nside, vector, rmax.value,
inclusive=False, fact=4, nest=False)
dists, thetas, tt, qq, uu, qr, mm = [], [], [], [], [], [], []
for ipix in listpixs:
v = hp.pix2vec(nside, ipix)
w = rotate_pa.apply(v)
dist = hp.rotator.angdist(w, [0, 0, 1])
theta = atan2(w[1], w[0])
if theta < 0:
theta = theta + 2*pi
# check mask
if mask[ipix]:
dists.append(dist[0])
thetas.append(theta)
tt.append(T[ipix])
qq.append(Q[ipix])
uu.append(U[ipix])
mm.append(mask[ipix])
thetas = np.array(thetas)
dists = np.array(dists)
tt = np.array(tt)
qq = np.array(qq)
uu = np.array(uu)
mm = np.array(mm)
    # compute the angle psi
psi2 = 2*thetas
qr = -qq*np.cos(psi2) - uu*np.sin(psi2)
ur = qq*np.sin(psi2) - uu*np.cos(psi2)
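    # (added note) Standard rotation of Stokes Q/U by 2*theta into radial and
    # tangential components (Q_r, U_r) about the stacking centre.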
x = dists*np.cos(thetas)*180/pi
y = dists*np.sin(thetas)*180/pi
neigh.fit(np.column_stack([x, y]))
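    # For each node of the N x N output grid, interpolate the map values from the
    # 3 nearest pixels using exponential distance weights, keeping only grid
    # points whose 3 neighbours all lie inside the mask.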
for i, ix in zip(idxs, G):
rr = np.linalg.norm(ix)
if rr < rmax_deg.value:
dist, ind = neigh.kneighbors([ix], 3, return_distance=True)
dd = np.exp(-dist*25)
dsts = dd.sum()
if mm[ind].astype(int).sum()==3:
val = np.dot(dd, tt[ind][0])/dsts
Zt[i[0], i[1]] = Zt[i[0], i[1]] + val
val = np.dot(dd, qq[ind][0])/dsts
Zq[i[0], i[1]] = Zq[i[0], i[1]] + val
val =
|
np.dot(dd, uu[ind][0])
|
numpy.dot
|