from .base import Transformer
import pandas as pd
import numpy as np
import os
ISO_COUNTRY_CODES = os.path.join(os.path.dirname(__file__), 'countrycodes.csv')
class SystemicPeaceTransformer(Transformer):
""" Data source specific transformers """
def __init__(self, source, target):
super().__init__(source, target)
self.iso = pd.read_csv(ISO_COUNTRY_CODES,
usecols=[0, 2],
names=['name', 'iso3'],
header=0)
def read(self):
try:
self.forc_disp_pop_df = pd.read_excel(self.source[0],
usecols="C:G")
self.pol_viol_df = pd.read_excel(self.source[1],
usecols="C:L")
self.state_fail_df = pd.read_excel(self.source[2],
usecols="A:L")
self.ethnic_war_df = pd.read_excel(self.source[3],
usecols="A:L")
self.revolu_war_df = pd.read_excel(self.source[4],
usecols="A:L")
self.genocide_df = pd.read_excel(self.source[5],
usecols="A:L")
except FileNotFoundError as exc:
raise ValueError("Source file {} not found.".format(self.source)) \
from exc
def write(self):
self.df.to_csv(self.target, mode='w', index=False)
def transform(self):
# self.transform_forcibly_displaced_populations()
self.transform_political_violence()
self.transform_state_failure()
self.transform_ethnic_war()
self.transform_revolu_war()
self.transform_genocide()
self.transform_country_code()
def __repr__(self):
return "<SystemicPeaceTransformer data for {}-{} ({} rows)>".format(self.df['year'].min(),
self.df['year'].max(),
len(self.df))
"""
def transform_forcibly_displaced_populations(self):
refg_orig_df = self.forc_disp_pop_df[["country", "year", "source"]]
refg_orig_df.columns.values[2] = 'value'
idp_df = self.forc_disp_pop_df[["country", "year", "idp"]]
idp_df.columns.values[2] = 'value'
refg_host_df = self.forc_disp_pop_df[["country", "year", "host"]]
refg_host_df.columns.values[2] = 'value'
refg_orig_df.loc[:, "Indicator Code"] = "SP.FDP.REFG.ORIG"
refg_orig_df.loc[:, "Indicator Name"] = "Number of Refugees (x1000) origination from country"
idp_df.loc[:, "Indicator Code"] = "SP.FDP.IDP"
idp_df.loc[:, "Indicator Name"] = "Number of internally displaced persons (x1000)"
refg_host_df.loc[:, "Indicator Code"] = "SP.FDP.REFG.HOST"
refg_host_df.loc[:, "Indicator Name"] = "Number of Refugees (x1000) hosted by the country"
self.forc_disp_pop_df = refg_orig_df.append(idp_df, sort="True").append(refg_host_df, sort="True")
self.forc_disp_pop_df = self.forc_disp_pop_df.dropna(how='any', axis=0)
self.df = self.forc_disp_pop_df
"""
def transform_political_violence(self):
indp_df = self.pol_viol_df[["country", "year", "intind"]]
indp_df.columns.values[2] = 'value'
int_viol_df = self.pol_viol_df[["country", "year", "intviol"]]
int_viol_df.columns.values[2] = 'value'
int_war_df = self.pol_viol_df[["country", "year", "intwar"]]
int_war_df.columns.values[2] = 'value'
civ_viol_df = self.pol_viol_df[["country", "year", "civviol"]]
civ_viol_df.columns.values[2] = 'value'
civ_war_df = self.pol_viol_df[["country", "year", "civwar"]]
civ_war_df.columns.values[2] = 'value'
eth_viol_df = self.pol_viol_df[["country", "year", "ethviol"]]
eth_viol_df.columns.values[2] = 'value'
eth_war_df = self.pol_viol_df[["country", "year", "ethwar"]]
eth_war_df.columns.values[2] = 'value'
indp_df.loc[:, "Indicator Code"] = "SP.PV.INDP"
indp_df.loc[:, "Indicator Name"] = "Magnitude score of episode of warfare episode. Scale: 1 (lowest) to 10 (highest)"
int_viol_df.loc[:, "Indicator Code"] = "SP.PV.INT.VIOL"
int_viol_df.loc[:, "Indicator Name"] = "Magnitude score of episode(s) of international violence. Scale: 1 (lowest) to 10 (highest)"
int_war_df.loc[:, "Indicator Code"] = "SP.PV.INT.WAR"
int_war_df.loc[:, "Indicator Name"] = "Magnitude score of episode(s) of international warfare.Scale: 1 (lowest) to 10 (highest)"
civ_viol_df.loc[:, "Indicator Code"] = "SP.PV.CIV.VIOL"
civ_viol_df.loc[:, "Indicator Name"] = "Magnitude score of episode(s) of civil violence. Scale: 1 (lowest) to 10 (highest) "
civ_war_df.loc[:, "Indicator Code"] = "SP.PV.CIV.WAR"
civ_war_df.loc[:, "Indicator Name"] = "Magnitude score of episode(s) of civil warfare. Scale: 1 (lowest) to 10 (highest) "
eth_viol_df.loc[:, "Indicator Code"] = "SP.PV.ETH.VIOL"
eth_viol_df.loc[:, "Indicator Name"] = "Magnitude score of episode(s) of ethnic violence. Scale: 1 (lowest) to 10 (highest) "
eth_war_df.loc[:, "Indicator Code"] = "SP.PV.ETH.WAR"
eth_war_df.loc[:, "Indicator Name"] = "Magnitude score of episode(s) of ethnic warfare. Scale: 1 (lowest) to 10 (highest) "
self.pol_viol_df = pd.concat(
    [indp_df, int_viol_df, int_war_df, civ_viol_df, civ_war_df, eth_viol_df, eth_war_df],
    sort=True)
self.pol_viol_df = self.pol_viol_df.dropna(how='any', axis=0)
self.df = self.pol_viol_df
# self.df = self.df.append(self.pol_viol_df, sort="False")
def transform_state_failure(self):
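# Build a full country x year panel: cross-join the unique countries and years
# through a constant "key" column, then left-join the raw episode records so
# that country-years without an episode are filled with 0 below.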
unique_states = self.state_fail_df.drop_duplicates("COUNTRY")[["COUNTRY"]].reset_index(drop = True)
unique_states["key"] = 1
unique_years = self.state_fail_df.drop_duplicates("YEAR")[["YEAR"]].reset_index(drop = True)
unique_years["key"] = 1
states_years_df = pd.merge(unique_states, unique_years, on = "key")
self.state_fail_df = pd.merge(states_years_df, self.state_fail_df, how = "left", on = ["COUNTRY","YEAR"]).fillna(0)
self.state_fail_df["YRON"] = np.where(self.state_fail_df["YRBEGIN"] == 0, 0, self.state_fail_df.YEAR - self.state_fail_df.YRBEGIN + 1)
yr_on_df = self.state_fail_df[["COUNTRY", "YEAR", "YRON"]]
yr_on_df.columns.values[2] = 'value'
mag_fail_df = self.state_fail_df[["COUNTRY", "YEAR", "MAGFAIL"]]
mag_fail_df.columns.values[2] = 'value'
mag_col_df = self.state_fail_df[["COUNTRY", "YEAR", "MAGCOL"]]
mag_col_df.columns.values[2] = 'value'
mag_viol_df = self.state_fail_df[["COUNTRY", "YEAR", "MAGVIOL"]]
mag_viol_df.columns.values[2] = 'value'
yr_on_df.loc[:, "Indicator Code"] = "SP.SF.YR.LENGTH"
yr_on_df.loc[:, "Indicator Name"] = "Length of conflict in years"
mag_fail_df.loc[:, "Indicator Code"] = "SP.SF.MAG.FAIL"
mag_fail_df.loc[:, "Indicator Name"] = "Scaled failure of State authority (range 1-4; 9=missing)"
mag_col_df.loc[:, "Indicator Code"] = "SP.SF.MAG.COLLAPSE"
mag_col_df.loc[:, "Indicator Name"] = "Scaled collapse of democratic institutions (range 1-4; 9=missing)"
mag_viol_df.loc[:, "Indicator Code"] = "SP.SF.MAG.VIOL"
mag_viol_df.loc[:, "Indicator Name"] = "Scaled violence associated with regime transition (range 1-4; 9=missing)"
self.state_fail_df = pd.concat([yr_on_df, mag_fail_df, mag_col_df, mag_viol_df], sort=True)
self.state_fail_df.rename(columns={'COUNTRY': 'country', 'YEAR': 'year'}, inplace=True)
self.state_fail_df = self.state_fail_df.dropna(how='any', axis=0)
self.df = pd.concat([self.df, self.state_fail_df], sort=False)
def transform_ethnic_war(self):
unique_states = self.ethnic_war_df.drop_duplicates("COUNTRY")[["COUNTRY"]].reset_index(drop = True)
unique_states["key"] = 1
unique_years = self.ethnic_war_df.drop_duplicates("YEAR")[["YEAR"]].reset_index(drop = True)
unique_years["key"] = 1
states_years_df = pd.merge(unique_states, unique_years, on = "key")
self.ethnic_war_df = pd.merge(states_years_df, self.ethnic_war_df, how = "left", on = ["COUNTRY","YEAR"]).fillna(0)
self.ethnic_war_df["YRON"] =
|
np.where(self.ethnic_war_df["YRBEGIN"] == 0, 0, self.ethnic_war_df.YEAR - self.ethnic_war_df.YRBEGIN + 1)
|
numpy.where
|
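# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming only the read/transform/write methods shown above, a driver script
# could look like this (file paths are placeholders):
#
#   sources = ["fdp.xls", "mepv.xls", "state_failure.xls",
#              "ethnic_war.xls", "revolutionary_war.xls", "genocide.xls"]
#   transformer = SystemicPeaceTransformer(sources, "systemic_peace.csv")
#   transformer.read()
#   transformer.transform()
#   transformer.write()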
import numpy
import pytest
import scipy.linalg
from numpy.testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
)
import krypy
def get_matrix_spd():
a = numpy.linspace(1, 2, 10)
a[-1] = 1e-2
return numpy.diag(a)
def get_matrix_hpd():
a = numpy.array(numpy.linspace(1, 2, 10), dtype=complex)
a[0] = 5
a[-1] = 1e-1
A = numpy.diag(a)
A[-1, 0] = 1e-1j
A[0, -1] = -1e-1j
return A
def get_matrix_symm_indef():
a = numpy.linspace(1, 2, 10)
a[-1] = -1
return numpy.diag(a)
def get_matrix_herm_indef():
a = numpy.array(numpy.linspace(1, 2, 10), dtype=complex)
a[-1] = 1e-3
A = numpy.diag(a)
A[-1, 0] = 10j
A[0, -1] = -10j
return A
def get_matrix_nonsymm():
a = numpy.array(range(1, 11), dtype=float)
a[-1] = -1e1
A = numpy.diag(a)
A[0, -1] = 1e1
return A
def get_matrix_comp_nonsymm():
a = numpy.array(range(1, 11), dtype=complex)
a[-1] = -1e1
A = numpy.diag(a)
A[0, -1] = 1.0e1j
return A
def get_matrices(
spd=True,
hpd=True,
symm_indef=True,
herm_indef=True,
nonsymm=True,
comp_nonsymm=True,
):
matrices = []
if spd:
matrices.append(get_matrix_spd())
if hpd:
matrices.append(get_matrix_hpd())
if symm_indef:
matrices.append(get_matrix_symm_indef())
if herm_indef:
matrices.append(get_matrix_herm_indef())
if nonsymm:
matrices.append(get_matrix_nonsymm())
if comp_nonsymm:
matrices.append(get_matrix_comp_nonsymm())
return matrices
def get_ip_Bs():
B = numpy.diag(numpy.linspace(1, 5, 10))
return [
None,
krypy.utils.MatrixLinearOperator(B),
lambda x, y: numpy.dot(x.T.conj(), numpy.dot(B, y)),
]
def get_operators(A):
return [A, krypy.utils.MatrixLinearOperator(A)]
def get_vecs(v):
return [v, numpy.reshape(v, (v.shape[0],))]
_factors = [0.0, 1.0, 1.0j, 1.0 + 1.0j, 1e8, 1e-8]
@pytest.mark.parametrize("a", _factors)
@pytest.mark.parametrize("b", _factors)
@pytest.mark.parametrize("length", [10, 1])
def test_house(a, b, length):
x = numpy.ones((length, 1), dtype=numpy.array([a]).dtype) * b
x[0] = a
H = krypy.utils.House(x)
y = H.apply(x)
I = numpy.eye(len(x))
# check that H.matrix() equals to H.apply(I)
HI = H.apply(I)
Hm = H.matrix()
assert numpy.linalg.norm(HI - Hm, 2) <= 1e-14
# check that H.matrix() is Hermitian
assert numpy.linalg.norm(Hm - Hm.T.conj(), 2) <= 1e-14
# check that H.matrix() is unitary/involutory
assert numpy.linalg.norm(I - numpy.dot(Hm.T.conj(), Hm), 2) <= 1e-14
# check that absolute value of y[0] equals norm(x)
assert numpy.abs(
numpy.linalg.norm(x, 2) - numpy.abs(y[0])
) <= 1e-14 * numpy.linalg.norm(x, 2)
# check that abs(alpha)=1
assert numpy.abs(1 - numpy.abs(H.alpha)) <= 1e-14
# check that y[0] = alpha*norm(x)
assert numpy.abs(y[0] - H.alpha * H.xnorm) <= 1e-14 * numpy.linalg.norm(x, 2)
if y.shape[0] > 1:
# check that all elements of r except the first are zero
assert numpy.linalg.norm(y[1:], 2) <= 1e-14 * numpy.linalg.norm(x, 2)
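# Reference sketch (not part of krypy): a plain-numpy Householder reflector with
# the properties that test_house checks above. Assumes x is a nonzero (n, 1)
# column vector; the returned H is Hermitian, unitary, and zeros x below entry 0.
def _householder_reference(x):
    x = numpy.asarray(x, dtype=complex)
    alpha = -x[0, 0] / abs(x[0, 0]) if x[0, 0] != 0 else 1.0
    v = x - alpha * numpy.linalg.norm(x, 2) * numpy.eye(x.shape[0], 1)
    nv = numpy.linalg.norm(v, 2)
    if nv == 0:
        return numpy.eye(x.shape[0])  # x is already a multiple of e1
    v = v / nv
    return numpy.eye(x.shape[0]) - 2 * numpy.dot(v, v.T.conj())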
@pytest.mark.parametrize("a", _factors)
@pytest.mark.parametrize("b", _factors)
def test_givens(a, b):
x = numpy.array([[a], [b]])
G = krypy.utils.Givens(x)
y = G.apply(x)
I = numpy.eye(2)
# check that G.G is unitary
assert numpy.linalg.norm(I - numpy.dot(G.G.T.conj(), G.G), 2) <= 1e-14
# check that absolute value of y[0] equals norm(x)
assert numpy.abs(
numpy.linalg.norm(x, 2) - numpy.abs(y[0])
) <= 1e-14 * numpy.linalg.norm(x, 2)
# check that y[1] is (numerically) zero
assert numpy.linalg.norm(y[1], 2) <= 1e-14 * numpy.linalg.norm(x, 2)
@pytest.mark.parametrize(
"X",
[
numpy.eye(10, 1),
numpy.eye(10, 5),
numpy.eye(10, 5)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from collections import namedtuple
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.tsa.arima.model import ARIMA
from scipy.linalg import toeplitz
ModelWithResults = namedtuple("ModelWithResults", ["model", "alg", "inference_dataframe"])
"""
Fixtures for a number of models available in statsmodels
https://www.statsmodels.org/dev/api.html
"""
def ols_model(**kwargs):
# Ordinary Least Squares (OLS)
np.random.seed(9876789)
nsamples = 100
x = np.linspace(0, 10, 100)
X = np.column_stack((x, x ** 2))
beta = np.array([1, 0.1, 10])
e = np.random.normal(size=nsamples)
X = sm.add_constant(X)
y = np.dot(X, beta) + e
ols = sm.OLS(y, X)
model = ols.fit(**kwargs)
return ModelWithResults(model=model, alg=ols, inference_dataframe=X)
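# Minimal usage sketch for these fixtures (illustrative, not part of the module):
# every helper returns a ModelWithResults namedtuple, so a caller can do e.g.
#   mw = ols_model()
#   predictions = mw.model.predict(mw.inference_dataframe)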
def failing_logit_model():
X = pd.DataFrame(
{
"x0": np.array([2.0, 3.0, 1.0, 2.0, 20.0, 30.0, 10.0, 20.0]),
"x1": np.array([2.0, 3.0, 1.0, 2.0, 20.0, 30.0, 10.0, 20.0]),
},
columns=["x0", "x1"],
)
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
# building the model and fitting the data
log_reg = sm.Logit(y, X)
model = log_reg.fit()
return ModelWithResults(model=model, alg=log_reg, inference_dataframe=X)
def get_dataset(name):
dataset_module = getattr(sm.datasets, name)
data = dataset_module.load()
data.exog = np.asarray(data.exog)
data.endog = np.asarray(data.endog)
return data
def gls_model():
# Generalized Least Squares (GLS)
data = get_dataset("longley")
data.exog = sm.add_constant(data.exog)
ols_resid = sm.OLS(data.endog, data.exog).fit().resid
res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
rho = res_fit.params
order = toeplitz(np.arange(16))
sigma = rho ** order
gls = sm.GLS(data.endog, data.exog, sigma=sigma)
model = gls.fit()
return ModelWithResults(model=model, alg=gls, inference_dataframe=data.exog)
def glsar_model():
# Generalized Least Squares with AR covariance structure
X = range(1, 8)
X = sm.add_constant(X)
Y = [1, 3, 4, 5, 8, 10, 9]
glsar = sm.GLSAR(Y, X, rho=2)
model = glsar.fit()
return ModelWithResults(model=model, alg=glsar, inference_dataframe=X)
def wls_model():
# Weighted Least Squares
Y = [1, 3, 4, 5, 2, 3, 4]
X = range(1, 8)
X = sm.add_constant(X)
wls = sm.WLS(Y, X, weights=list(range(1, 8)))
model = wls.fit()
return ModelWithResults(model=model, alg=wls, inference_dataframe=X)
def recursivels_model():
# Recursive Least Squares
dta = sm.datasets.copper.load_pandas().data
dta.index = pd.date_range("1951-01-01", "1975-01-01", freq="AS")
endog = dta.WORLDCONSUMPTION
# To the regressors in the dataset, we add a column of ones for an intercept
exog = sm.add_constant(
dta[["COPPERPRICE", "INCOMEINDEX", "ALUMPRICE", "INVENTORYINDEX"]] # pylint: disable=E1136
)
rls = sm.RecursiveLS(endog, exog)
model = rls.fit()
inference_dataframe = pd.DataFrame([["1951-01-01", "1975-01-01"]], columns=["start", "end"])
return ModelWithResults(model=model, alg=rls, inference_dataframe=inference_dataframe)
def rolling_ols_model():
# Rolling Ordinary Least Squares (Rolling OLS)
from statsmodels.regression.rolling import RollingOLS
data = get_dataset("longley")
exog = sm.add_constant(data.exog, prepend=False)
rolling_ols = RollingOLS(data.endog, exog)
model = rolling_ols.fit(reset=50)
return ModelWithResults(model=model, alg=rolling_ols, inference_dataframe=exog)
def rolling_wls_model():
# Rolling Weighted Least Squares (Rolling WLS)
from statsmodels.regression.rolling import RollingWLS
data = get_dataset("longley")
exog = sm.add_constant(data.exog, prepend=False)
rolling_wls = RollingWLS(data.endog, exog)
model = rolling_wls.fit(reset=50)
return ModelWithResults(model=model, alg=rolling_wls, inference_dataframe=exog)
def gee_model():
# Example taken from
# https://www.statsmodels.org/devel/examples/notebooks/generated/gee_nested_simulation.html
np.random.seed(9876789)
p = 5
groups_var = 1
level1_var = 2
level2_var = 3
resid_var = 4
n_groups = 100
group_size = 20
level1_size = 10
level2_size = 5
n = n_groups * group_size * level1_size * level2_size
xmat = np.random.normal(size=(n, p))
# Construct labels showing which group each observation belongs to at each level.
groups_ix = np.kron(np.arange(n // group_size), np.ones(group_size)).astype(int)
level1_ix = np.kron(np.arange(n // level1_size), np.ones(level1_size)).astype(int)
level2_ix = np.kron(np.arange(n // level2_size), np.ones(level2_size)).astype(int)
# Simulate the random effects.
groups_re = np.sqrt(groups_var) * np.random.normal(size=n // group_size)
level1_re = np.sqrt(level1_var) * np.random.normal(size=n // level1_size)
level2_re = np.sqrt(level2_var) * np.random.normal(size=n // level2_size)
# Simulate the response variable
y = groups_re[groups_ix] + level1_re[level1_ix] + level2_re[level2_ix]
y += np.sqrt(resid_var) * np.random.normal(size=n)
# Put everything into a dataframe.
df = pd.DataFrame(xmat, columns=["x%d" % j for j in range(p)])
df["y"] = y + xmat[:, 0] - xmat[:, 3]
df["groups_ix"] = groups_ix
df["level1_ix"] = level1_ix
df["level2_ix"] = level2_ix
# Fit the model
cs = sm.cov_struct.Nested()
dep_fml = "0 + level1_ix + level2_ix"
gee = sm.GEE.from_formula(
"y ~ x0 + x1 + x2 + x3 + x4", cov_struct=cs, dep_data=dep_fml, groups="groups_ix", data=df
)
model = gee.fit()
return ModelWithResults(model=model, alg=gee, inference_dataframe=df)
def glm_model():
# Generalized Linear Model (GLM)
data = get_dataset("scotland")
data.exog = sm.add_constant(data.exog)
glm = sm.GLM(data.endog, data.exog, family=sm.families.Gamma())
model = glm.fit()
return ModelWithResults(model=model, alg=glm, inference_dataframe=data.exog)
def glmgam_model():
# Generalized Additive Model (GAM)
from statsmodels.gam.tests.test_penalized import df_autos
x_spline = df_autos[["weight", "hp"]]
bs = sm.gam.BSplines(x_spline, df=[12, 10], degree=[3, 3])
alpha = np.array([21833888.8, 6460.38479])
gam_bs = sm.GLMGam.from_formula(
"city_mpg ~ fuel + drive", data=df_autos, smoother=bs, alpha=alpha
)
model = gam_bs.fit()
return ModelWithResults(model=model, alg=gam_bs, inference_dataframe=df_autos)
def arma_model():
# Autoregressive Moving Average (ARMA)
np.random.seed(12345)
arparams = np.array([1, -0.75, 0.25])
maparams = np.array([1, 0.65, 0.35])
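# A hedged sketch of how this fixture could continue (an assumption, not the
# original code): the imported helpers would allow simulating a series and
# fitting an ARIMA(2, 0, 2), for example:
#   y = arma_generate_sample(arparams, maparams, nsample=250)
#   arima = ARIMA(y, order=(2, 0, 2))
#   model = arima.fit()
#   return ModelWithResults(model=model, alg=arima, inference_dataframe=pd.DataFrame(y))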
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.metrics import f1_score,accuracy_score
import math
split_sequences=True
word2idx = {}
tag2idx = {}
pos2idx = {}
word_idx = 0
tag_idx = 0
pos_idx = 0
Xtrain = []
Ytrain = []
Ptrain=[]
currentX = []
currentY = []
currentP=[]
for line in open('train1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
word, tag, pos = r
if word not in word2idx:
word2idx[word] = word_idx
word_idx += 1
currentX.append(word2idx[word])
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
currentY.append(tag2idx[tag])
if pos not in pos2idx:
pos2idx[pos] = pos_idx
pos_idx += 1
currentP.append(pos2idx[pos])
elif split_sequences:
Xtrain.append(currentX)
Ytrain.append(currentY)
Ptrain.append(currentP)
currentX = []
currentY = []
currentP=[]
if not split_sequences:
Xtrain = currentX
Ytrain = currentY
Ptrain=currentP
V = len(word2idx) + 1
M = max(max(p) for p in Ptrain) + 1
A = np.ones((M, M))
pi = np.ones(M)
for p in Ptrain:
pi[p[0]] += 1
for i in range(len(p)-1):
A[p[i], p[i+1]] += 1
A /= A.sum(axis=1, keepdims=True)
pi /= pi.sum()
# find the observation matrix
B = np.ones((M, V)) # add-one smoothing
for x, p in zip(Xtrain, Ptrain):
for xi, pii in zip(x, p):
B[pii, xi] += 1
B /= B.sum(axis=1, keepdims=True)
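# At this point the HMM parameters have been estimated by add-one-smoothed counting:
#   pi[i]   ~ P(first POS tag = i)
#   A[i, j] ~ P(tag j follows tag i)        (rows sum to 1)
#   B[i, w] ~ P(word w is emitted by tag i) (rows sum to 1)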
class HMM:
def __init__(self, M,A,B,C,C1,pi,SUFF,SUFF1,word2idx):
self.M = M # number of hidden states
self.A=A
self.B=B
self.C=C
self.C1=C1
self.pi=pi
self.SUFF=SUFF
self.SUFF1=SUFF1
self.word2idx=word2idx
def get_state_sequence(self, x):
# returns the most likely state sequence given observed sequence x
# using the Viterbi algorithm
T = len(x)
delta = np.zeros((T, self.M))
psi = np.zeros((T, self.M))
try:
delta[0] = np.log(self.pi) + np.log(self.B[:,x[0]])
except IndexError:
try:
delta[0] = np.log(self.pi) + np.log(self.C[:,SUFF.index([*word2idx][x[0]][:2])])
except IndexError:
delta[0] = np.log(self.pi)
except ValueError:
try:
delta[0] = np.log(self.pi) + np.log(self.C1[:,SUFF1.index([*word2idx][x[0]][:1])])
except ValueError:
delta[0] = np.log(self.pi)
for t in range(1, T):
for j in range(self.M):
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.B[j, x[t]])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C[j, SUFF.index([*word2idx][x[t]][:2])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except ValueError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C1[j, SUFF1.index([*word2idx][x[t]][:1])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except ValueError:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j]))
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C1[j, SUFF1.index([*word2idx][x[t]][:1])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j]))
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
# backtrack
states = np.zeros(T, dtype=np.int32)
states[T-1] = np.argmax(delta[T-1])
for t in range(T-2, -1, -1):
states[t] = psi[t+1, states[t+1]]
return states
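# The Viterbi recursion implemented above, in log space:
#   delta[t, j] = max_i(delta[t-1, i] + log A[i, j]) + log B[j, x[t]]
#   psi[t, j]   = argmax_i(delta[t-1, i] + log A[i, j])
# When a word index falls outside B (i.e. the word was unseen in training), the
# emission term falls back to the character-class matrices C and C1 built below.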
SUFF=[]
SUFF1=[]
for w in [*word2idx]:
SUFF.append(w[:2])
SUFF1.append(w[:1])
suff_pos = defaultdict(list)
suff_pos1 = defaultdict(list)
idx=0
for suf in SUFF:
suff_pos[suf].append(idx)
idx+=1
idx=0
for suf in SUFF1:
suff_pos1[suf].append(idx)
idx+=1
C=np.ones((M,V))
C1=np.ones((M,V))
for l in suff_pos.values():
C[:,l]=B[:,l].sum(axis=1, keepdims=True)/len(l)
for l in suff_pos1.values():
C1[:,l]=B[:,l].sum(axis=1, keepdims=True)/len(l)
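# C and C1 are class-based emission estimates: for every group of training words
# sharing the same first two characters (C) or first character (C1), the matching
# columns of B are averaged, giving the Viterbi fallback a smoothed distribution
# for words that were not observed during training.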
word_idx = len(word2idx)
w_known=len(word2idx)
word2idx_test={}
Xtest = []
currentX = []
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
word = r[0]
if word not in word2idx:
word2idx_test[word] = word_idx
word2idx[word]= word_idx
word_idx += 1
else:
word2idx_test[word]=word2idx[word]
currentX.append(word2idx_test[word])
elif split_sequences:
Xtest.append(currentX)
currentX = []
hmm = HMM(M,A,B,C,C1,pi,SUFF,SUFF1,word2idx)
P1test = []
for x in Xtest:
p = hmm.get_state_sequence(x)
P1test.append(p)
Ptest=[]
list1=[]
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
pos = r[2]
list1.append(pos2idx[pos])
elif split_sequences:
Ptest.append(list1)
list1 = []
Ytest=[]
list1=[]
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
tag = r[1]
list1.append(tag2idx[tag])
elif split_sequences:
Ytest.append(list1)
list1 = []
def accuracy(T, Y):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y in zip(T, Y):
n_correct += np.sum(t == y)
n_total += len(y)
return float(n_correct) / n_total
def accuracy_unknown(T, Y,X):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y,x in zip(T, Y,X):
for ti,yi,xi in zip (t,y,x):
if xi>w_known :
n_correct += (ti == yi)
n_total += 1
return float(n_correct) / n_total
def accuracy_known(T, Y,X):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y,x in zip(T, Y,X):
for ti,yi,xi in zip (t,y,x):
if xi<=w_known :
n_correct += (ti == yi)
n_total += 1
return float(n_correct) / n_total
def total_f1_score(T, Y):
# inputs are lists of lists
T = np.concatenate(T)
Y = np.concatenate(Y)
return f1_score(T, Y, average=None).mean()
print("test accuracy:", accuracy(P1test, Ptest))
accuracy=accuracy(P1test, Ptest)
print("test f1:", total_f1_score(P1test, Ptest))
f1=total_f1_score(P1test, Ptest)
print("test accuracy for unknown words:",accuracy_unknown(P1test, Ptest,Xtest))
unknown_ac=accuracy_unknown(Ptest, P1test,Xtest)
print("test accuracy for known words:",accuracy_known(P1test, Ptest,Xtest))
known_ac=accuracy_known(Ptest, P1test,Xtest)
Y = np.concatenate(Ytest)
P = np.concatenate(Ptest)
Z = np.concatenate(P1test)
X= np.concatenate(Xtest)
print("accuracy score for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]]))
a11= accuracy_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]]))
a12= accuracy_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]]))
a13=accuracy_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]]))
a14=accuracy_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[4]+" :",accuracy_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]]))
a15=accuracy_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]]))
a16=accuracy_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]]))
a17=accuracy_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[7]+" :", accuracy_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]]))
a18=accuracy_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[8]+" :", accuracy_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]]))
a19= accuracy_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[9]+" :", accuracy_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]]))
a110= accuracy_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[10]+" :", accuracy_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]]))
a111= accuracy_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[11]+" :", accuracy_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]]))
a112= accuracy_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[12]+" :", accuracy_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]]))
a113= accuracy_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[13]+" :", accuracy_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]]))
a114= accuracy_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]])
print("test f1 for tag "+list(tag2idx.keys())[0]+" :", f1_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]], average=None).mean())
a21= f1_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[1]+" :", f1_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]], average=None).mean())
a22= f1_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[2]+" :", f1_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]], average=None).mean())
a23=f1_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[3]+" :", f1_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]], average=None).mean())
a24=f1_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[4]+" :", f1_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]], average=None).mean())
a25=f1_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[5]+" :", f1_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]], average=None).mean())
a26=f1_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[6]+" :", f1_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]], average=None).mean())
a27=f1_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[7]+" :", f1_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]], average=None).mean())
a28=f1_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[8]+" :", f1_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]], average=None).mean())
a29= f1_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[9]+" :", f1_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]], average=None).mean())
a210= f1_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[10]+" :", f1_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]], average=None).mean())
a211= f1_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[11]+" :", f1_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]], average=None).mean())
a212= f1_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[12]+" :", f1_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]], average=None).mean())
a213= f1_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[13]+" :", f1_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]], average=None).mean())
a214= f1_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]], average=None).mean()
print("accuracy for unknown words for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]]))
a31= accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[0]+" :",len(set(np.where(X[np.where(Y==0)[0]]>w_known)[0])))
a41= len(set(np.where(X[np.where(Y==0)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]]))
a32= accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[1]+" :",len(set(np.where(X[np.where(Y==1)[0]]>w_known)[0])))
a42= len(set(np.where(X[np.where(Y==1)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]]))
a33= accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[2]+" :",len(set(np.where(X[np.where(Y==2)[0]]>w_known)[0])))
a43= len(set(np.where(X[np.where(Y==2)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]]))
a34= accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[3]+" :",len(set(np.where(X[np.where(Y==3)[0]]>w_known)[0])))
a44= len(set(np.where(X[np.where(Y==3)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[4]+" :", accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]]))
a35= accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[4]+" :",len(set(np.where(X[np.where(Y==4)[0]]>w_known)[0])))
a45= len(set(np.where(X[np.where(Y==4)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]]))
a36= accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[5]+" :",len(set(np.where(X[np.where(Y==5)[0]]>w_known)[0])))
a46= len(set(np.where(X[np.where(Y==5)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]],P[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]]))
a37= accuracy_score(Z[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]],P[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[6]+" :",len(set(np.where(X[np.where(Y==6)[0]]>w_known)[0])))
a47= len(set(np.where(X[np.where(Y==6)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[7]+" :", accuracy_score(Z[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]],P[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]]))
a38= accuracy_score(Z[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]],P[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[7]+" :",len(set(np.where(X[np.where(Y==7)[0]]>608)[0])))
a48= len(set(np.where(X[np.where(Y==7)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[8]+" :", accuracy_score(Z[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]],P[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]]))
a39= accuracy_score(Z[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]],P[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[8]+" :",len(set(np.where(X[np.where(Y==8)[0]]>w_known)[0])))
a49= len(set(np.where(X[np.where(Y==8)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[9]+" :", accuracy_score(Z[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]],P[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]]))
a310= accuracy_score(Z[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]],P[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[9]+" :",len(set(np.where(X[np.where(Y==9)[0]]>w_known)[0])))
a410= len(set(np.where(X[np.where(Y==9)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[10]+" :", accuracy_score(Z[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]],P[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]]))
a311=accuracy_score(Z[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]],P[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[10]+" :",len(set(np.where(X[np.where(Y==10)[0]]>w_known)[0])))
a411= len(set(np.where(X[np.where(Y==10)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[11]+" :", accuracy_score(Z[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]],P[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]]))
a312= accuracy_score(Z[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]],P[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[11]+" :",len(set(np.where(X[np.where(Y==11)[0]]>w_known)[0])))
a412= len(set(np.where(X[np.where(Y==11)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[12]+" :", accuracy_score(Z[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]],P[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]]))
a313= accuracy_score(Z[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]],P[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[12]+" :",len(set(np.where(X[np.where(Y==12)[0]]>w_known)[0])))
a413= len(set(np.where(X[np.where(Y==12)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[13]+" :", accuracy_score(Z[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]],P[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]]))
a314= accuracy_score(Z[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]],P[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[13]+" :",len(set(np.where(X[np.where(Y==13)[0]]>w_known)[0])))
a414= len(set(np.where(X[np.where(Y==13)[0]]>w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]]))
a51= accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[0]+" :",len(set(np.where(X[np.where(Y==0)[0]]<=w_known)[0])))
a61= len(set(np.where(X[np.where(Y==0)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]]))
a52= accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[1]+" :",len(set(np.where(X[np.where(Y==1)[0]]<=w_known)[0])))
a62= len(set(np.where(X[np.where(Y==1)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]]))
a53= accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[2]+" :",len(set(np.where(X[np.where(Y==2)[0]]<=w_known)[0])))
a63= len(set(np.where(X[np.where(Y==2)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]<=w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]<=w_known]]))
a54= accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]<=w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[3]+" :",len(set(np.where(X[np.where(Y==3)[0]]<=w_known)[0])))
a64=len(set(np.where(X[np.where(Y==3)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[4]+" :", accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]<=w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]<=w_known]]))
a55= accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]<=w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[4]+" :",len(set(np.where(X[np.where(Y==4)[0]]<=w_known)[0])))
a65=len(set(np.where(X[np.where(Y==4)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]<=w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]<=w_known]]))
a56= accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]<=w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[5]+" :",len(set(np.where(X[np.where(Y==5)[0]]<=w_known)[0])))
a66=len(set(np.where(X[np.where(Y==5)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0][X[np.where(Y==6)[0]]<=w_known]],P[np.where(Y==6)[0][X[np.where(Y==6)[0]]<=w_known]]))
a57= accuracy_score(Z[np.where(Y==6)[0][X[np.where(Y==6)[0]]<=w_known]],P[np.where(Y==6)[0][X[np.where(Y==6)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[6]+" :",len(set(np.where(X[np.where(Y==6)[0]]<=w_known)[0])))
a67=len(set(np.where(X[np.where(Y==6)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[7]+" :", accuracy_score(Z[np.where(Y==7)[0][X[np.where(Y==7)[0]]<=w_known]],P[np.where(Y==7)[0][X[np.where(Y==7)[0]]<=w_known]]))
a58= accuracy_score(Z[np.where(Y==7)[0][X[np.where(Y==7)[0]]<=w_known]],P[np.where(Y==7)[0][X[np.where(Y==7)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[7]+" :",len(set(np.where(X[np.where(Y==7)[0]]<=w_known)[0])))
a68=len(set(np.where(X[np.where(Y==7)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[8]+" :", accuracy_score(Z[np.where(Y==8)[0][X[np.where(Y==8)[0]]<=w_known]],P[np.where(Y==8)[0][X[np.where(Y==8)[0]]<=w_known]]))
a59= accuracy_score(Z[np.where(Y==8)[0][X[np.where(Y==8)[0]]<=w_known]],P[np.where(Y==8)[0][X[np.where(Y==8)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[8]+" :",len(set(np.where(X[np.where(Y==8)[0]]<=w_known)[0])))
a69=len(set(np.where(X[np.where(Y==8)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[9]+" :", accuracy_score(Z[np.where(Y==9)[0][X[np.where(Y==9)[0]]<=w_known]],P[np.where(Y==9)[0][X[np.where(Y==9)[0]]<=w_known]]))
a510= accuracy_score(Z[np.where(Y==9)[0][X[np.where(Y==9)[0]]<=w_known]],P[np.where(Y==9)[0][X[np.where(Y==9)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[9]+" :",len(set(np.where(X[np.where(Y==9)[0]]<=w_known)[0])))
a610=len(set(np.where(X[np.where(Y==9)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[10]+" :", accuracy_score(Z[np.where(Y==10)[0][X[np.where(Y==10)[0]]<=w_known]],P[np.where(Y==10)[0][X[np.where(Y==10)[0]]<=w_known]]))
a511= accuracy_score(Z[np.where(Y==10)[0][X[np.where(Y==10)[0]]<=w_known]],P[np.where(Y==10)[0][X[np.where(Y==10)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[10]+" :",len(set(np.where(X[np.where(Y==10)[0]]<=w_known)[0])))
a611=len(set(np.where(X[np.where(Y==10)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[11]+" :", accuracy_score(Z[np.where(Y==11)[0][X[np.where(Y==11)[0]]<=w_known]],P[np.where(Y==11)[0][X[np.where(Y==11)[0]]<=w_known]]))
a512= accuracy_score(Z[np.where(Y==11)[0][X[np.where(Y==11)[0]]<=w_known]],P[np.where(Y==11)[0][X[np.where(Y==11)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[11]+" :",len(set(np.where(X[np.where(Y==11)[0]]<=w_known)[0])))
a612=len(set(np.where(X[np.where(Y==11)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[12]+" :", accuracy_score(Z[np.where(Y==12)[0][X[np.where(Y==12)[0]]<=w_known]],P[np.where(Y==12)[0][X[np.where(Y==12)[0]]<=w_known]]))
a513= accuracy_score(Z[np.where(Y==12)[0][X[np.where(Y==12)[0]]<=w_known]],P[np.where(Y==12)[0][X[np.where(Y==12)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[12]+" :",len(set(np.where(X[np.where(Y==12)[0]]<=w_known)[0])))
a613=len(set(np.where(X[np.where(Y==12)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[13]+" :", accuracy_score(Z[np.where(Y==13)[0][X[np.where(Y==13)[0]]<=w_known]],P[np.where(Y==13)[0][X[np.where(Y==13)[0]]<=w_known]]))
a514= accuracy_score(Z[np.where(Y==13)[0][X[np.where(Y==13)[0]]<=w_known]],P[np.where(Y==13)[0][X[np.where(Y==13)[0]]<=w_known]])
from parcels.tools.loggers import logger
from parcels.tools.converters import unitconverters_map, UnitConverter, Geographic, GeographicPolar
from parcels.tools.converters import TimeConverter
from parcels.tools.error import FieldSamplingError, FieldOutOfBoundError, TimeExtrapolationError
import parcels.tools.interpolation_utils as i_u
import collections.abc
from py import path
import numpy as np
from ctypes import Structure, c_int, c_float, POINTER, pointer
import xarray as xr
import datetime
import math
from .grid import Grid, CGrid, GridCode
import dask.array as da
__all__ = ['Field', 'VectorField', 'SummedField', 'NestedField']
class Field(object):
"""Class that encapsulates access to field data.
:param name: Name of the field
:param data: 2D, 3D or 4D numpy array of field data.
1. If data shape is [xdim, ydim], [xdim, ydim, zdim], [xdim, ydim, tdim] or [xdim, ydim, zdim, tdim],
whichever is relevant for the dataset, use the flag transpose=True
2. If data shape is [ydim, xdim], [zdim, ydim, xdim], [tdim, ydim, xdim] or [tdim, zdim, ydim, xdim],
use the flag transpose=False
3. If data has any other shape, you first need to reorder it
:param lon: Longitude coordinates (numpy vector or array) of the field (only if grid is None)
:param lat: Latitude coordinates (numpy vector or array) of the field (only if grid is None)
:param depth: Depth coordinates (numpy vector or array) of the field (only if grid is None)
:param time: Time coordinates (numpy vector) of the field (only if grid is None)
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation: (only if grid is None)
1. spherical: Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat (default): No conversion, lat/lon are assumed to be in m.
:param timestamps: A numpy array containing the timestamps for each of the files in filenames, for loading
from netCDF files only. Default is None if the netCDF dimensions dictionary includes time.
:param grid: :class:`parcels.grid.Grid` object containing all the lon, lat depth, time
mesh and time_origin information. Can be constructed from any of the Grid objects
:param fieldtype: Type of Field to be used for UnitConverter when using SummedFields
(either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)
:param transpose: Transpose data to required (lon, lat) layout
:param vmin: Minimum allowed value on the field. Data below this value are set to zero
:param vmax: Maximum allowed value on the field. Data above this value are set to zero
:param time_origin: Time origin (TimeConverter object) of the time axis (only if grid is None)
:param interp_method: Method for interpolation. Either 'linear' or 'nearest'
:param allow_time_extrapolation: boolean whether to allow for extrapolation in time
(i.e. beyond the last available time snapshot)
:param time_periodic: boolean whether to loop periodically over the time component of the Field
This flag overrides the allow_time_extrapolation and sets it to False
"""
def __init__(self, name, data, lon=None, lat=None, depth=None, time=None, grid=None, mesh='flat', timestamps=None,
fieldtype=None, transpose=False, vmin=None, vmax=None, time_origin=None,
interp_method='linear', allow_time_extrapolation=None, time_periodic=False, **kwargs):
if not isinstance(name, tuple):
self.name = name
self.filebuffername = name
else:
self.name, self.filebuffername = name
self.data = data
time_origin = TimeConverter(0) if time_origin is None else time_origin
if grid:
self.grid = grid
else:
self.grid = Grid.create_grid(lon, lat, depth, time, time_origin=time_origin, mesh=mesh)
self.igrid = -1
# self.lon, self.lat, self.depth and self.time are not used anymore in parcels.
# self.grid should be used instead.
# Those variables are still defined for backwards compatibility with users codes.
self.lon = self.grid.lon
self.lat = self.grid.lat
self.depth = self.grid.depth
self.fieldtype = self.name if fieldtype is None else fieldtype
if self.grid.mesh == 'flat' or (self.fieldtype not in unitconverters_map.keys()):
self.units = UnitConverter()
elif self.grid.mesh == 'spherical':
self.units = unitconverters_map[self.fieldtype]
else:
raise ValueError("Unsupported mesh type. Choose either: 'spherical' or 'flat'")
self.timestamps = timestamps
if type(interp_method) is dict:
if self.name in interp_method:
self.interp_method = interp_method[self.name]
else:
raise RuntimeError('interp_method is a dictionary but %s is not in it' % name)
else:
self.interp_method = interp_method
if self.interp_method in ['bgrid_velocity', 'bgrid_w_velocity', 'bgrid_tracer'] and \
self.grid.gtype in [GridCode.RectilinearSGrid, GridCode.CurvilinearSGrid]:
logger.warning_once('General s-levels are not supported in B-grid. RectilinearSGrid and CurvilinearSGrid can still be used to deal with shaved cells, but the levels must be horizontal.')
self.fieldset = None
if allow_time_extrapolation is None:
self.allow_time_extrapolation = True if len(self.grid.time) == 1 else False
else:
self.allow_time_extrapolation = allow_time_extrapolation
self.time_periodic = time_periodic
if self.time_periodic and self.allow_time_extrapolation:
logger.warning_once("allow_time_extrapolation and time_periodic cannot be used together.\n \
allow_time_extrapolation is set to False")
self.allow_time_extrapolation = False
if self.time_periodic:
logger.warning_once("When using time_periodic=True, it is necessary that the first and last time steps\n \
of the series are the same, with time[-1] = time[0] + T")
self.vmin = vmin
self.vmax = vmax
if not self.grid.defer_load:
self.data = self.reshape(self.data, transpose)
# Hack around the fact that NaN and ridiculously large values
# propagate in SciPy's interpolators
self.data[np.isnan(self.data)] = 0.
if self.vmin is not None:
self.data[self.data < self.vmin] = 0.
if self.vmax is not None:
self.data[self.data > self.vmax] = 0.
self._scaling_factor = None
# Variable names in JIT code
self.dimensions = kwargs.pop('dimensions', None)
self.indices = kwargs.pop('indices', None)
self.dataFiles = kwargs.pop('dataFiles', None)
self.netcdf_engine = kwargs.pop('netcdf_engine', 'netcdf4')
self.loaded_time_indices = []
self.creation_log = kwargs.pop('creation_log', '')
self.field_chunksize = kwargs.pop('field_chunksize', 'auto')
# data_full_zdim is the vertical dimension of the complete field data, ignoring the indices.
# (data_full_zdim = grid.zdim if no indices are used, for A- and C-grids and for some B-grids). It is used for the B-grid,
# since some datasets do not provide the deeper level of data (which is ignored by the interpolation).
self.data_full_zdim = kwargs.pop('data_full_zdim', None)
self.data_chunks = []
self.c_data_chunks = []
self.nchunks = []
self.chunk_set = False
self.filebuffers = [None] * 3
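# Usage sketch (illustrative, not part of the class): a Field can be built
# directly from in-memory arrays, per the constructor documented above, e.g.
#   field = Field('U', data, lon=lon, lat=lat, depth=depth, time=time,
#                 mesh='spherical', interp_method='linear')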
@classmethod
def get_dim_filenames(cls, filenames, dim):
if isinstance(filenames, str) or not isinstance(filenames, collections.abc.Iterable):
return [filenames]
elif isinstance(filenames, dict):
assert dim in filenames.keys(), \
'filename dimension keys must be lon, lat, depth or data'
filename = filenames[dim]
if isinstance(filename, str):
return [filename]
else:
return filename
else:
return filenames
@classmethod
def from_netcdf(cls, filenames, variable, dimensions, indices=None, grid=None,
mesh='spherical', timestamps=None, allow_time_extrapolation=None, time_periodic=False,
deferred_load=True, field_chunksize='auto', **kwargs):
"""Create field from netCDF file
:param filenames: list of filenames to read for the field. filenames can be a list [files] or
a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data)
In the latter case, time values are in filenames['data']
:param variable: Tuple mapping field name to variable name in the NetCDF file.
:param dimensions: Dictionary mapping variable names for the relevant dimensions in the NetCDF file
:param indices: dictionary mapping indices for each dimension to read from file.
This can be used for reading in only a subregion of the NetCDF file.
Note that negative indices are not allowed.
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation:
1. spherical (default): Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat: No conversion, lat/lon are assumed to be in m.
:param timestamps: A numpy array of datetime64 objects containing the timestamps for each of the files in filenames.
Default is None if dimensions includes time.
:param allow_time_extrapolation: boolean whether to allow for extrapolation in time
(i.e. beyond the last available time snapshot)
Default is False if dimensions includes time, else True
:param time_periodic: boolean whether to loop periodically over the time component of the FieldSet
This flag overrides the allow_time_extrapolation and sets it to False
:param deferred_load: boolean whether to only pre-load data (in deferred mode) or
fully load them (default: True). It is advised to deferred load the data, since in
that case Parcels deals with a better memory management during particle set execution.
deferred_load=False is however sometimes necessary for plotting the fields.
:param field_chunksize: size of the chunks in dask loading
:param netcdf_engine: engine to use for netcdf reading in xarray. Default is 'netcdf4',
but in cases where this doesn't work, setting netcdf_engine='scipy' could help
"""
# Ensure the timestamps array is compatible with the user-provided datafiles.
if timestamps is not None:
if isinstance(filenames, list):
assert len(filenames) == len(timestamps), 'Number of files and number of timestamps must be equal.'
elif isinstance(filenames, dict):
for k in filenames.keys():
assert(len(filenames[k]) == len(timestamps)), 'Number of files and number of timestamps must be equal.'
else:
raise TypeError("filenames type is inconsistent with manual timestamp provision.")
if isinstance(variable, xr.core.dataarray.DataArray):
lonlat_filename = variable
depth_filename = variable
data_filenames = variable
netcdf_engine = 'xarray'
else:
if isinstance(variable, str): # for backward compatibility with Parcels < 2.0.0
variable = (variable, variable)
assert len(variable) == 2, 'The variable tuple must have length 2. Use FieldSet.from_netcdf() for multiple variables'
data_filenames = cls.get_dim_filenames(filenames, 'data')
lonlat_filename = cls.get_dim_filenames(filenames, 'lon')
if isinstance(filenames, dict):
assert len(lonlat_filename) == 1
if lonlat_filename != cls.get_dim_filenames(filenames, 'lat'):
raise NotImplementedError('longitude and latitude dimensions are currently processed together from one single file')
lonlat_filename = lonlat_filename[0]
if 'depth' in dimensions:
depth_filename = cls.get_dim_filenames(filenames, 'depth')
if isinstance(filenames, dict) and len(depth_filename) != 1:
raise NotImplementedError('Vertically adaptive meshes not implemented for from_netcdf()')
depth_filename = depth_filename[0]
netcdf_engine = kwargs.pop('netcdf_engine', 'netcdf4')
indices = {} if indices is None else indices.copy()
for ind in indices.values():
assert np.min(ind) >= 0, \
('Negative indices are currently not allowed in Parcels. '
+ 'This is related to the non-increasing dimension it could generate '
+ 'if the domain goes from lon[-4] to lon[6] for example. '
+ 'Please raise an issue on https://github.com/OceanParcels/parcels/issues '
+ 'if you would need such feature implemented.')
interp_method = kwargs.pop('interp_method', 'linear')
if type(interp_method) is dict:
if variable[0] in interp_method:
interp_method = interp_method[variable[0]]
else:
raise RuntimeError('interp_method is a dictionary but %s is not in it' % variable[0])
with NetcdfFileBuffer(lonlat_filename, dimensions, indices, netcdf_engine) as filebuffer:
lon, lat = filebuffer.read_lonlat
indices = filebuffer.indices
# Check if parcels_mesh has been explicitly set in file
if 'parcels_mesh' in filebuffer.dataset.attrs:
mesh = filebuffer.dataset.attrs['parcels_mesh']
if 'depth' in dimensions:
with NetcdfFileBuffer(depth_filename, dimensions, indices, netcdf_engine, interp_method=interp_method) as filebuffer:
filebuffer.name = filebuffer.parse_name(variable[1])
depth = filebuffer.read_depth
data_full_zdim = filebuffer.data_full_zdim
else:
indices['depth'] = [0]
depth = np.zeros(1)
data_full_zdim = 1
kwargs['data_full_zdim'] = data_full_zdim
if len(data_filenames) > 1 and 'time' not in dimensions and timestamps is None:
raise RuntimeError('Multiple files given but no time dimension specified')
if grid is None:
# Concatenate time variable to determine overall dimension
# across multiple files
if timestamps is not None:
timeslices = timestamps
time = np.concatenate(timeslices)
dataFiles = np.array(data_filenames)
elif netcdf_engine == 'xarray':
with NetcdfFileBuffer(data_filenames, dimensions, indices, netcdf_engine) as filebuffer:
time = filebuffer.time
timeslices = time if isinstance(time, (list, np.ndarray)) else [time]
dataFiles = data_filenames if isinstance(data_filenames, (list, np.ndarray)) else [data_filenames] * len(time)
else:
timeslices = []
dataFiles = []
for fname in data_filenames:
with NetcdfFileBuffer(fname, dimensions, indices, netcdf_engine,
field_chunksize=field_chunksize) as filebuffer:
ftime = filebuffer.time
timeslices.append(ftime)
dataFiles.append([fname] * len(ftime))
timeslices = np.array(timeslices)
time = np.concatenate(timeslices)
dataFiles = np.concatenate(np.array(dataFiles))
if time.size == 1 and time[0] is None:
time[0] = 0
time_origin = TimeConverter(time[0])
time = time_origin.reltime(time)
if not np.all((time[1:]-time[:-1]) > 0):
id_not_ordered = np.where(time[1:] < time[:-1])[0][0]
raise AssertionError('Please make sure your netCDF files are ordered in time. First pair of non-ordered files: %s, %s'
% (dataFiles[id_not_ordered], dataFiles[id_not_ordered+1]))
grid = Grid.create_grid(lon, lat, depth, time, time_origin=time_origin, mesh=mesh)
grid.timeslices = timeslices
kwargs['dataFiles'] = dataFiles
if 'time' in indices:
logger.warning_once('time dimension in indices is not necessary anymore. It is then ignored.')
if 'full_load' in kwargs: # for backward compatibility with Parcels < v2.0.0
deferred_load = not kwargs['full_load']
if grid.time.size <= 3 or deferred_load is False:
# Pre-allocate data before reading files into buffer
data_list = []
ti = 0
for tslice, fname in zip(grid.timeslices, data_filenames):
with NetcdfFileBuffer(fname, dimensions, indices, netcdf_engine,
interp_method=interp_method, data_full_zdim=data_full_zdim,
field_chunksize=field_chunksize) as filebuffer:
# If Field.from_netcdf is called directly, it may not have a 'data' dimension
# In that case, assume that 'name' is the data dimension
if netcdf_engine == 'xarray':
tslice = [tslice]
else:
filebuffer.name = filebuffer.parse_name(variable[1])
buffer_data = filebuffer.data
if len(buffer_data.shape) == 2:
data_list.append(buffer_data.reshape(sum(((len(tslice), 1), buffer_data.shape), ())))
elif len(buffer_data.shape) == 3:
if len(filebuffer.indices['depth']) > 1:
data_list.append(buffer_data.reshape(sum(((1,), buffer_data.shape), ())))
else:
data_list.append(buffer_data.reshape(sum(((len(tslice), 1), buffer_data.shape[1:]), ())))
else:
data_list.append(buffer_data)
ti += len(tslice)
data = da.concatenate(data_list, axis=0)
else:
grid.defer_load = True
grid.ti = -1
data = DeferredArray()
if allow_time_extrapolation is None:
allow_time_extrapolation = False if 'time' in dimensions else True
kwargs['dimensions'] = dimensions.copy()
kwargs['indices'] = indices
kwargs['time_periodic'] = time_periodic
kwargs['netcdf_engine'] = netcdf_engine
kwargs['field_chunksize'] = field_chunksize
variable = kwargs['var_name'] if netcdf_engine == 'xarray' else variable
return cls(variable, data, grid=grid, timestamps=timestamps,
allow_time_extrapolation=allow_time_extrapolation, interp_method=interp_method, **kwargs)
def reshape(self, data, transpose=False):
# Ensure that field data is the right data type
if not data.dtype == np.float32:
logger.warning_once("Casting field data to np.float32")
data = data.astype(np.float32)
lib = np if isinstance(data, np.ndarray) else da
if transpose:
data = lib.transpose(data)
if self.grid.lat_flipped:
data = lib.flip(data, axis=-2)
if self.grid.tdim == 1:
if len(data.shape) < 4:
data = data.reshape(sum(((1,), data.shape), ()))
if self.grid.zdim == 1:
if len(data.shape) == 4:
data = data.reshape(sum(((data.shape[0],), data.shape[2:]), ()))
if len(data.shape) == 4:
assert data.shape == (self.grid.tdim, self.grid.zdim, self.grid.ydim-2*self.grid.meridional_halo, self.grid.xdim-2*self.grid.zonal_halo), \
('Field %s expecting a data shape of [ydim, xdim], [zdim, ydim, xdim], [tdim, ydim, xdim] or [tdim, zdim, ydim, xdim]. Flag transpose=True could help to reorder the data.' % self.name)
else:
assert data.shape == (self.grid.tdim, self.grid.ydim-2*self.grid.meridional_halo, self.grid.xdim-2*self.grid.zonal_halo), \
('Field %s expecting a data shape of [ydim, xdim], [zdim, ydim, xdim], [tdim, ydim, xdim] or [tdim, zdim, ydim, xdim]. Flag transpose=True could help to reorder the data.' % self.name)
if self.grid.meridional_halo > 0 or self.grid.zonal_halo > 0:
data = self.add_periodic_halo(zonal=self.grid.zonal_halo > 0, meridional=self.grid.meridional_halo > 0, halosize=max(self.grid.meridional_halo, self.grid.zonal_halo), data=data)
return data
def set_scaling_factor(self, factor):
"""Scales the field data by some constant factor.
:param factor: scaling factor
"""
if self._scaling_factor:
raise NotImplementedError(('Scaling factor for field %s already defined.' % self.name))
self._scaling_factor = factor
if not self.grid.defer_load:
self.data *= factor
def __getitem__(self, key):
return self.eval(*key)
def calc_cell_edge_sizes(self):
"""Method to calculate cell sizes based on numpy.gradient method
Currently only works for Rectilinear Grids"""
if not self.grid.cell_edge_sizes:
if self.grid.gtype in (GridCode.RectilinearZGrid, GridCode.RectilinearSGrid):
self.grid.cell_edge_sizes['x'] = np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32)
self.grid.cell_edge_sizes['y'] = np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32)
x_conv = GeographicPolar() if self.grid.mesh == 'spherical' else UnitConverter()
y_conv = Geographic() if self.grid.mesh == 'spherical' else UnitConverter()
for y, (lat, dy) in enumerate(zip(self.grid.lat, np.gradient(self.grid.lat))):
for x, (lon, dx) in enumerate(zip(self.grid.lon, np.gradient(self.grid.lon))):
self.grid.cell_edge_sizes['x'][y, x] = x_conv.to_source(dx, lon, lat, self.grid.depth[0])
self.grid.cell_edge_sizes['y'][y, x] = y_conv.to_source(dy, lon, lat, self.grid.depth[0])
self.cell_edge_sizes = self.grid.cell_edge_sizes
else:
logger.error('Field.cell_edge_sizes() not implemented for %s grids. '
'You can provide Field.grid.cell_edge_sizes yourself, '
'e.g. in NEMO using the e1u fields etc. from the mesh_mask.nc file' % self.grid.gtype)
exit(-1)
def cell_areas(self):
"""Method to calculate cell sizes based on cell_edge_sizes
Currently only works for Rectilinear Grids"""
if not self.grid.cell_edge_sizes:
self.calc_cell_edge_sizes()
return self.grid.cell_edge_sizes['x'] * self.grid.cell_edge_sizes['y']
def search_indices_vertical_z(self, z):
grid = self.grid
z = np.float32(z)
if z < grid.depth[0] or z > grid.depth[-1]:
raise FieldOutOfBoundError(0, 0, z, field=self)
depth_index = grid.depth <= z
if z >= grid.depth[-1]:
zi = len(grid.depth) - 2
else:
zi = depth_index.argmin() - 1 if z >= grid.depth[0] else 0
zeta = (z-grid.depth[zi]) / (grid.depth[zi+1]-grid.depth[zi])
return (zi, zeta)
def search_indices_vertical_s(self, x, y, z, xi, yi, xsi, eta, ti, time):
grid = self.grid
if self.interp_method in ['bgrid_velocity', 'bgrid_w_velocity', 'bgrid_tracer']:
xsi = 1
eta = 1
if time < grid.time[ti]:
ti -= 1
if grid.z4d:
if ti == len(grid.time)-1:
depth_vector = (1-xsi)*(1-eta) * grid.depth[-1, :, yi, xi] + \
xsi*(1-eta) * grid.depth[-1, :, yi, xi+1] + \
xsi*eta * grid.depth[-1, :, yi+1, xi+1] + \
(1-xsi)*eta * grid.depth[-1, :, yi+1, xi]
else:
dv2 = (1-xsi)*(1-eta) * grid.depth[ti:ti+2, :, yi, xi] + \
xsi*(1-eta) * grid.depth[ti:ti+2, :, yi, xi+1] + \
xsi*eta * grid.depth[ti:ti+2, :, yi+1, xi+1] + \
(1-xsi)*eta * grid.depth[ti:ti+2, :, yi+1, xi]
tt = (time-grid.time[ti]) / (grid.time[ti+1]-grid.time[ti])
assert tt >= 0 and tt <= 1, 'Vertical s grid is being wrongly interpolated in time'
depth_vector = dv2[0, :] * (1-tt) + dv2[1, :] * tt
else:
depth_vector = (1-xsi)*(1-eta) * grid.depth[:, yi, xi] + \
xsi*(1-eta) * grid.depth[:, yi, xi+1] + \
xsi*eta * grid.depth[:, yi+1, xi+1] + \
(1-xsi)*eta * grid.depth[:, yi+1, xi]
z = np.float32(z)
depth_index = depth_vector <= z
if z >= depth_vector[-1]:
zi = len(depth_vector) - 2
else:
zi = depth_index.argmin() - 1 if z >= depth_vector[0] else 0
if z < depth_vector[zi] or z > depth_vector[zi+1]:
raise FieldOutOfBoundError(x, y, z, field=self)
zeta = (z - depth_vector[zi]) / (depth_vector[zi+1]-depth_vector[zi])
return (zi, zeta)
def reconnect_bnd_indices(self, xi, yi, xdim, ydim, sphere_mesh):
if xi < 0:
if sphere_mesh:
xi = xdim-2
else:
xi = 0
if xi > xdim-2:
if sphere_mesh:
xi = 0
else:
xi = xdim-2
if yi < 0:
yi = 0
if yi > ydim-2:
yi = ydim-2
if sphere_mesh:
xi = xdim - xi
return xi, yi
def search_indices_rectilinear(self, x, y, z, ti=-1, time=-1, search2D=False):
grid = self.grid
xi = yi = -1
if not grid.zonal_periodic:
if x < grid.lonlat_minmax[0] or x > grid.lonlat_minmax[1]:
raise FieldOutOfBoundError(x, y, z, field=self)
if y < grid.lonlat_minmax[2] or y > grid.lonlat_minmax[3]:
raise FieldOutOfBoundError(x, y, z, field=self)
if grid.mesh != 'spherical':
lon_index = grid.lon < x
if lon_index.all():
xi = len(grid.lon) - 2
else:
xi = lon_index.argmin() - 1 if lon_index.any() else 0
xsi = (x-grid.lon[xi]) / (grid.lon[xi+1]-grid.lon[xi])
if xsi < 0:
xi -= 1
xsi = (x-grid.lon[xi]) / (grid.lon[xi+1]-grid.lon[xi])
elif xsi > 1:
xi += 1
xsi = (x-grid.lon[xi]) / (grid.lon[xi+1]-grid.lon[xi])
else:
lon_fixed = grid.lon.copy()
indices = lon_fixed >= lon_fixed[0]
if not indices.all():
lon_fixed[indices.argmin():] += 360
if x < lon_fixed[0]:
lon_fixed -= 360
lon_index = lon_fixed < x
if lon_index.all():
xi = len(lon_fixed) - 2
else:
xi = lon_index.argmin() - 1 if lon_index.any() else 0
xsi = (x-lon_fixed[xi]) / (lon_fixed[xi+1]-lon_fixed[xi])
if xsi < 0:
xi -= 1
xsi = (x-lon_fixed[xi]) / (lon_fixed[xi+1]-lon_fixed[xi])
elif xsi > 1:
xi += 1
xsi = (x-lon_fixed[xi]) / (lon_fixed[xi+1]-lon_fixed[xi])
lat_index = grid.lat < y
if lat_index.all():
yi = len(grid.lat) - 2
else:
yi = lat_index.argmin() - 1 if lat_index.any() else 0
eta = (y-grid.lat[yi]) / (grid.lat[yi+1]-grid.lat[yi])
if eta < 0:
yi -= 1
eta = (y-grid.lat[yi]) / (grid.lat[yi+1]-grid.lat[yi])
elif eta > 1:
yi += 1
eta = (y-grid.lat[yi]) / (grid.lat[yi+1]-grid.lat[yi])
if grid.zdim > 1 and not search2D:
if grid.gtype == GridCode.RectilinearZGrid:
# Never reached in practice, because in this case we work with scipy
try:
(zi, zeta) = self.search_indices_vertical_z(z)
except FieldOutOfBoundError:
raise FieldOutOfBoundError(x, y, z, field=self)
elif grid.gtype == GridCode.RectilinearSGrid:
(zi, zeta) = self.search_indices_vertical_s(x, y, z, xi, yi, xsi, eta, ti, time)
else:
zi = -1
zeta = 0
if not ((0 <= xsi <= 1) and (0 <= eta <= 1) and (0 <= zeta <= 1)):
raise FieldSamplingError(x, y, z, field=self)
return (xsi, eta, zeta, xi, yi, zi)
def search_indices_curvilinear(self, x, y, z, xi, yi, ti=-1, time=-1, search2D=False):
xsi = eta = -1
grid = self.grid
invA = np.array([[1, 0, 0, 0],
[-1, 1, 0, 0],
[-1, 0, 0, 1],
[1, -1, 1, -1]])
maxIterSearch = 1e6
it = 0
if not grid.zonal_periodic:
if x < grid.lonlat_minmax[0] or x > grid.lonlat_minmax[1]:
if grid.lon[0, 0] < grid.lon[0, -1]:
raise FieldOutOfBoundError(x, y, z, field=self)
elif x < grid.lon[0, 0] and x > grid.lon[0, -1]:  # This prevents crashing for a domain like [160, -160]
raise FieldOutOfBoundError(x, y, z, field=self)
if y < grid.lonlat_minmax[2] or y > grid.lonlat_minmax[3]:
raise FieldOutOfBoundError(x, y, z, field=self)
while xsi < 0 or xsi > 1 or eta < 0 or eta > 1:
px = np.array([grid.lon[yi, xi], grid.lon[yi, xi+1], grid.lon[yi+1, xi+1], grid.lon[yi+1, xi]])
if grid.mesh == 'spherical':
px[0] = px[0]+360 if px[0] < x-225 else px[0]
px[0] = px[0]-360 if px[0] > x+225 else px[0]
px[1:] = np.where(px[1:] - px[0] > 180, px[1:]-360, px[1:])
px[1:] = np.where(-px[1:] + px[0] > 180, px[1:]+360, px[1:])
py = np.array([grid.lat[yi, xi], grid.lat[yi, xi+1], grid.lat[yi+1, xi+1], grid.lat[yi+1, xi]])
a = np.dot(invA, px)
b = np.dot(invA, py)
aa = a[3]*b[2] - a[2]*b[3]
bb = a[3]*b[0] - a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + x*b[3] - y*a[3]
cc = a[1]*b[0] - a[0]*b[1] + x*b[1] - y*a[1]
if abs(aa) < 1e-12: # Rectilinear cell, or quasi
eta = -cc / bb
else:
det2 = bb*bb-4*aa*cc
if det2 > 0: # so, if det is nan we keep the xsi, eta from previous iter
det = np.sqrt(det2)
eta = (-bb+det)/(2*aa)
if abs(a[1]+a[3]*eta) < 1e-12:  # this happens when a rectilinear cell is rotated by 90 degrees
xsi = ((y-py[0])/(py[1]-py[0]) + (y-py[3])/(py[2]-py[3])) * .5
else:
xsi = (x-a[0]-a[2]*eta) / (a[1]+a[3]*eta)
if xsi < 0 and eta < 0 and xi == 0 and yi == 0:
raise FieldOutOfBoundError(x, y, 0, field=self)
if xsi > 1 and eta > 1 and xi == grid.xdim-1 and yi == grid.ydim-1:
raise FieldOutOfBoundError(x, y, 0, field=self)
if xsi < 0:
xi -= 1
elif xsi > 1:
xi += 1
if eta < 0:
yi -= 1
elif eta > 1:
yi += 1
(xi, yi) = self.reconnect_bnd_indices(xi, yi, grid.xdim, grid.ydim, grid.mesh)
it += 1
if it > maxIterSearch:
print('Correct cell not found after %d iterations' % maxIterSearch)
raise FieldOutOfBoundError(x, y, 0, field=self)
if grid.zdim > 1 and not search2D:
if grid.gtype == GridCode.CurvilinearZGrid:
try:
(zi, zeta) = self.search_indices_vertical_z(z)
except FieldOutOfBoundError:
raise FieldOutOfBoundError(x, y, z, field=self)
elif grid.gtype == GridCode.CurvilinearSGrid:
(zi, zeta) = self.search_indices_vertical_s(x, y, z, xi, yi, xsi, eta, ti, time)
else:
zi = -1
zeta = 0
if not ((0 <= xsi <= 1) and (0 <= eta <= 1) and (0 <= zeta <= 1)):
raise FieldSamplingError(x, y, z, field=self)
return (xsi, eta, zeta, xi, yi, zi)
def search_indices(self, x, y, z, xi, yi, ti=-1, time=-1, search2D=False):
if self.grid.gtype in [GridCode.RectilinearSGrid, GridCode.RectilinearZGrid]:
return self.search_indices_rectilinear(x, y, z, ti, time, search2D=search2D)
else:
return self.search_indices_curvilinear(x, y, z, xi, yi, ti, time, search2D=search2D)
def interpolator2D(self, ti, z, y, x):
xi = 0
yi = 0
(xsi, eta, _, xi, yi, _) = self.search_indices(x, y, z, xi, yi)
if self.interp_method == 'nearest':
xii = xi if xsi <= .5 else xi+1
yii = yi if eta <= .5 else yi+1
return self.data[ti, yii, xii]
elif self.interp_method == 'linear':
val = (1-xsi)*(1-eta) * self.data[ti, yi, xi] + \
xsi*(1-eta) * self.data[ti, yi, xi+1] + \
xsi*eta * self.data[ti, yi+1, xi+1] + \
(1-xsi)*eta * self.data[ti, yi+1, xi]
return val
elif self.interp_method in ['cgrid_tracer', 'bgrid_tracer']:
return self.data[ti, yi+1, xi+1]
elif self.interp_method == 'cgrid_velocity':
raise RuntimeError("%s is a scalar field. cgrid_velocity interpolation method should be used for vector fields (e.g. FieldSet.UV)" % self.name)
else:
raise RuntimeError(self.interp_method+" is not implemented for 2D grids")
def interpolator3D(self, ti, z, y, x, time):
xi = int(self.grid.xdim / 2) - 1
yi = int(self.grid.ydim / 2) - 1
(xsi, eta, zeta, xi, yi, zi) = self.search_indices(x, y, z, xi, yi, ti, time)
if self.interp_method == 'nearest':
xii = xi if xsi <= .5 else xi+1
yii = yi if eta <= .5 else yi+1
zii = zi if zeta <= .5 else zi+1
return self.data[ti, zii, yii, xii]
elif self.interp_method == 'cgrid_velocity':
# evaluating W velocity in c_grid
f0 = self.data[ti, zi, yi+1, xi+1]
f1 = self.data[ti, zi+1, yi+1, xi+1]
return (1-zeta) * f0 + zeta * f1
elif self.interp_method in ['linear', 'bgrid_velocity', 'bgrid_w_velocity']:
if self.interp_method == 'bgrid_velocity':
zeta = 0.
elif self.interp_method == 'bgrid_w_velocity':
eta = 1.
xsi = 1.
data = self.data[ti, zi, :, :]
f0 = (1-xsi)*(1-eta) * data[yi, xi] + \
xsi*(1-eta) * data[yi, xi+1] + \
xsi*eta * data[yi+1, xi+1] + \
(1-xsi)*eta * data[yi+1, xi]
data = self.data[ti, zi+1, :, :]
f1 = (1-xsi)*(1-eta) * data[yi, xi] + \
xsi*(1-eta) * data[yi, xi+1] + \
xsi*eta * data[yi+1, xi+1] + \
(1-xsi)*eta * data[yi+1, xi]
return (1-zeta) * f0 + zeta * f1
elif self.interp_method in ['cgrid_tracer', 'bgrid_tracer']:
return self.data[ti, zi, yi+1, xi+1]
else:
raise RuntimeError(self.interp_method+" is not implemented for 3D grids")
def temporal_interpolate_fullfield(self, ti, time):
"""Calculate the data of a field between two snapshots,
using linear interpolation
:param ti: Index in time array associated with time (via :func:`time_index`)
:param time: Time to interpolate to
:rtype: Linearly interpolated field"""
t0 = self.grid.time[ti]
if time == t0:
return self.data[ti, :]
elif ti+1 >= len(self.grid.time):
raise TimeExtrapolationError(time, field=self, msg='show_time')
else:
t1 = self.grid.time[ti+1]
f0 = self.data[ti, :]
f1 = self.data[ti+1, :]
return f0 + (f1 - f0) * ((time - t0) / (t1 - t0))
def spatial_interpolation(self, ti, z, y, x, time):
"""Interpolate horizontal field values using a SciPy interpolator"""
if self.grid.zdim == 1:
val = self.interpolator2D(ti, z, y, x)
else:
val = self.interpolator3D(ti, z, y, x, time)
if np.isnan(val):
# Detect Out-of-bounds sampling and raise exception
raise FieldOutOfBoundError(x, y, z, field=self)
else:
if isinstance(val, da.core.Array):
val = val.compute()
return val
def time_index(self, time):
"""Find the index in the time array associated with a given time
Note that we normalize to either the first or the last index
if the sampled value is outside the time value range.
"""
if not self.time_periodic and not self.allow_time_extrapolation and (time < self.grid.time[0] or time > self.grid.time[-1]):
raise TimeExtrapolationError(time, field=self)
time_index = self.grid.time <= time
if self.time_periodic:
if time_index.all() or np.logical_not(time_index).all():
periods = int(math.floor((time-self.grid.time_full[0])/(self.grid.time_full[-1]-self.grid.time_full[0])))
if isinstance(self.grid.periods, c_int):
self.grid.periods.value = periods
else:
self.grid.periods = periods
time -= periods*(self.grid.time_full[-1]-self.grid.time_full[0])
time_index = self.grid.time <= time
ti = time_index.argmin() - 1 if time_index.any() else 0
return (ti, periods)
return (time_index.argmin() - 1 if time_index.any() else 0, 0)
if time_index.all():
# If given time > last known field time, use
# the last field frame without interpolation
return (len(self.grid.time) - 1, 0)
else:
return (time_index.argmin() - 1 if time_index.any() else 0, 0)
def depth_index(self, depth, lat, lon):
"""Find the index in the depth array associated with a given depth"""
if depth > self.grid.depth[-1]:
raise FieldOutOfBoundError(lon, lat, depth, field=self)
depth_index = self.grid.depth <= depth
if depth_index.all():
# If given depth == largest field depth, use the second-last
# field depth (as zidx+1 needed in interpolation)
return len(self.grid.depth) - 2
else:
return depth_index.argmin() - 1 if depth_index.any() else 0
def eval(self, time, z, y, x, applyConversion=True):
"""Interpolate field values in space and time.
We interpolate linearly in time and apply implicit unit
conversion to the result. Note that we defer to
scipy.interpolate to perform spatial interpolation.
"""
(ti, periods) = self.time_index(time)
time -= periods*(self.grid.time_full[-1]-self.grid.time_full[0])
if ti < self.grid.tdim-1 and time > self.grid.time[ti]:
f0 = self.spatial_interpolation(ti, z, y, x, time)
f1 = self.spatial_interpolation(ti + 1, z, y, x, time)
t0 = self.grid.time[ti]
t1 = self.grid.time[ti + 1]
value = f0 + (f1 - f0) * ((time - t0) / (t1 - t0))
else:
# Skip temporal interpolation if time is outside
# of the defined time range or if we have hit an
# exact value in the time array.
value = self.spatial_interpolation(ti, z, y, x, self.grid.time[ti])
if applyConversion:
return self.units.to_target(value, x, y, z)
else:
return value
def ccode_eval(self, var, t, z, y, x):
# Casting interp_method to int, as it is easier to pass on in C code
return "temporal_interpolation(%s, %s, %s, %s, %s, particle->cxi, particle->cyi, particle->czi, particle->cti, &%s, %s)" \
% (x, y, z, t, self.ccode_name, var, self.interp_method.upper())
def ccode_convert(self, _, z, y, x):
return self.units.ccode_to_target(x, y, z)
def get_block_id(self, block):
return np.ravel_multi_index(block, self.nchunks)
def get_block(self, bid):
return np.unravel_index(bid, self.nchunks[1:])
def chunk_setup(self):
if isinstance(self.data, da.core.Array):
chunks = self.data.chunks
self.nchunks = self.data.numblocks
npartitions = 1
for n in self.nchunks[1:]:
npartitions *= n
else:
chunks = tuple((t,) for t in self.data.shape)
self.nchunks = (1,) * len(self.data.shape)
npartitions = 1
self.data_chunks = [None] * npartitions
self.c_data_chunks = [None] * npartitions
self.grid.load_chunk = np.zeros(npartitions, dtype=c_int)
# self.grid.chunk_info format: number of dimensions (without tdim); number of chunks per dimension;
# chunk sizes (the sizes of all chunks along dim[0], then likewise for the next dims)
self.grid.chunk_info = [[len(self.nchunks)-1], list(self.nchunks[1:]), sum(list(list(ci) for ci in chunks[1:]), [])]
self.grid.chunk_info = sum(self.grid.chunk_info, [])
self.chunk_set = True
def chunk_data(self):
if not self.chunk_set:
self.chunk_setup()
# self.grid.load_chunk code:
# 0: not loaded
# 1: was asked to load by kernel in JIT
# 2: is loaded and was touched last C call
# 3: is loaded
if isinstance(self.data, da.core.Array):
for block_id in range(len(self.grid.load_chunk)):
if self.grid.load_chunk[block_id] == 1 or self.grid.load_chunk[block_id] > 1 and self.data_chunks[block_id] is None:
block = self.get_block(block_id)
self.data_chunks[block_id] = np.array(self.data.blocks[(slice(self.grid.tdim),)+block])
self.grid.load_chunk[block_id] = 2
elif self.grid.load_chunk[block_id] == 0:
self.data_chunks[block_id] = None
self.c_data_chunks[block_id] = None
else:
self.grid.load_chunk[0] = 2
self.data_chunks[0] = self.data
@property
def ctypes_struct(self):
"""Returns a ctypes struct object containing all relevant
pointers and sizes for this field."""
# Ctypes struct corresponding to the type definition in parcels.h
class CField(Structure):
_fields_ = [('xdim', c_int), ('ydim', c_int), ('zdim', c_int),
('tdim', c_int), ('igrid', c_int),
('allow_time_extrapolation', c_int),
('time_periodic', c_int),
('data_chunks', POINTER(POINTER(POINTER(c_float)))),
('grid', POINTER(CGrid))]
# Create and populate the c-struct object
allow_time_extrapolation = 1 if self.allow_time_extrapolation else 0
time_periodic = 1 if self.time_periodic else 0
for i in range(len(self.grid.load_chunk)):
if self.grid.load_chunk[i] == 1:
raise ValueError('data_chunks should have been loaded by now if requested. grid.load_chunk[bid] cannot be 1')
if self.grid.load_chunk[i] > 1:
if not self.data_chunks[i].flags.c_contiguous:
self.data_chunks[i] = self.data_chunks[i].copy()
self.c_data_chunks[i] = self.data_chunks[i].ctypes.data_as(POINTER(POINTER(c_float)))
cstruct = CField(self.grid.xdim, self.grid.ydim, self.grid.zdim,
self.grid.tdim, self.igrid, allow_time_extrapolation, time_periodic,
(POINTER(POINTER(c_float)) * len(self.c_data_chunks))(*self.c_data_chunks),
pointer(self.grid.ctypes_struct))
return cstruct
def show(self, animation=False, show_time=None, domain=None, depth_level=0, projection=None, land=True,
vmin=None, vmax=None, savefile=None, **kwargs):
"""Method to 'show' a Parcels Field
:param animation: Boolean whether result is a single plot, or an animation
:param show_time: Time at which to show the Field (only in single-plot mode)
:param domain: dictionary (with keys 'N', 'S', 'E', 'W') defining domain to show
:param depth_level: depth level to be plotted (default 0)
:param projection: type of cartopy projection to use (default PlateCarree)
:param land: Boolean whether to show land. This is ignored for flat meshes
:param vmin: minimum colour scale (only in single-plot mode)
:param vmax: maximum colour scale (only in single-plot mode)
:param savefile: Name of a file to save the plot to
"""
from parcels.plotting import plotfield
plt, _, _, _ = plotfield(self, animation=animation, show_time=show_time, domain=domain, depth_level=depth_level,
projection=projection, land=land, vmin=vmin, vmax=vmax, savefile=savefile, **kwargs)
if plt:
plt.show()
def add_periodic_halo(self, zonal, meridional, halosize=5, data=None):
"""Add a 'halo' to all Fields in a FieldSet, through extending the Field (and lon/lat)
by copying a small portion of the field on one side of the domain to the other.
Before adding a periodic halo to the Field, it has to be added to the Grid on which the Field depends
:param zonal: Create a halo in zonal direction (boolean)
:param meridional: Create a halo in meridional direction (boolean)
:param halosize: size of the halo (in grid points). Default is 5 grid points
:param data: if data is not None, the periodic halo will be achieved on data instead of self.data and data will be returned
"""
dataNone = not isinstance(data, (np.ndarray, da.core.Array))
if self.grid.defer_load and dataNone:
return
data = self.data if dataNone else data
lib = np if isinstance(data, np.ndarray) else da
if zonal:
if len(data.shape) == 3:
data = lib.concatenate((data[:, :, -halosize:], data,
data[:, :, 0:halosize]), axis=len(data.shape)-1)
assert data.shape[2] == self.grid.xdim, "Third dim must be x."
else:
data = lib.concatenate((data[:, :, :, -halosize:], data,
data[:, :, :, 0:halosize]), axis=len(data.shape) - 1)
assert data.shape[3] == self.grid.xdim, "Fourth dim must be x."
self.lon = self.grid.lon
self.lat = self.grid.lat
if meridional:
if len(data.shape) == 3:
data = lib.concatenate((data[:, -halosize:, :], data,
data[:, 0:halosize, :]), axis=len(data.shape)-2)
assert data.shape[1] == self.grid.ydim, "Second dim must be y."
else:
data = lib.concatenate((data[:, :, -halosize:, :], data,
data[:, :, 0:halosize, :]), axis=len(data.shape) - 2)
assert data.shape[2] == self.grid.ydim, "Third dim must be y."
self.lat = self.grid.lat
if dataNone:
self.data = data
else:
return data
def write(self, filename, varname=None):
"""Write a :class:`Field` to a netcdf file
:param filename: Basename of the file
:param varname: Name of the field, to be appended to the filename"""
filepath = str(path.local('%s%s.nc' % (filename, self.name)))
if varname is None:
varname = self.name
# Derive name of 'depth' variable for NEMO convention
vname_depth = 'depth%s' % self.name.lower()
# Create DataArray objects for file I/O
if self.grid.gtype == GridCode.RectilinearZGrid:
nav_lon = xr.DataArray(self.grid.lon + np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32),
coords=[('y', self.grid.lat), ('x', self.grid.lon)])
nav_lat = xr.DataArray(self.grid.lat.reshape(self.grid.ydim, 1) + np.zeros(self.grid.xdim, dtype=np.float32),
coords=[('y', self.grid.lat), ('x', self.grid.lon)])
elif self.grid.gtype == GridCode.CurvilinearZGrid:
nav_lon = xr.DataArray(self.grid.lon, coords=[('y', range(self.grid.ydim)),
('x', range(self.grid.xdim))])
nav_lat = xr.DataArray(self.grid.lat, coords=[('y', range(self.grid.ydim)),
('x', range(self.grid.xdim))])
else:
raise NotImplementedError('Field.write only implemented for RectilinearZGrid and CurvilinearZGrid')
attrs = {'units': 'seconds since ' + str(self.grid.time_origin)} if self.grid.time_origin.calendar else {}
time_counter = xr.DataArray(self.grid.time,
dims=['time_counter'],
attrs=attrs)
vardata = xr.DataArray(self.data.reshape((self.grid.tdim, self.grid.zdim, self.grid.ydim, self.grid.xdim)),
dims=['time_counter', vname_depth, 'y', 'x'])
# Create xarray Dataset and output to netCDF format
attrs = {'parcels_mesh': self.grid.mesh}
dset = xr.Dataset({varname: vardata}, coords={'nav_lon': nav_lon,
'nav_lat': nav_lat,
'time_counter': time_counter,
vname_depth: self.grid.depth}, attrs=attrs)
dset.to_netcdf(filepath)
def rescale_and_set_minmax(self, data):
if self._scaling_factor:
data *= self._scaling_factor
data[np.isnan(data)] = 0
if self.vmin is not None:
data[data < self.vmin] = 0
if self.vmax is not None:
data[data > self.vmax] = 0
return data
def data_concatenate(self, data, data_to_concat, tindex):
lib = np if isinstance(data_to_concat, np.ndarray) else da
if tindex == 0:
data = lib.concatenate([data_to_concat, data[tindex+1:, :]], axis=0)
elif tindex == 1:
data = lib.concatenate([data[:tindex, :], data_to_concat, data[tindex+1:, :]], axis=0)
elif tindex == 2:
data = lib.concatenate([data[:tindex, :], data_to_concat], axis=0)
else:
raise ValueError("data_concatenate is used for computeTimeChunk, with tindex in [0, 1, 2]")
return data
def advancetime(self, field_new, advanceForward):
if advanceForward == 1: # forward in time, so appending at end
self.data = np.concatenate((self.data[1:, :, :], field_new.data[:, :, :]), 0)
self.time = self.grid.time
else: # backward in time, so prepending at start
self.data = np.concatenate((field_new.data[:, :, :], self.data[:-1, :, :]), 0)
self.time = self.grid.time
def computeTimeChunk(self, data, tindex):
g = self.grid
timestamp = None if self.timestamps is None else self.timestamps[tindex]
filebuffer = NetcdfFileBuffer(self.dataFiles[g.ti+tindex], self.dimensions, self.indices,
self.netcdf_engine, timestamp=timestamp,
interp_method=self.interp_method,
data_full_zdim=self.data_full_zdim,
field_chunksize=self.field_chunksize)
filebuffer.__enter__()
time_data = filebuffer.time
time_data = g.time_origin.reltime(time_data)
filebuffer.ti = (time_data <= g.time[tindex]).argmin() - 1
if self.netcdf_engine != 'xarray':
filebuffer.name = filebuffer.parse_name(self.filebuffername)
buffer_data = filebuffer.data
lib = np if isinstance(buffer_data, np.ndarray) else da
if len(buffer_data.shape) == 2:
buffer_data = lib.reshape(buffer_data, sum(((1, 1), buffer_data.shape), ()))
elif len(buffer_data.shape) == 3 and g.zdim > 1:
buffer_data = lib.reshape(buffer_data, sum(((1, ), buffer_data.shape), ()))
elif len(buffer_data.shape) == 3:
buffer_data = lib.reshape(buffer_data, sum(((buffer_data.shape[0], 1, ), buffer_data.shape[1:]), ()))
data = self.data_concatenate(data, buffer_data, tindex)
self.filebuffers[tindex] = filebuffer
return data
def __add__(self, field):
if isinstance(self, Field) and isinstance(field, Field):
return SummedField('_SummedField', [self, field])
elif isinstance(field, SummedField):
assert isinstance(self, type(field[0])), 'Fields in a SummedField should be either all scalars or all vectors'
field.insert(0, self)
return field
class VectorField(object):
"""Class VectorField stores 2 or 3 fields which defines together a vector field.
This enables to interpolate them as one single vector field in the kernels.
:param name: Name of the vector field
:param U: field defining the zonal component
:param V: field defining the meridional component
:param W: field defining the vertical component (default: None)
"""
def __init__(self, name, U, V, W=None):
self.name = name
self.U = U
self.V = V
self.W = W
self.vector_type = '3D' if W else '2D'
if self.U.interp_method == 'cgrid_velocity':
assert self.V.interp_method == 'cgrid_velocity', (
'Interpolation methods of U and V are not the same.')
assert self.U.grid is self.V.grid, (
'Grids of U and V are not the same.')
if self.vector_type == '3D':
assert self.W.interp_method == 'cgrid_velocity', (
'Interpolation methods of U and W are not the same.')
assert self.W.grid is self.U.grid, (
'Grids of U and W are not the same.')
def dist(self, lon1, lon2, lat1, lat2, mesh, lat):
if mesh == 'spherical':
rad = np.pi/180.
deg2m = 1852 * 60.
return np.sqrt(((lon2-lon1)*deg2m*math.cos(rad * lat))**2 + ((lat2-lat1)*deg2m)**2)
else:
return np.sqrt((lon2-lon1)**2 + (lat2-lat1)**2)
def jacobian(self, xsi, eta, px, py):
dphidxsi = [eta-1, 1-eta, eta, -eta]
dphideta = [xsi-1, -xsi, xsi, 1-xsi]
dxdxsi = np.dot(px, dphidxsi)
dxdeta = np.dot(px, dphideta)
dydxsi = np.dot(py, dphidxsi)
dydeta = np.dot(py, dphideta)
jac = dxdxsi*dydeta - dxdeta*dydxsi
return jac
def spatial_c_grid_interpolation2D(self, ti, z, y, x, time):
grid = self.U.grid
xi = int(grid.xdim / 2) - 1
yi = int(grid.ydim / 2) - 1
(xsi, eta, zeta, xi, yi, zi) = self.U.search_indices(x, y, z, xi, yi, ti, time)
if grid.gtype in [GridCode.RectilinearSGrid, GridCode.RectilinearZGrid]:
px = np.array([grid.lon[xi], grid.lon[xi+1], grid.lon[xi+1], grid.lon[xi]])
py = np.array([grid.lat[yi], grid.lat[yi], grid.lat[yi+1], grid.lat[yi+1]])
else:
px = np.array([grid.lon[yi, xi], grid.lon[yi, xi+1], grid.lon[yi+1, xi+1], grid.lon[yi+1, xi]])
py = np.array([grid.lat[yi, xi], grid.lat[yi, xi+1], grid.lat[yi+1, xi+1], grid.lat[yi+1, xi]])
if grid.mesh == 'spherical':
px[0] = px[0]+360 if px[0] < x-225 else px[0]
px[0] = px[0]-360 if px[0] > x+225 else px[0]
px[1:] =
|
np.where(px[1:] - px[0] > 180, px[1:]-360, px[1:])
|
numpy.where
|
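# A minimal, self-contained sketch (not part of the Parcels source above) of the np.where
# longitude-wrapping idiom used in the completion: cell-corner longitudes that differ from
# the reference corner px[0] by more than 180 degrees are shifted by 360 so the cell sits
# on one continuous longitude branch. The corner values below are made up.
import numpy as np

px = np.array([179.0, -179.5, -179.5, 179.0])  # cell corners straddling the dateline
px[1:] = np.where(px[1:] - px[0] > 180, px[1:] - 360, px[1:])
px[1:] = np.where(-px[1:] + px[0] > 180, px[1:] + 360, px[1:])
print(px)  # [179.  180.5 180.5 179.] -- a monotonically consistent cell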
"""
Coadd spectra
"""
from __future__ import absolute_import, division, print_function
import os, sys, time
import numpy as np
import scipy.sparse
import scipy.linalg
import scipy.sparse.linalg
from astropy.table import Column
# for debugging
import astropy.io.fits as pyfits
import multiprocessing
from desiutil.log import get_logger
from desispec.interpolation import resample_flux
from desispec.spectra import Spectra
from desispec.resolution import Resolution
from desispec.fiberbitmasking import get_all_fiberbitmask_with_amp, get_all_nonamp_fiberbitmask_val, get_justamps_fiberbitmask
def coadd_fibermap(fibermap) :
log = get_logger()
log.debug("'coadding' fibermap")
targets = np.unique(fibermap["TARGETID"])
ntarget = targets.size
jj=np.zeros(ntarget,dtype=int)
for i,tid in enumerate(targets) :
jj[i]=np.where(fibermap["TARGETID"]==tid)[0][0]
tfmap=fibermap[jj]
#- initialize NUMEXP=-1 to check that they all got filled later
tfmap['COADD_NUMEXP'] = np.zeros(len(tfmap), dtype=np.int16) - 1
# smarter values for some columns
for k in ['DELTA_X','DELTA_Y'] :
if k in fibermap.colnames :
tfmap.rename_column(k,'MEAN_'+k)
xx = Column(np.zeros(ntarget))
tfmap.add_column(xx,name='RMS_'+k)
for k in ['NIGHT','EXPID','TILEID','SPECTROID','FIBER'] :
if k in fibermap.colnames :
xx = Column(np.arange(ntarget))
tfmap.add_column(xx,name='FIRST_'+k)
xx = Column(np.arange(ntarget))
tfmap.add_column(xx,name='LAST_'+k)
xx = Column(np.arange(ntarget))
tfmap.add_column(xx,name='NUM_'+k)
for i,tid in enumerate(targets) :
jj = fibermap["TARGETID"]==tid
#- coadded FIBERSTATUS = bitwise AND of input FIBERSTATUS
tfmap['FIBERSTATUS'][i] = np.bitwise_and.reduce(fibermap['FIBERSTATUS'][jj])
#- Only FIBERSTATUS=0 were included in the coadd
fiberstatus_nonamp_bits = get_all_nonamp_fiberbitmask_val()
fiberstatus_amp_bits = get_justamps_fiberbitmask()
targ_fibstatuses = fibermap['FIBERSTATUS'][jj]
nonamp_fiberstatus_flagged = ( (targ_fibstatuses & fiberstatus_nonamp_bits) > 0 )
allamps_flagged = ( (targ_fibstatuses & fiberstatus_amp_bits) == fiberstatus_amp_bits )
good_coadds = np.bitwise_not( nonamp_fiberstatus_flagged | allamps_flagged )
tfmap['COADD_NUMEXP'][i] = np.count_nonzero(good_coadds)
for k in ['DELTA_X','DELTA_Y'] :
if k in fibermap.colnames :
vals=fibermap[k][jj]
tfmap['MEAN_'+k][i] = np.mean(vals)
tfmap['RMS_'+k][i] = np.sqrt(np.mean(vals**2)) # inc. mean offset, not same as std
for k in ['NIGHT','EXPID','TILEID','SPECTROID','FIBER'] :
if k in fibermap.colnames :
vals=fibermap[k][jj]
tfmap['FIRST_'+k][i] = np.min(vals)
tfmap['LAST_'+k][i] = np.max(vals)
tfmap['NUM_'+k][i] = np.unique(vals).size
for k in ['FIBERASSIGN_X', 'FIBERASSIGN_Y','FIBER_RA', 'FIBER_DEC'] :
if k in fibermap.colnames :
tfmap[k][i]=np.mean(fibermap[k][jj])
for k in ['FIBER_RA_IVAR', 'FIBER_DEC_IVAR','DELTA_X_IVAR', 'DELTA_Y_IVAR'] :
if k in fibermap.colnames :
tfmap[k][i]=np.sum(fibermap[k][jj])
return tfmap
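# Small illustrative sketch (not part of desispec) of the np.bitwise_and.reduce step used
# above for the coadded FIBERSTATUS: a bit survives the coadd only if it is set in every
# input exposure. The bit patterns below are made up.
_example_status = np.array([0b0101, 0b0111, 0b1101])
assert np.bitwise_and.reduce(_example_status) == 0b0101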
def coadd(spectra, cosmics_nsig=0.) :
"""
Coadd the spectra for each target and each camera. The input spectra object is modified in place.
Args:
spectra: desispec.spectra.Spectra object
Options:
cosmics_nsig: float, n-sigma clipping threshold for cosmic rays
"""
log = get_logger()
targets = np.unique(spectra.fibermap["TARGETID"])
ntarget=targets.size
log.debug("number of targets= {}".format(ntarget))
for b in spectra.bands :
log.debug("coadding band '{}'".format(b))
nwave=spectra.wave[b].size
tflux=
|
np.zeros((ntarget,nwave),dtype=spectra.flux[b].dtype)
|
numpy.zeros
|
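# Hedged sketch only: the coadd() body above is truncated at the prompt boundary, so the
# helper below is an assumption of the standard inverse-variance weighting that spectral
# coaddition typically uses, flux = sum(ivar_i * flux_i) / sum(ivar_i); it is not the
# desispec implementation.
import numpy as np

def ivar_weighted_coadd(flux, ivar):
    """flux, ivar: arrays of shape (nexp, nwave); returns coadded flux and summed ivar."""
    wsum = np.sum(ivar, axis=0)
    coadd_flux = np.zeros_like(wsum)
    good = wsum > 0
    coadd_flux[good] = np.sum(ivar * flux, axis=0)[good] / wsum[good]
    return coadd_flux, wsum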
#!/usr/bin/env python
# ==========================================================
# Author: <NAME> - <EMAIL>
# ==========================================================
import numpy as np
import os
import shutil
from subprocess import Popen, PIPE
from util_func import randomstring
from util_lenstool import BayesLens_writer
def prior_creator(vector, priors_lowbounds, priors_highbounds):
"""
Generates flat priors between *priors_lowbounds and *priors_highbounds for parameters in *vector
:param vector: array containing parameters optimized within flat priors
:param priors_lowbounds: array containing lower bound of flat priors
:param priors_highbounds: array containing higher bound of flat priors
:return: selection. selection = True if all *vector entries are within their flat prior. Otherwise selection = False
"""
selection = True
for i, entry in enumerate(vector):
if entry > priors_lowbounds[i] and entry < priors_highbounds[i]:
selection = selection * True
else:
selection = selection * False
return selection
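# Illustrative usage sketch for prior_creator (made-up numbers, relies on the numpy import
# at the top of this file): the first vector lies strictly inside its flat priors, the
# second violates its upper bound.
assert prior_creator(np.array([0.5, 2.0]), [0.0, 1.0], [1.0, 3.0])
assert not prior_creator(np.array([0.5, 5.0]), [0.0, 1.0], [1.0, 3.0])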
def ln_gauss(vector, mu, std):
"""
Generates Gaussian priors with mean *mu and standard deviation *std for parameters in *vector
:param vector: array containing parameters with Gaussian priors
:param mu: array containing mean of Gaussian priors
:param std: array containing standard deviation of Gaussian priors
:return: array with log values of Gaussian priors
"""
lg = -0.5 * (np.sum(np.log(2 * np.pi * std ** 2) + ((vector - mu) ** 2) / std ** 2))
return lg
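# Sanity-check sketch (not part of BayesLens): ln_gauss is the sum of independent Gaussian
# log-densities, so it should agree with summing scipy.stats.norm.logpdf term by term.
# The numbers are arbitrary.
from scipy.stats import norm
_x, _mu, _std = np.array([1.0, 2.0]), np.array([0.5, 2.5]), np.array([1.0, 2.0])
assert np.isclose(ln_gauss(_x, _mu, _std), np.sum(norm.logpdf(_x, loc=_mu, scale=_std)))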
def priors_scaling_relations(theta, priors_bounds):
"""
Flat priors on scaling relation parameters (see eq.9 Bergamini et al. 2020)
:param theta: array containing all parameters optimized by BayesLens
:param priors_bounds: see *BayesLens_parser
:return: -inf if the scaling relation parameters are outside their flat priors; otherwise -ln(scatter), where scatter is the scatter of the galaxies around the fitted scaling relation
"""
selection_vd = prior_creator(theta[:4], priors_bounds[:4, 0], priors_bounds[:4, 1])
if selection_vd:
lnprior_vdgalaxies = -np.log(theta[2])
else:
lnprior_vdgalaxies = -np.inf
del selection_vd
return lnprior_vdgalaxies
def lnlike_vdgalaxies(theta, priors_bounds, translation_vector):
"""
Likelihood on sigma-mag scaling relation parameters using measured velocity dispersions (see eq.10 Bergamini et al. 2020)
:param theta: array containing all parameters optimized by BayesLens
:param priors_bounds: see *BayesLens_parser
:param translation_vector: see *BayesLens_parser
:return: summed log likelihood for measured galaxies
"""
vdslope, vdq, vdscatter = np.asarray(theta[0:3], dtype='float')
mask_vdgalaxies = (np.asarray(translation_vector[:, 0], dtype=float) >= 2) & (
np.asarray(translation_vector[:, 0], dtype=float) < 3) & (translation_vector[:, 1] == 'v_disp')
mag = np.asarray(priors_bounds[:, 2][mask_vdgalaxies], dtype='float')
sigma = np.asarray(priors_bounds[:, 0][mask_vdgalaxies], dtype='float')
dsigma = np.asarray(priors_bounds[:, 1][mask_vdgalaxies], dtype='float')
mag_ref = float(priors_bounds[1, 2])
model = vdq * 10 ** ((vdslope / 2.5) * (mag_ref - mag))
inc2 = (dsigma ** 2 + vdscatter ** 2)
lnlike_vdgalaxies = -0.5 * (np.sum(np.log(2 * np.pi * inc2) + ((sigma - model) ** 2) / inc2))
del vdslope, vdq, vdscatter, mask_vdgalaxies, mag, sigma, dsigma, mag_ref, model, inc2
return lnlike_vdgalaxies
def prior_vdgalaxies(theta, priors_bounds, translation_vector):
"""
Gaussian priors on measured galaxies velocity dispersions. Priors are centered on measured values and have a std = 5 * dsigma (see eq.12 Bergamini et al. 2020)
:param theta: array containing all parameters optimized by BayesLens
:param priors_bounds: see *BayesLens_parser
:param translation_vector: see *BayesLens_parser
:return: summed log value of gaussian priors
"""
mask_vdgalaxies = (np.asarray(translation_vector[:, 0], dtype=float) >= 2) & (
np.asarray(translation_vector[:, 0], dtype=float) < 3) & (translation_vector[:, 1] == 'v_disp')
sigma_mea = np.asarray(theta[mask_vdgalaxies], dtype='float')
sigma = np.asarray(priors_bounds[:, 0][mask_vdgalaxies], dtype='float')
dsigma = np.asarray(priors_bounds[:, 1][mask_vdgalaxies], dtype='float')
ln_prior_vdgalaxies = ln_gauss(sigma_mea, sigma, 5 * dsigma)
del sigma_mea, sigma, dsigma, mask_vdgalaxies
return ln_prior_vdgalaxies
def priors_galaxies(theta, priors_bounds, translation_vector):
"""
Large flat priors, around the proposed sigma-mag scaling relation, for galaxies without a measured velocity dispersion. IT IS NOT USED IN THE CURRENT VERSION OF BayesLens
:param theta: array containing all parameters optimized by BayesLens
:param priors_bounds: see *BayesLens_parser
:param translation_vector: see *BayesLens_parser
:return: -inf if scaling relation parameters are outside their flat priors. Otherwise 0.
"""
mask_galaxies_vd = (np.asarray(translation_vector[:, 0], dtype=float) >= 3) & (translation_vector[:, 1] == 'v_disp')
sigmas = np.asarray(theta[mask_galaxies_vd], dtype='float')
down_prior = np.asarray(priors_bounds[:, 0][mask_galaxies_vd], dtype='float')
up_prior = np.asarray(priors_bounds[:, 1][mask_galaxies_vd], dtype='float')
selection_gal = prior_creator(sigmas, down_prior, up_prior)
if selection_gal:
lnprior_gal = 0.
else:
lnprior_gal = -np.inf
del mask_galaxies_vd, sigmas, selection_gal, down_prior, up_prior
return lnprior_gal
def lnlike_galaxies(theta, priors_bounds, translation_vector):
"""
Gaussian priors on unmeasured galaxies velocity dispersions. Priors are centered on the proposed sigma-mag scaling relation and have std = scatter (see eq.13 Bergamini et al. 2020)
:param theta: array containing all parameters optimized by BayesLens
:param priors_bounds: see *BayesLens_parser
:param translation_vector: see *BayesLens_parser
:return: summed log value of gaussian priors
"""
vdslope, vdq, vdscatter = theta[0:3]
mask_galaxies_vd = (np.asarray(translation_vector[:, 0], dtype=float) >= 3) & (translation_vector[:, 1] == 'v_disp')
sigmas = np.asarray(theta[mask_galaxies_vd], dtype='float')
mags = np.asarray(priors_bounds[:, 2][mask_galaxies_vd], dtype='float')
mag_ref_vd = float(priors_bounds[1, 2])
vdmodel = vdq * (10.0 ** (0.4 * vdslope * (mag_ref_vd - mags)))
vdinc2s = (vdscatter ** 2)
lnlike_vdscaling = -0.5 * (np.sum(np.log(2 * np.pi * vdinc2s) + ((sigmas - vdmodel) ** 2) / vdinc2s))
del vdslope, vdq, vdscatter, mask_galaxies_vd, sigmas, mags, mag_ref_vd
return lnlike_vdscaling
def priors_halos(theta, priors_bounds, translation_vector):
"""
Flat priors on DM halo parameters (see eq.14 Bergamini et al. 2020)
:param theta: array containing all parameters optimized by BayesLens
:param priors_bounds: see *BayesLens_parser
:param translation_vector: see *BayesLens_parser
:return: -inf if DM halo parameters are outside their flat priors. Otherwise 0
"""
mask_halos = ((np.asarray(translation_vector[:, 0], dtype=float) >= 1) & (
np.asarray(translation_vector[:, 0], dtype=float) < 2)) | (
|
np.asarray(translation_vector[:, 0], dtype=float)
|
numpy.asarray
|
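# Hypothetical sketch only: one plausible way the prior and likelihood terms defined in the
# prompt above could be combined into a single log-posterior for an MCMC sampler. This is
# an assumption for illustration, not the BayesLens implementation (which is not shown here).
import numpy as np

def ln_posterior_sketch(theta, priors_bounds, translation_vector):
    lnp = priors_scaling_relations(theta, priors_bounds)
    if not np.isfinite(lnp):
        return -np.inf
    lnp += prior_vdgalaxies(theta, priors_bounds, translation_vector)
    lnp += lnlike_vdgalaxies(theta, priors_bounds, translation_vector)
    lnp += lnlike_galaxies(theta, priors_bounds, translation_vector)
    return lnp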
import sys
sys.path.append("../src")
import os
from os import listdir
import shutil
import numpy as np
from training import load_model
from sindy_utils import sindy_simulate, sindy_library_names
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from lorenz import Lorenz
from waterlorenz import LorenzWW
from predprey import PredPrey
from rossler import Rossler
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from IPython.display import display
import pdb
pd.options.display.float_format = '{:,.3f}'.format
def pickle2dict(params):
params2 = {key: val[0] for key, val in params.to_dict().items()}
list_to_int = ['input_dim', 'latent_dim', 'poly_order', 'n_ics', 'include_sine', 'exact_features']
listwrap_to_list = ['normalization', 'system_coefficients', 'widths', 'widths_ratios']
for key in list_to_int:
if key in params2.keys():
params2[key] = int(params2[key])
for key in listwrap_to_list:
if key in params2.keys():
params2[key] = list(params2[key])
return params2
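# Illustrative usage sketch for pickle2dict (made-up values): hyperparameters stored as a
# one-row pandas DataFrame come back with float/list-wrapper types, and this helper restores
# plain ints and lists for the keys it knows about.
_params_df = pd.DataFrame({'input_dim': [128.0], 'latent_dim': [3.0], 'widths': [[64, 32]]})
_params = pickle2dict(_params_df)
assert _params['input_dim'] == 128 and isinstance(_params['input_dim'], int)
assert _params['widths'] == [64, 32]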
def get_checkpoint_names(cpath):
all_files = os.listdir(cpath)
all_files = set([n.split('.')[0] for n in all_files])
if 'checkpoint' in all_files:
all_files.remove('checkpoint')
all_files = list(all_files)
all_files.sort()
print('number of checkpoints = ', len(all_files))
return all_files
def get_names(cases, path):
directory = listdir(path)
name_list = []
for name in directory:
for case in cases:
if '.' not in name and case in name:
name_list.append(name)
name_list = list(set(name_list))
sortidx = np.argsort(np.array([int(s.split('_')[1]) for s in name_list]))[::-1]
name_list = [name_list[i] for i in sortidx]
return name_list
def get_display_params(params, display_params=None):
filt_params = dict()
if display_params is not None:
for key in display_params:
if key in params.keys():
filt_params[key] = params[key]
print(key, ' : ', params[key])
else:
for key in params.keys():
filt_params[key] = params[key]
print(key, ' : ', params[key])
return filt_params
def read_results(name_list, path, end_time=30, threshold=1e-2, t0_frac=0.0, end_time_plot=30, display_params=None, query_remove=False):
## TODO: replace by global variable DATAPATH
varname = ['x', 'y', 'z', '1', '2']
path = '../data/'
known_attractor = True
non_existing_files = []
remove_files = []
for name in name_list:
print('name: ', name)
model, params, result = load_model(name, path)
if model is None or params is None:
non_existing_files.append(name)
continue
params = {key: val[0] for key, val in params.to_dict().items()}
end_time_idx = int(end_time_plot/params['dt'])
option = params['option']
# Backward compatibility
if 'lorenz_coefficients' in params.keys():
coefficients = np.array(params['lorenz_coefficients'])
elif 'system_coefficients' in params.keys():
coefficients = np.array(params['system_coefficients'])
if 'model' not in params.keys():
params['model'] = 'lorenz'
if 'model' in params.keys():
if params['model'] == 'lorenz':
PhysicalModel = Lorenz
elif params['model'] == 'predprey':
PhysicalModel = PredPrey
elif params['model'] == 'rossler':
PhysicalModel = Rossler
elif params['model'] == 'lorenzww':
PhysicalModel = LorenzWW
else:
raise Exception("model doesn't exist")
if params['model'] in ['lorenzww']:
known_attractor = False
noise = params['noise']
input_dim = int(params['input_dim'])
latent_dim = int(params['latent_dim'])
poly_order = int(params['poly_order'])
IC_num = int(params['n_ics'])
dt = params['dt']
poly_order = int(params['poly_order'])
include_sine = bool(params['include_sine'])
exact_features=False
if 'exact_features' in params.keys():
exact_features = bool(params['exact_features'])
coef_names = sindy_library_names(latent_dim, poly_order, include_sine=False, exact_features=exact_features)
coef_names_full = sindy_library_names(latent_dim, poly_order, include_sine=False, exact_features=False)
L = PhysicalModel(option=option, coefficients=coefficients, noise=noise, input_dim=input_dim, poly_order=poly_order)
if params['model'] == 'lorenzww':
L.filename='/home/joebakarji/delay-auto/main/examples/data/lorenzww.json'
data = L.get_solution()
else:
data = L.get_solution(1, end_time, dt)
if params['svd_dim'] is not None:
print('Running SVD decomposition...')
reduced_dim = int( params['svd_dim'] )
U, s, VT =
|
np.linalg.svd(data.x.T, full_matrices=False)
|
numpy.linalg.svd
|
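# Minimal sketch (not from the repository above) of the truncated-SVD reduction that the
# completion performs: keep the leading reduced_dim left singular vectors and project the
# data onto them. Shapes and names here are assumptions for illustration.
import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((5, 200))           # 5 state variables x 200 time samples (made up)
U, s, VT = np.linalg.svd(X, full_matrices=False)
reduced_dim = 2
X_reduced = U[:, :reduced_dim].T @ X        # projection onto the leading modes, shape (2, 200)
X_approx = U[:, :reduced_dim] @ X_reduced   # rank-2 approximation of X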
# Copyright (c) 2015-2019 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import warnings
import numpy as np
from scipy.constants import arcmin
import healpy as hp
from ..mpi import use_mpi, MPIShared
from ..timing import function_timer
from .._libtoast import fast_scanning_float32
DTYPE = np.float32
@function_timer
def plug_holes(m, verbose=False, in_place=True, nest=False):
"""Use simple downgrading to derive estimates of the missing pixel values
"""
nbad_start = np.sum(np.isclose(m, hp.UNSEEN))
if nbad_start == m.size:
if verbose:
print("plug_holes: All map pixels are empty. Cannot plug holes", flush=True)
return
if nbad_start == 0:
return
nside = hp.get_nside(m)
npix = m.size
if nest:
mnest = m.copy()
else:
mnest = hp.reorder(m, r2n=True)
lowres = mnest
nside_lowres = nside
bad = np.isclose(mnest, hp.UNSEEN)
while np.any(bad) and nside_lowres > 1:
nside_lowres //= 2
lowres = hp.ud_grade(lowres, nside_lowres, order_in="NESTED")
hires = hp.ud_grade(lowres, nside, order_in="NESTED")
bad = np.isclose(mnest, hp.UNSEEN)
mnest[bad] = hires[bad]
nbad_end = np.sum(bad)
if nbad_end != 0:
mn = np.mean(mnest[np.logical_not(bad)])
mnest[bad] = mn
if not in_place:
m = m.copy()
if nest:
m[:] = mnest
else:
m[:] = hp.reorder(mnest, n2r=True)
if verbose and nbad_start != 0:
print(
"plug_holes: Filled {} missing pixels ({:.2f}%), lowest "
"resolution was Nside={}.".format(
nbad_start, (100.0 * nbad_start) / npix, nside_lowres
)
)
return m
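# Illustrative usage sketch (made-up map, relies on the numpy/healpy imports above): a few
# UNSEEN pixels are filled with values estimated from repeated downgrading/upgrading of
# their neighbourhood.
_m = np.ones(hp.nside2npix(8), dtype=DTYPE)
_m[:10] = hp.UNSEEN
_filled = plug_holes(_m, in_place=False)
assert not np.any(np.isclose(_filled, hp.UNSEEN))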
class MapSampler:
"""
MapSampler objects store maps in the node shared memory and allow
bilinear interpolation of the maps into TOD.
"""
@function_timer
def __init__(
self,
map_path,
pol=False,
pol_fwhm=None,
no_temperature=False,
dtype=None,
verbose=False,
nside=None,
comm=None,
cache=None,
preloaded_map=None,
buflen=1000000,
nest=False,
):
"""
Instantiate the map sampler object, load a healpix
map in a file located at map_path
if pol==True, reads I,Q,U maps from extensions 0, 1, 2
"""
if not pol and no_temperature:
raise RuntimeError("You cannot have pol=False, " "no_temperature=True")
self.path = map_path
self.pol = pol
self.pol_fwhm = pol_fwhm
self._map = None
self._map_Q = None
self._map_U = None
self.nest = nest
if nest:
self.order = "NESTED"
else:
self.order = "RING"
self.buflen = buflen
# Output data type, internal is always DTYPE
if dtype is not None:
warnings.warn("MapSampler no longer supports dtype", DeprecationWarning)
# Use healpy to load the map into memory.
if comm is None:
self.comm = None
self.rank = 0
self.ntask = 1
else:
self.comm = comm
self.rank = comm.Get_rank()
self.ntask = comm.Get_size()
self.shmem = self.ntask > 1
self.pol = pol
if self.rank == 0:
if self.pol:
if preloaded_map is not None:
if no_temperature:
(self._map_Q, self._map_U) = np.array(
preloaded_map, dtype=DTYPE
)
else:
(self._map, self._map_Q, self._map_U) = np.array(
preloaded_map, dtype=DTYPE
)
else:
if no_temperature:
self._map_Q, self._map_U = hp.read_map(
self.path,
field=[1, 2],
dtype=DTYPE,
verbose=verbose,
memmap=True,
nest=self.nest,
)
else:
try:
self._map, self._map_Q, self._map_U = hp.read_map(
self.path,
field=[0, 1, 2],
dtype=DTYPE,
verbose=verbose,
memmap=True,
nest=self.nest,
)
except IndexError:
print(
"WARNING: {} is not polarized".format(self.path),
flush=True,
)
self.pol = False
self._map = hp.read_map(
self.path,
dtype=DTYPE,
verbose=verbose,
memmap=True,
nest=self.nest,
)
if nside is not None:
if not no_temperature:
self._map = hp.ud_grade(
self._map,
nside,
dtype=DTYPE,
order_in=self.order,
order_out=self.order,
)
if self.pol:
self._map_Q = hp.ud_grade(
self._map_Q,
nside,
dtype=DTYPE,
order_in=self.order,
order_out=self.order,
)
self._map_U = hp.ud_grade(
self._map_U,
nside,
dtype=DTYPE,
order_in=self.order,
order_out=self.order,
)
if self.pol_fwhm is not None:
if not no_temperature:
plug_holes(self._map, verbose=verbose, nest=self.nest)
if self.pol:
plug_holes(self._map_Q, verbose=verbose, nest=self.nest)
plug_holes(self._map_U, verbose=verbose, nest=self.nest)
else:
if preloaded_map is not None:
self._map = np.array(preloaded_map, dtype=DTYPE)
else:
self._map = hp.read_map(
map_path,
field=[0],
dtype=DTYPE,
verbose=verbose,
memmap=True,
nest=self.nest,
)
if nside is not None:
self._map = hp.ud_grade(
self._map,
nside,
dtype=DTYPE,
order_in=self.order,
order_out=self.order,
)
plug_holes(self._map, verbose=verbose, nest=self.nest)
if self.ntask > 1:
self.pol = comm.bcast(self.pol, root=0)
npix = 0
if self.rank == 0:
if self.pol:
npix = len(self._map_Q)
else:
npix = len(self._map)
npix = comm.bcast(npix, root=0)
if self.shmem:
shared = MPIShared((npix,), np.dtype(DTYPE), comm)
if not no_temperature:
shared.set(self._map, (0,), fromrank=0)
self._map = shared
if self.pol:
shared_Q = MPIShared((npix,), np.dtype(DTYPE), comm)
shared_Q.set(self._map_Q, (0,), fromrank=0)
self._map_Q = shared_Q
shared_U = MPIShared((npix,), np.dtype(DTYPE), comm)
shared_U.set(self._map_U, (0,), fromrank=0)
self._map_U = shared_U
else:
if self.rank != 0:
if not no_temperature:
self._map = np.zeros(npix, dtype=DTYPE)
if self.pol:
self._map_Q = np.zeros(npix, dtype=DTYPE)
self._map_U = np.zeros(npix, dtype=DTYPE)
if not no_temperature:
comm.Bcast(self._map, root=0)
if self.pol:
comm.Bcast(self._map_Q, root=0)
comm.Bcast(self._map_U, root=0)
if self.pol:
self.npix = len(self._map_Q[:])
else:
self.npix = len(self._map[:])
self.nside = hp.npix2nside(self.npix)
self.cache = cache
self.instance = 0
if self.cache is not None and not self.shmem:
# Increase the instance counter until we find an unused
# instance. If the user did not want to store duplicates,
# they would not have created two identical mapsampler
# objects.
while self.cache.exists(self._cachename("I")):
self.instance += 1
if not no_temperature:
self._map = self.cache.put(self._cachename("I"), self._map)
if self.pol:
self._map_Q = self.cache.put(self._cachename("Q"), self._map_Q)
self._map_U = self.cache.put(self._cachename("U"), self._map_U)
if self.pol_fwhm is not None:
self.smooth(self.pol_fwhm, pol_only=True)
return
@function_timer
def smooth(self, fwhm, lmax=None, pol_only=False):
"""Smooth the map with a Gaussian kernel.
"""
if self.rank == 0:
if pol_only:
print(
"Smoothing the polarization to {} arcmin".format(fwhm), flush=True
)
else:
print("Smoothing the map to {} arcmin".format(fwhm), flush=True)
if lmax is None:
lmax = min(int(fwhm / 60 * 512), 2 * self.nside)
# If the map is in node-shared memory, only the root process on each
# node does the smoothing.
if not self.shmem or self._map.nodecomm.rank == 0:
if self.pol:
m = np.vstack([self._map[:], self._map_Q[:], self._map_U[:]])
else:
m = self._map[:]
if self.nest:
m = hp.reorder(m, n2r=True)
smap = hp.smoothing(m, fwhm=fwhm * arcmin, lmax=lmax, verbose=False)
del m
if self.nest:
smap = hp.reorder(smap, r2n=True)
else:
# Convenience dummy variable
smap = np.zeros([3, 12])
if not pol_only:
if self.shmem:
self._map.set(smap[0].astype(DTYPE), (0,), fromrank=0)
else:
self._map[:] = smap[0]
if self.pol:
if self.shmem:
self._map_Q.set(smap[1].astype(DTYPE), (0,), fromrank=0)
self._map_U.set(smap[2].astype(DTYPE), (0,), fromrank=0)
else:
self._map_Q[:] = smap[1]
self._map_U[:] = smap[2]
self.pol_fwhm = fwhm
return
def _cachename(self, stokes):
"""
Construct a cache name string for the selected Stokes map
"""
return "{}_ns{:04}_{}_{:04}".format(
self.path, self.nside, stokes, self.instance
)
@function_timer
def __del__(self):
"""
Explicitly free memory taken up in the cache.
"""
if self.cache is not None:
if self._map is not None:
del self._map
if not self.shmem:
self.cache.destroy(self._cachename("I"))
if self.pol:
del self._map_Q
del self._map_U
if not self.shmem:
self.cache.destroy(self._cachename("Q"))
self.cache.destroy(self._cachename("U"))
return
@function_timer
def __iadd__(self, other):
"""Accumulate provided Mapsampler object with this one.
"""
if self.shmem:
# One process does the manipulation on each node
self._map._nodecomm.Barrier()
if self._map._noderank == 0:
self._map._data[:] += other._map[:]
if self.pol and other.pol:
if self._map_Q._noderank == (1 % self._map_Q._nodeprocs):
self._map_Q._data[:] += other._map_Q[:]
if self._map_U._noderank == (2 % self._map_U._nodeprocs):
self._map_U._data[:] += other._map_U[:]
self._map._nodecomm.Barrier()
else:
self._map += other._map
if self.pol and other.pol:
self._map_Q += other._map_Q
self._map_U += other._map_U
return self
@function_timer
def __isub__(self, other):
"""Subtract provided Mapsampler object from this one.
"""
if self.shmem:
# One process does the manipulation on each node
self._map._nodecomm.Barrier()
if self._map._noderank == 0:
self._map._data[:] -= other._map[:]
if self.pol and other.pol:
if self._map_Q._noderank == (1 % self._map_Q._nodeprocs):
self._map_Q._data[:] -= other._map_Q[:]
if self._map_U._noderank == (2 % self._map_U._nodeprocs):
self._map_U._data[:] -= other._map_U[:]
self._map._nodecomm.Barrier()
else:
self._map -= other._map
if self.pol and other.pol:
self._map_Q -= other._map_Q
self._map_U -= other._map_U
return self
@function_timer
def __imul__(self, other):
"""Scale the maps in this MapSampler object
"""
if self.shmem:
# One process does the manipulation on each node
self._map._nodecomm.Barrier()
if self._map._noderank == 0:
self._map._data[:] *= other
if self.pol:
if self._map_Q._noderank == (1 % self._map_Q._nodeprocs):
self._map_Q._data[:] *= other
if self._map_U._noderank == (2 % self._map_U._nodeprocs):
self._map_U._data[:] *= other
self._map._nodecomm.Barrier()
else:
self._map *= other
if self.pol:
self._map_Q *= other
self._map_U *= other
return self
@function_timer
def __itruediv__(self, other):
""" Divide the maps in this MapSampler object
"""
if self.shmem:
self._map._nodecomm.Barrier()
if self._map._noderank == 0:
self._map._data[:] /= other
if self.pol:
if self._map_Q._noderank == (1 % self._map_Q._nodeprocs):
self._map_Q._data[:] /= other
if self._map_U._noderank == (2 % self._map_U._nodeprocs):
self._map_U._data[:] /= other
self._map._nodecomm.Barrier()
else:
self._map /= other
if self.pol:
self._map_Q /= other
self._map_U /= other
return self
@function_timer
def at(self, theta, phi, interp_pix=None, interp_weights=None):
"""
Use healpy bilinear interpolation to interpolate the
map. User must make sure that coordinate system used
for theta and phi matches the map coordinate system.
"""
if self._map is None:
raise RuntimeError("No temperature map to sample")
n = len(theta)
stepsize = self.buflen
signal = np.zeros(n, dtype=np.float32)
# DEBUG begin
if np.any(theta < 0) or
|
np.any(theta > np.pi)
|
numpy.any
|
import REBA.body_part_reba_calculator.Degree_to_REBA.neck_reba_score as REBA_neck
import REBA.body_part_reba_calculator.Degree_to_REBA.trunk_reba_score as REBA_trunk
import REBA.body_part_reba_calculator.Degree_to_REBA.leg_reba_score as REBA_leg
import REBA.body_part_reba_calculator.Degree_to_REBA.upperarm_reba_score as REBA_UA
import REBA.body_part_reba_calculator.Degree_to_REBA.lowerarm_reba_score as REBA_LA
import REBA.body_part_reba_calculator.Degree_to_REBA.wrist_reba_score as REBA_wrist
import REBA.body_part_reba_calculator.partial_REBA_to_total_REBA as REBA
import numpy as np
import _pickle as cPickle
import multiprocessing as mp
from itertools import product, chain
from tensorflow.keras.layers import Dense, Dropout, Concatenate, concatenate
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.optimizers import Adam, RMSprop, Nadam, Adamax, Adadelta, Adagrad, SGD
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras import initializers
from tensorflow.keras.layers import Activation
from tensorflow.keras import backend as K
import tensorflow as tf
from tqdm import tqdm
import math
import gzip
import shutil
import os
import time
import pandas as pd
from functools import partial
from tqdm import tqdm
# libraries for blackbox optimization
from human_forward_kinematic import *
import localsolver
def retrieve_from_pickle(file_address):
f = open(file_address, "rb")
p = cPickle.Unpickler(f)
seqs_list = p.load()
return seqs_list
def store_in_pickle(file_address, data):
p = cPickle.Pickler(open(file_address, "wb"))
p.fast = True
p.dump(data)
def find_largest_power_of_ten(x):
return int(math.log10(x))
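# The sections below follow the same pattern for each body part: enumerate the joint-angle
# ranges, define a small dense network, train it to regress the corresponding REBA sub-score
# over that grid, and report the summed absolute error. An illustrative call sequence:
#     neck_training_model()                   # fits and saves ./data/neck_DNN.model
#     abs_err, n_samples = neck_model_test()  # mean absolute error = abs_err / n_samples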
# Neck
def neck_ranges():
neck_flexion_extension_samples = list(range(-60, 31))
neck_side_flexion_samples = list(range(-54, 55))
neck_rotation_samples = list(range(-60, 61))
return neck_flexion_extension_samples, neck_side_flexion_samples, neck_rotation_samples
def neck_learning_model():
activation = 'tanh'
model = Sequential()
model.add(Dense(3, input_dim=3, activation=activation, name = "neck_model"))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(1))
return model
def neck_training_model():
model = neck_learning_model()
model.compile(optimizer=SGD(lr=0.01), loss='mse')
neck_flexion_extension_samples, neck_side_flexion_samples, neck_rotation_samples = neck_ranges()
for e in tqdm(range(60)):
for i in neck_flexion_extension_samples:
num_of_data = len(neck_side_flexion_samples) * len(neck_rotation_samples)
X_train = np.zeros(shape=(num_of_data, 3))
y_train = np.zeros(shape=(num_of_data,))
counter = 0
for j in neck_side_flexion_samples:
for k in neck_rotation_samples:
m_neck =REBA_neck.NeckREBA([i,j,k])
X_train[counter, :] = [i,j,k]
y_train[counter] = m_neck.neck_reba_score()
counter +=1
model.fit(X_train, y_train, verbose=0)
model.save('./data/neck_DNN.model')
def neck_model_test():
neck_flexion_extension_samples, neck_side_flexion_samples, neck_rotation_samples = neck_ranges()
model = load_model('./data/neck_DNN.model')
abs_sum = 0
for i in tqdm(neck_flexion_extension_samples):
num_of_data = len(neck_side_flexion_samples) * len(neck_rotation_samples)
X_train = np.zeros(shape=(num_of_data, 3))
y_train = np.zeros(shape=(num_of_data,))
counter = 0
for j in neck_side_flexion_samples:
for k in neck_rotation_samples:
m_neck =REBA_neck.NeckREBA([i,j,k])
m_neck_reba_score = m_neck.neck_reba_score()
X_train[counter, :] = [i,j,k]
y_train[counter] = m_neck_reba_score
counter += 1
pred = model.predict(X_train)
for y_true, y_pred in zip(y_train, pred):
abs_sum += math.fabs(y_true - y_pred)
return (abs_sum, len(neck_flexion_extension_samples) * len(neck_side_flexion_samples) * len(neck_rotation_samples))
# trunk
def trunk_ranges():
trunk_flexion_extension_samples = range(-30, 71)
trunk_side_flexion_samples = range(-40, 41)
trunk_rotation_samples = range(-35, 36)
return trunk_flexion_extension_samples, trunk_side_flexion_samples, trunk_rotation_samples
def trunk_learning_model():
activation = 'softplus'
model = Sequential()
model.add(Dense(3, input_dim=3, activation=activation, name = "trunk_model"))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(1))
return model
def trunk_training_model():
model = trunk_learning_model()
model.compile(optimizer=Adam(lr=0.00001), loss='mse')
trunk_flexion_extension_samples, trunk_side_flexion_samples, trunk_rotation_samples = trunk_ranges()
for e in tqdm(range(400)):
for i in trunk_flexion_extension_samples:
num_of_data = len(trunk_side_flexion_samples) * len(trunk_rotation_samples)
X_train = np.zeros(shape=(num_of_data, 3))
y_train = np.zeros(shape=(num_of_data,))
counter = 0
for j in trunk_side_flexion_samples:
for k in trunk_rotation_samples:
m_trunk = REBA_trunk.TrunkREBA([i,j,k])
X_train[counter, :] = [i,j,k]
y_train[counter] = m_trunk.trunk_reba_score()
counter += 1
model.fit(X_train, y_train, verbose=0)
model.save('./data/trunk_DNN.model')
def trunk_model_test():
trunk_flexion_extension_samples, trunk_side_flexion_samples, trunk_rotation_samples = trunk_ranges()
model = load_model('./data/trunk_DNN.model')
abs_sum = 0
for i in tqdm(trunk_flexion_extension_samples):
num_of_data = len(trunk_side_flexion_samples) * len(trunk_rotation_samples)
X_train = np.zeros(shape=(num_of_data, 3))
y_train = np.zeros(shape=(num_of_data,))
counter = 0
for j in trunk_side_flexion_samples:
for k in trunk_rotation_samples:
m_trunk = REBA_trunk.TrunkREBA([i,j,k])
X_train[counter, :] = [i,j,k]
y_train[counter] = m_trunk.trunk_reba_score()
counter += 1
pred = model.predict(X_train)
for y_true, y_pred in zip(y_train, pred):
abs_sum += math.fabs(y_true - y_pred)
return (abs_sum, len(trunk_flexion_extension_samples) * len(trunk_side_flexion_samples) * len(trunk_rotation_samples))
# Legs
def leg_ranges():
legs_flexion_samples = range(0, 151)
return legs_flexion_samples
def leg_learning_model():
activation = 'tanh'
model = Sequential()
model.add(Dense(1, input_dim=1, activation=activation, name = "leg_model"))
model.add(Dense(1, activation=activation))
model.add(Dense(1, activation=activation))
model.add(Dense(1, activation=activation))
model.add(Dense(1, activation=activation))
model.add(Dense(1, activation=activation))
model.add(Dense(1, activation=activation))
model.add(Dense(1))
return model
def leg_training_model():
model = leg_learning_model()
model.compile(optimizer=SGD(lr=0.01), loss='mse')
legs_flexion_samples = leg_ranges()
for e in tqdm(range(100)):
num_of_data = len(legs_flexion_samples)
X_train = np.zeros(shape=(num_of_data, 1))
y_train = np.zeros(shape=(num_of_data,))
counter = 0
for i in legs_flexion_samples:
m_leg = REBA_leg.LegREBA([i,i])
X_train[counter, :] = [i]
y_train[counter] = m_leg.leg_reba_score()
counter += 1
model.fit(X_train, y_train, verbose=0)
model.save('./data/leg_DNN.model')
def leg_model_test():
legs_flexion_samples = leg_ranges()
model = load_model('./data/leg_DNN.model')
num_of_data = len(legs_flexion_samples)
X_train = np.zeros(shape=(num_of_data, 1))
y_train = np.zeros(shape=(num_of_data,))
counter = 0
abs_sum = 0
for i in legs_flexion_samples:
m_leg = REBA_leg.LegREBA([i,i])
X_train[counter, :] = [i]
y_train[counter] = m_leg.leg_reba_score()
counter += 1
pred = model.predict(X_train)
for y_true, y_pred in zip(y_train, pred):
abs_sum += math.fabs(y_true - y_pred)
return (abs_sum, len(legs_flexion_samples))
# Upper Arm
def upper_arm_ranges():
right_upper_arm_flexion_extension_samples = [-47, 165, 170] + [*range(-45, 171, 9)]
left_upper_arm_flexion_extension_samples = [-47, 165, 170] + [*range(-45, 171, 9)]
right_upper_arm_adduction_abduction_samples = [-2, -1] + [*range(0, 201, 10)]
left_upper_arm_adduction_abduction_samples = [-2, -1] + [*range(0, 201, 10)]
right_shoulder_raise_samples = [*range(0, 31, 6)]
left_shoulder_raise_samples = [*range(0, 31, 6)]
return right_upper_arm_flexion_extension_samples, left_upper_arm_flexion_extension_samples, \
right_upper_arm_adduction_abduction_samples, left_upper_arm_adduction_abduction_samples, \
right_shoulder_raise_samples, left_shoulder_raise_samples
def upper_arm_learning_model():
activation = 'tanh'
model = Sequential()
model.add(Dense(6, input_dim=6, activation=activation, name = "upper_arm_model"))
model.add(Dense(6, activation=activation))
model.add(Dense(6, activation=activation))
model.add(Dense(6, activation=activation))
model.add(Dense(5, activation=activation))
model.add(Dense(5, activation=activation))
model.add(Dense(4, activation=activation))
model.add(Dense(4, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(3, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(1))
return model
def upper_arm_training_model():
model = upper_arm_learning_model()
model.compile(optimizer=SGD(lr=0.001), loss='mse')
right_upper_arm_flexion_extension_samples , left_upper_arm_flexion_extension_samples, \
right_upper_arm_adduction_abduction_samples, left_upper_arm_adduction_abduction_samples, \
right_shoulder_raise_samples, left_shoulder_raise_samples = upper_arm_ranges()
for e in tqdm(range(40)):
for i in right_upper_arm_flexion_extension_samples:
for j in left_upper_arm_flexion_extension_samples:
num_of_data = len(right_upper_arm_adduction_abduction_samples) * len(left_upper_arm_adduction_abduction_samples) *\
len(right_shoulder_raise_samples) * len(left_shoulder_raise_samples)
X_train = np.zeros(shape=(num_of_data, 6))
y_train = np.zeros(shape=(num_of_data,))
counter = 0
for k in right_upper_arm_adduction_abduction_samples:
for l in left_upper_arm_adduction_abduction_samples:
for m in right_shoulder_raise_samples:
for n in left_shoulder_raise_samples:
m_UA = REBA_UA.UAREBA([i, j,k,l,m,n])
X_train[counter, :] = [i, j,k,l,m,n]
y_train[counter] = m_UA.upper_arm_reba_score()
counter += 1
model.fit(X_train, y_train, verbose=0)
model.save('./data/upper_arm_DNN.model')
def upper_arm_model_test():
right_upper_arm_flexion_extension_samples , left_upper_arm_flexion_extension_samples, \
right_upper_arm_adduction_abduction_samples, left_upper_arm_adduction_abduction_samples, \
right_shoulder_raise_samples, left_shoulder_raise_samples = upper_arm_ranges()
model = load_model('./data/upper_arm_DNN.model')
abs_sum = 0
for i in right_upper_arm_flexion_extension_samples:
for j in left_upper_arm_flexion_extension_samples:
num_of_data = len(right_upper_arm_adduction_abduction_samples) * len(left_upper_arm_adduction_abduction_samples) *\
len(right_shoulder_raise_samples) * len(left_shoulder_raise_samples)
X_train = np.zeros(shape=(num_of_data, 6))
y_train = np.zeros(shape=(num_of_data,))
counter = 0
for k in right_upper_arm_adduction_abduction_samples:
for l in left_upper_arm_adduction_abduction_samples:
for m in right_shoulder_raise_samples:
for n in left_shoulder_raise_samples:
m_UA = REBA_UA.UAREBA([i, j,k,l,m,n])
X_train[counter, :] = [i, j,k,l,m,n]
y_train[counter] = m_UA.upper_arm_reba_score()
counter += 1
pred = model.predict(X_train)
for y_true, y_pred in zip(y_train, pred):
abs_sum += math.fabs(y_true - y_pred)
return (abs_sum, len(right_upper_arm_flexion_extension_samples) * len(left_upper_arm_flexion_extension_samples) * \
len(right_upper_arm_adduction_abduction_samples) * len(left_upper_arm_adduction_abduction_samples) *\
len(right_shoulder_raise_samples) * len(left_shoulder_raise_samples))
# Lower Arm
def lower_arm_ranges():
right_lower_arm_flexion_samples = range(0, 151)
left_lower_arm_flexion_samples = range(0,151)
return right_lower_arm_flexion_samples, left_lower_arm_flexion_samples
def lower_arm_learning_model():
activation = 'tanh'
model = Sequential()
model.add(Dense(2, input_dim=2, activation=activation, name = "lower_arm_model"))
model.add(Dense(2, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(1))
return model
def lower_arm_training_model():
model = lower_arm_learning_model()
model.compile(optimizer=Nadam(lr=0.001), loss='mse')
right_lower_arm_flexion_samples, left_lower_arm_flexion_samples = lower_arm_ranges()
for e in tqdm(range(100)):
num_of_data = len(right_lower_arm_flexion_samples) * len(left_lower_arm_flexion_samples)
X_train = np.zeros(shape=(num_of_data, 2))
y_train =
|
np.zeros(shape=(num_of_data,))
|
numpy.zeros
|
from random import shuffle
import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import yaml
import re
import os
import glob
import shutil
import sys
import copy
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as c_layers
GPU = '0'
logger = logging.getLogger("traffic")
class NetA(object):
def __init__(self, height, width, visual_input_channels, visual_output_channels, vector_input_channels,
learning_rate, use_mask):
self.batch_size = tf.placeholder(shape=None, dtype=tf.int32, name='batch_size')
self.step = tf.placeholder(shape=None, dtype=tf.float32, name='step')
self.learning_rate = learning_rate
self.is_train_mode = tf.placeholder(shape=None, dtype=tf.bool, name='is_train_mode')
self.global_step, self.increment_step = self.create_global_steps()
self.vector_in = tf.placeholder(shape=[None, vector_input_channels], dtype=tf.float32, name='vector_in')
self.visual_in = tf.placeholder(shape=[None, height, width, visual_input_channels], dtype=tf.float32,
name='visual_in')
self.true_label = tf.placeholder(shape=[None, height, width, visual_output_channels], dtype=tf.float32,
name='true_label')
self.true_label_mask = tf.placeholder(shape=[None, height, width, visual_output_channels], dtype=tf.float32,
name='true_label_mask')
self.use_mask = use_mask
@staticmethod
def create_global_steps():
global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int32)
increment_step = tf.assign(global_step, tf.add(global_step, 1))
return global_step, increment_step
@staticmethod
def create_vector_observation_encoder(observation_input, h_size, activation, num_layers, scope,
reuse, is_train_mode):
with tf.variable_scope(scope):
hidden = observation_input
for i in range(num_layers):
hidden = tf.layers.dense(hidden, h_size, activation=activation, reuse=reuse,
name="hidden_{}".format(i),
kernel_initializer=c_layers.variance_scaling_initializer(1.0))
return hidden
# https://github.com/taki0112/Group_Normalization-Tensorflow
@staticmethod
def group_norm(x, G=8, eps=1e-6, scope='group_norm'):
with tf.variable_scope(scope):
original_shape = x.get_shape().as_list()
original_shape = [-1 if s is None else s for s in original_shape]
if len([s for s in original_shape if s == -1]) > 1:
raise ValueError('Only one axis dimension can be undefined in the input tensor')
N, H, W, C = original_shape
G = min(G, C)
x = tf.reshape(x, [N, H, W, G, C // G])
mean, var = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
x = (x - mean) / tf.sqrt(var + eps)
gamma = tf.get_variable('gamma', [1, 1, 1, C], initializer=tf.constant_initializer(1.0))
beta = tf.get_variable('beta', [1, 1, 1, C], initializer=tf.constant_initializer(0.0))
x = tf.reshape(x, [N, H, W, C]) * gamma + beta
return x
def data_postprocess(self, data, num_frame_sequence, num_frame_before, num_channel):
(input_data, true_label, input_time) = data
batch_size = input_data.shape[0]
height = input_data.shape[2]
width = input_data.shape[3]
input_data = input_data[:, :, :, :, :num_channel]
true_label = true_label[:, :, :, :, :num_channel]
data_mask = (np.max(input_data, axis=4) == 0)
data_mask2 = (np.max(true_label, axis=4) == 0)
input_data = np.moveaxis(input_data, 1, -1).reshape((batch_size, height, width, -1))
true_label = np.moveaxis(true_label, 1, -1).reshape((batch_size, height, width, -1))
orig_label_mask = np.ones((batch_size, num_frame_sequence - num_frame_before, height, width, num_channel),
np.float32)
orig_label_mask[data_mask2, :] = 0.0
orig_label_mask = np.moveaxis(orig_label_mask, 1, -1).reshape((batch_size, height, width, -1))
input_data_mask =
|
np.zeros((batch_size, num_frame_before, height, width, num_channel), np.bool)
|
numpy.zeros
|
# ---------------------------------------------------------------------------- #
# sparam_waveguide.py
# ---------------------------------------------------------------------------- #
# Computes the S-Parameters of a simple Si waveguide and compares them to the
# corresponding analytical solution.
#
# TODO:
#
# ---------------------------------------------------------------------------- #
# VERSION
# ---------------------------------------------------------------------------- #
# 31 May 2018 - AMH - Initialization
#
# ---------------------------------------------------------------------------- #
# Import Libraries
# ---------------------------------------------------------------------------- #
import meep as mp
from meep import mpb
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
import scipy.io as sio
# ---------------------------------------------------------------------------- #
# Important constants
# ---------------------------------------------------------------------------- #
# Debug parameters
debugSim = False
drawSimpleWaveguide = False
# Physical constants
eps0 = 8.854e-12          # Permittivity of free space
mu0 = 4 * np.pi * 1e-7    # Permeability of free space
c0 = 1/np.sqrt(mu0*eps0)  # Speed of light in free space (m/s)
# Material definitions
epsSi = 12
nSi = np.sqrt(epsSi)
Si = mp.Medium(epsilon=epsSi)
# Geometry definitions
waveguideWidth = 0.5 # width of waveguide (y direction)
waveguideLength = mp.inf # length of each waveguide (x direction)
portLength = 10 # gap between ports
# ---------------------------------------------------------------------------- #
# Simulate in Meep
# ---------------------------------------------------------------------------- #
# Create geometry
waveguide = mp.Block(
material = Si,
center=mp.Vector3(0,0,0),
size=mp.Vector3(waveguideLength,waveguideWidth,mp.inf)
)
geometry = [waveguide]
# computational grid resolution
resolution = 40
# Formulate boundary layers
dpml = 2 # size of boundary layer
boundary_layers = [ mp.PML(dpml) ]
# Create computational cell
sx = 2*dpml + 2*portLength # computational grid width
sy = 2*dpml + 2*waveguideWidth # computational grid height
sz = 0 # computational grid thickness
cell_size = mp.Vector3(sx, sy, sz) # computational grid
# Generate source
fLower = 1/2 # lower frequency
fUpper = 1/1 # upper frequency
fcen = np.mean([fLower,fUpper]) # center frequency
fwidth = (fUpper - fLower) # frequency width
xsrc = -portLength/2
sources = [mp.EigenModeSource(mp.GaussianSource(frequency=fcen,width=fwidth),
direction = mp.X,
center=mp.Vector3(xsrc,0))]
# Simulation block
sim = mp.Simulation(resolution=resolution,
cell_size=cell_size,
boundary_layers=boundary_layers,
geometry=geometry,
sources=sources)
# Number of frequency bins to compute
nfreq = 50
# Height of each monitor
monitorHeight = sy
# Port 1 Monitor
xm1 = -portLength/2 + 1 # x-coordinate of monitor
mflux1 = sim.add_eigenmode(fcen, fwidth, nfreq,
mp.FluxRegion(center=mp.Vector3(xm1,0,0),
size=mp.Vector3(0,monitorHeight,mp.inf),
direction=mp.X))
# Port 2 Monitor
xm2 = portLength/2 # x-coordinate of monitor
mflux2 = sim.add_eigenmode(fcen, fwidth, nfreq,
mp.FluxRegion(center=mp.Vector3(xm2,0,0),
size=mp.Vector3(0,monitorHeight,mp.inf),
direction=mp.X))
# Run the simulation
if debugSim:
sim.run(until=5)
else:
sim.run(until_after_sources=mp.stop_when_fields_decayed(50, mp.Ez, mp.Vector3(0,0), 1e-8))
# Extract the S Parameters
bands = [1] # indices of modes for which to compute expansion coefficients
f1 = sim.get_eigenmode_coefficients(mflux1, bands) # Fluxes on port 1
f2 = sim.get_eigenmode_coefficients(mflux2, bands) # Fluxes on port 2
a1 = f1[0,:,0]  # Forward propagating wave on port 1
b1 = f1[0,:,1]  # Backward propagating wave on port 1
a2 = f2[0,:,0]  # Forward propagating wave on port 2
b2 = f2[0,:,1]  # Backward propagating wave on port 2
S11 = b1 / a1
S12 = a2 / a1
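# The eigenmode source launches a forward-traveling mode at port 1, so per frequency bin the
# reflection S11 is the backward/forward ratio at port 1 and the transmission S12 is the
# forward amplitude at port 2 normalized by the forward amplitude at port 1.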
# Pull corresponding frequency data for plots
freqSim = np.array(mp.get_flux_freqs(mflux1))
freqSimAdjust = freqSim * 1e6 * c0 * 1e-12
# Pull effective indices and k vector for each omega point
geometry_lattice = mp.Lattice(size=mp.Vector3(0, sy, 0)) # computational grid
ms = mpb.ModeSolver(
geometry_lattice=geometry_lattice,
geometry=geometry,
resolution=resolution
)
findOmega = freqSim # omegas for which we wish to find corresponding k's
k_meep = np.zeros(nfreq) # preallocate k list
for iter in range(0,nfreq):
values = ms.find_k(mp.NO_PARITY, # polarization of interest
findOmega[iter], # omega corresponding to k
1, # number of bands (min)
1, # number of bands (max)
mp.Vector3(1), # direction in K space
1e-3, # convergence tolerance
findOmega[iter] * nSi, # Guess for K
findOmega[iter] * 0.1, # Min magnitude for K
findOmega[iter] * (nSi + 2) # Max magnitude for K
)
# Extract found k vector from list and store in vector
k_meep[iter] = values[0]
# From found k vectors, calculate effective index and store
neff = k_meep / freqSim
# Save raw S Param to matlab file for debugging
sio.savemat('sparam_waveguide_data.mat', mdict={'freqs':freqSim,'a1': a1,'a2': a2,'b1': b1,'b2': b2})
# ---------------------------------------------------------------------------- #
# Generate analytical waveguide S parameters (magnitude and phase)
# ---------------------------------------------------------------------------- #
# Interpolate k vectors
numFreqPts = 1000  # integer count required by np.linspace
freqAn = np.linspace(fLower,fUpper,numFreqPts)
f = interpolate.PchipInterpolator(freqSim, k_meep)
kInterp = f(freqAn)
# Find distance between the ports
L = (xm2 - xm1)
# Generate the S12/S11 parameters
S12An = np.exp(1j*2*np.pi*kInterp*L)
S11An = np.zeros(int(numFreqPts))
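# For a uniform, lossless waveguide the only effect between the ports is propagation phase,
# so analytically S12 = exp(i*2*pi*k*L) with |S12| = 1, and S11 = 0 since nothing reflects.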
# Convert frequency to THz
freqAn = c0*freqAn *1e6*1e-12
# ---------------------------------------------------------------------------- #
# Plot and compare
# ---------------------------------------------------------------------------- #
plt.figure()
plt.subplot(311)
eps_data = sim.get_array(center=mp.Vector3(), size=cell_size, component=mp.Dielectric)
plt.imshow(np.rot90(eps_data), interpolation='spline36', cmap='binary',
extent=[-sx/2,sx/2,-sy/2,sy/2])
plt.plot([xm1,xm1],[monitorHeight/2,-monitorHeight/2],color='r')
plt.plot([xm2,xm2],[monitorHeight/2,-monitorHeight/2],color='b')
plt.scatter([xsrc],[0],color='g')
plt.legend(('M1','M2','SRC'))
plt.xlabel(r'X ($\mu m$)')
plt.ylabel(r'Y ($\mu m$)')
plt.subplot(312)
plt.plot(freqAn,np.abs(S12An))
plt.plot(freqSimAdjust,np.abs(S12)**2,'.')
plt.plot(freqAn,np.abs(S11An))
plt.plot(freqSimAdjust,
|
np.abs(S11)
|
numpy.abs
|
#! -*- coding:utf-8 -*-
"""
sudo apt-get install opencv-python
sudo pip3 install opencv-python
ucid: http://jasoncantarella.com/downloads/
wget http://jasoncantarella.com/downloads/ucid.v2.tar.gz
copydays: http://lear.inrialpes.fr/people/jegou/data.php#copydays
wget http://pascal.inrialpes.fr/data/holidays/copydays_original.tar.gz
"""
import cv2 as cv
import numpy as np
from math import cos, sqrt
from math import pi as PI
IMAGESIZE = 512
BLOCKSIZE = 64
GRAYLEVEL = 256
M_PI = 3.1415926
class MyImage:
def __init__(self, filename, q):
img = cv.imread(filename)
img = cv.resize(img, (IMAGESIZE, IMAGESIZE))
self.im = np.zeros(shape=(IMAGESIZE, IMAGESIZE), dtype=int)
for i in range(IMAGESIZE):
for j in range(IMAGESIZE):
r, g, b = map(int, img[i][j])
self.im[i][j] = (r + g + b) / (3 * q)
def get_intensity_matrix(self):
return self.im
class MyGLCM:
def __init__(self, im, theta):
self.ttl_val = 0
self.row_mean = 0
self.col_mean = 0
self.row_stdev = 0
self.col_stdev = 0
self.matrix = np.zeros(shape=(GRAYLEVEL, GRAYLEVEL))
self.set_glcm_params(im, theta)
def set_glcm_params(self, im, theta):
t_matrix = np.zeros(shape=(GRAYLEVEL, GRAYLEVEL))
if theta == 0:
for row in range(IMAGESIZE):
for col in range(IMAGESIZE - 1):
x = im[row][col]
y = im[row][col + 1]
self.matrix[x][y] += 1
t_matrix[y][x] += 1
elif theta == 45:
for row in range(1, IMAGESIZE):
for col in range(IMAGESIZE - 1):
x = im[row][col]
y = im[row - 1][col + 1]
self.matrix[x][y] += 1
t_matrix[y][x] += 1
elif theta == 90:
for row in range(1, IMAGESIZE):
for col in range(IMAGESIZE):
x = im[row][col]
y = im[row - 1][col]
self.matrix[x][y] += 1
t_matrix[y][x] += 1
elif theta == 135:
for row in range(1, IMAGESIZE):
for col in range(1, IMAGESIZE - 1):
x = im[row][col]
y = im[row - 1][col - 1]
self.matrix[x][y] += 1
t_matrix[y][x] += 1
# set glcm params
# '''
self.ttl_val = 0
tmp_sum = np.zeros(shape=GRAYLEVEL)
self.row_mean = 0
for x in range(GRAYLEVEL):
s = np.sum(self.matrix[x])
tmp_sum[x] = s
self.ttl_val += s
self.row_mean += (x + 1) * s
self.row_mean /= self.ttl_val
row_var = 0
for x in range(GRAYLEVEL):
row_var += pow(x + 1 - self.row_mean, 2) * tmp_sum[x]
row_var /= self.ttl_val
self.row_stdev = sqrt(row_var)
self.col_mean = 0
for y in range(GRAYLEVEL):
s = np.sum(t_matrix[y])
tmp_sum[y] = s
self.col_mean += (y + 1) * s
self.col_mean /= self.ttl_val
col_var = 0
for y in range(GRAYLEVEL):
col_var += pow(y + 1 - self.col_mean, 2) * tmp_sum[y]
col_var /= self.ttl_val
self.col_stdev = sqrt(col_var)
# '''
def get_texture_features(self, im, T, theta):
contrast = 0
correlation = 0
energy = 0
homogeneity = 0
for x in range(GRAYLEVEL):
for y in range(GRAYLEVEL):
p = self.matrix[x][y]
numerator = (x - self.row_mean) * (y - self.col_mean)
denominator = self.row_stdev * self.col_stdev
if denominator != 0:
correlation += p * numerator / denominator
contrast += p * pow(x - y, 2)
energy += pow(p, 2)
homogeneity += p / (1 + pow(x - y, 2))
contrast /= self.ttl_val
correlation /= self.ttl_val
energy /= pow(self.ttl_val, 2)
homogeneity /= self.ttl_val
offset = (theta // 45 ) * 4
T[offset] = contrast
T[offset + 1] = correlation
T[offset + 2] = energy
T[offset + 3] = homogeneity
class GlobalFeature:
def __init__(self, im):
self.T = np.zeros(shape=16)
glcm0 = MyGLCM(im, 0)
glcm0.get_texture_features(im, self.T, 0)
glcm1 = MyGLCM(im, 45)
glcm1.get_texture_features(im, self.T, 45)
glcm2 = MyGLCM(im, 90)
glcm2.get_texture_features(im, self.T, 90)
glcm3 = MyGLCM(im, 135)
glcm3.get_texture_features(im, self.T, 135)
# self.show_T()
def show_T(self):
for i in range(4):
for j in range(4):
print(self.T[i*4 + j], end=', ')
print()
class LocalFeature:
def __init__(self, im):
self.num_of_blocks_in_a_row = IMAGESIZE // BLOCKSIZE
self._N = int(pow(self.num_of_blocks_in_a_row, 2))
self._2n = BLOCKSIZE
self._n = BLOCKSIZE // 2
self.sqrt_2_div_by_s = sqrt(2) / BLOCKSIZE
self.PI_div_by_2s = M_PI / (2 * BLOCKSIZE)
self.Q = np.zeros(shape=(self._2n, self._N))
for i in range(self._N):
row_begin = int((i // self.num_of_blocks_in_a_row) * BLOCKSIZE)
col_begin = int((i % self.num_of_blocks_in_a_row) * BLOCKSIZE)
# print('Qi begin %d' % i)
Bi = im[row_begin:row_begin+BLOCKSIZE, col_begin:col_begin+BLOCKSIZE]
imf = np.float32(Bi)/255.0
dst = cv.dct(imf)
for v in range(self._n):
self.Q[v][i] = dst[0][v + 1]
for u in range(self._n):
self.Q[u + self._n][i] = dst[u + 1][0]
self.normalization()
def normalization(self):
for r in range(self._2n):
_mean = np.mean(self.Q[r])
_std = np.std(self.Q[r])
for c in range(self._N):
self.Q[r][c] = (self.Q[r][c] - _mean) / _std
self.set_D()
def set_D(self):
U0 = np.zeros(self._2n)
for r in range(self._2n):
U0[r] = np.mean(self.Q[r])
self.D = np.zeros(self._N)
for i in range(self._N):
di = np.subtract(self.Q[:, i], U0)
di = np.square(di).sum()
di = np.sqrt(di)
self.D[i] = di
# print(self.D)
class MyHash:
def get_img_hash(self, filename, q=1):
img = MyImage(filename, q)
global_feature = GlobalFeature(img.im)
local_feature = LocalFeature(img.im)
H = np.zeros(80, dtype=int)
for i in range(16):
val = global_feature.T[i]
H[i] = round(val * 10 + 0.5)
for i in range(64):
val = local_feature.D[i]
H[16 + i] = round(val * 10 + 0.5)
return H
def get_img_cc(self, filename1='airplane.tiff', filename2='house512.tiff'):
# print(filename1, filename2)
H1 = self.get_img_hash(filename1, 2)
H2 = self.get_img_hash(filename2, 2)
print(np.corrcoef(H1, H2))
def get_h(filename, q=1):
img = MyImage(filename, q)
global_feature = GlobalFeature(img.im)
local_feature = LocalFeature(img.im)
H =
|
np.zeros(80)
|
numpy.zeros
|
import numpy as np
from PuzzleLib import Config
from PuzzleLib.Backend import gpuarray, Blas
from PuzzleLib.Backend.Dnn import instanceNorm2d, instanceNorm2dBackward
from PuzzleLib.Variable import Variable
from PuzzleLib.Modules.Module import ModuleError, Module
class InstanceNorm2D(Module):
def __init__(self, numOfMaps, epsilon=1e-5, affine=True, name=None):
super().__init__(name)
self.registerBlueprint(locals())
self.numOfMaps = numOfMaps
self.epsilon = epsilon
self.affine = affine
shape = (1, numOfMaps, 1, 1)
scale = np.ones(shape, dtype=np.float32)
self.scale = None
self.bias = None
self.setVar("scale", Variable(gpuarray.to_gpu(scale)))
self.setVar("bias", Variable(gpuarray.zeros(shape, dtype=np.float32)))
self.savemean, self.saveinvvar, self.extscale, self.scalegrad, self.biasgrad = None, None, None, None, None
def updateData(self, data):
self.data, self.savemean, self.saveinvvar, self.extscale = instanceNorm2d(
data, self.scale, self.bias, self.epsilon
)
def updateGrad(self, grad):
if self.affine:
self.grad, self.scalegrad, self.biasgrad = instanceNorm2dBackward(
grad, self.inData, self.extscale, self.savemean, self.saveinvvar, self.epsilon, True
)
else:
self.grad = instanceNorm2dBackward(
grad, self.inData, self.extscale, self.savemean, self.saveinvvar, self.epsilon, False
)
def accGradParams(self, grad, scale=1.0, momentum=0.0):
if self.affine:
Blas.addVectorToVector(
self.scalegrad.ravel(), self.vars["scale"].grad.ravel(),
out=self.vars["scale"].grad.ravel(), alpha=scale, beta=momentum
)
Blas.addVectorToVector(
self.biasgrad.ravel(), self.vars["bias"].grad.ravel(),
out=self.vars["bias"].grad.ravel(), alpha=scale, beta=momentum
)
def checkDataShape(self, shape):
if len(shape) != 4:
raise ModuleError("Data must be 4d tensor")
def checkGradShape(self, shape):
if shape != self.data.shape:
raise ModuleError("Inconsistency in grad shape - expected %s (%s given)" % (self.data.shape, shape))
def dataShapeFrom(self, shape):
return shape
def gradShapeFrom(self, shape):
return shape
def reset(self):
super().reset()
self.savemean, self.saveinvvar, self.extscale = None, None, None
if self.affine:
self.scalegrad, self.biasgrad = None, None
def calcMode(self, T):
if Config.backend == Config.Backend.cuda:
if T not in {np.float16, np.float32}:
raise ModuleError("Unsupported dtype %s" % T)
elif T != np.float32:
raise ModuleError("Unsupported dtype %s" % T)
self.calctype = T
def unittest():
batchsize, maps, h, w = 5, 3, 4, 4
hostData = np.random.randn(batchsize, maps, h, w).astype(np.float32)
data = gpuarray.to_gpu(hostData)
instNorm2d = InstanceNorm2D(maps, affine=True)
instNorm2d(data)
hostData = hostData.reshape(data.shape[0] * data.shape[1], -1)
hostVar = np.var(hostData, axis=1)
hostInvVar = np.ones(hostData.shape[0], dtype=np.float32) / np.sqrt(hostVar + instNorm2d.epsilon)
hostOutData = (hostData - np.mean(hostData, axis=1, keepdims=True)) * hostInvVar[:, np.newaxis]
assert np.allclose(instNorm2d.data.get(), hostOutData.reshape(data.shape))
assert np.allclose(
instNorm2d.saveinvvar.get().ravel(), hostVar if Config.backend == Config.Backend.intel else hostInvVar
)
grad = gpuarray.to_gpu(
|
np.random.randn(batchsize, maps, h, w)
|
numpy.random.randn
|
# -*- coding: utf-8 -*-
"""High level functions for making better use of baseline algorithms.
Functions in this module make use of other baseline algorithms in
pybaselines to provide better results or optimize parameters.
Created on March 3, 2021
@author: <NAME>
"""
from math import ceil
import numpy as np
from . import classification, morphological, polynomial, spline, whittaker
from ._algorithm_setup import _setup_optimizer, _setup_polynomial, _whittaker_smooth, _yx_arrays
from .utils import _check_scalar, _get_edges, _inverted_sort, gaussian
def collab_pls(data, average_dataset=True, method='asls', method_kwargs=None, **kwargs):
"""
Collaborative Penalized Least Squares (collab-PLS).
Averages the data or the fit weights for an entire dataset to get more
optimal results. Uses any Whittaker-smoothing-based or weighted spline algorithm.
Parameters
----------
data : array-like, shape (M, N)
An array with shape (M, N) where M is the number of entries in
the dataset and N is the number of data points in each entry.
average_dataset : bool, optional
If True (default) will average the dataset before fitting to get the
weighting. If False, will fit each individual entry in the dataset and
then average the weights to get the weighting for the dataset.
method : str, optional
A string indicating the Whittaker-smoothing-based or weighted spline method to
use for fitting the baseline. Default is 'asls'.
**method_kwargs
Keyword arguments to pass to the selected `method` function.
Returns
-------
baselines : np.ndarray, shape (M, N)
An array of all of the baselines.
params : dict
A dictionary with the following items:
* 'average_weights': numpy.ndarray, shape (N,)
The weight array used to fit all of the baselines.
Additional items depend on the output of the selected method. Every
other key will have a list of values, with each item corresponding to a
fit.
References
----------
<NAME>., et al. Collaborative Penalized Least Squares for Background
Correction of Multiple Raman Spectra. Journal of Analytical Methods
in Chemistry, 2018, 2018.
"""
dataset, fit_func, _, method_kws = _setup_optimizer(
data, method, (whittaker, morphological, classification, spline), method_kwargs,
True, **kwargs
)
if dataset.ndim < 2:
raise ValueError((
'the input data must have a shape of (number of measurements, number of points), '
f'but instead has a shape of {dataset.shape}'
))
if average_dataset:
_, fit_params = fit_func(np.mean(dataset.T, 1), **method_kws)
method_kws['weights'] = fit_params['weights']
else:
weights = np.empty_like(dataset)
for i, entry in enumerate(dataset):
_, fit_params = fit_func(entry, **method_kws)
weights[i] = fit_params['weights']
method_kws['weights'] = np.mean(weights.T, 1)
method_kws['tol'] = np.inf
baselines = np.empty(dataset.shape)
params = {'average_weights': method_kws['weights']}
method = method.lower()
if method == 'fabc':
# have to handle differently since weights for fabc is the mask for
# classification rather than weights for fitting
fit_func = _whittaker_smooth
for key in list(method_kws.keys()):
if key not in {'weights', 'lam', 'diff_order'}:
method_kws.pop(key)
for i, entry in enumerate(dataset):
baselines[i], param = fit_func(entry, **method_kws)
if method == 'fabc':
param = {'weights': param}
for key, value in param.items():
if key in params:
params[key].append(value)
else:
params[key] = [value]
return baselines, params
def optimize_extended_range(data, x_data=None, method='asls', side='both', width_scale=0.1,
height_scale=1., sigma_scale=1. / 12., min_value=2, max_value=8,
step=1, pad_kwargs=None, method_kwargs=None, **kwargs):
"""
Extends data and finds the best parameter value for the given baseline method.
Adds additional data to the left and/or right of the input data, and then iterates
through parameter values to find the best fit. Useful for calculating the optimum
`lam` or `poly_order` value required to optimize other algorithms.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
method : str, optional
A string indicating the Whittaker-smoothing-based, polynomial, or spline method
to use for fitting the baseline. Default is 'asls'.
side : {'both', 'left', 'right'}, optional
The side of the measured data to extend. Default is 'both'.
width_scale : float, optional
The number of data points added to each side is `width_scale` * N. Default
is 0.1.
height_scale : float, optional
The height of the added Gaussian peak(s) is calculated as
`height_scale` * max(`data`). Default is 1.
sigma_scale : float, optional
The sigma value for the added Gaussian peak(s) is calculated as
`sigma_scale` * `width_scale` * N. Default is 1/12, which will make
the Gaussian span +- 6 sigma, making its total width about half of the
added length.
min_value : int or float, optional
The minimum value for the `lam` or `poly_order` value to use with the
indicated method. If using a polynomial method, `min_value` must be an
integer. If using a Whittaker-smoothing-based method, `min_value` should
        be the exponent to raise to the power of 10 (e.g. a `min_value` value of 2
designates a `lam` value of 10**2).
Default is 2.
max_value : int or float, optional
The maximum value for the `lam` or `poly_order` value to use with the
indicated method. If using a polynomial method, `max_value` must be an
integer. If using a Whittaker-smoothing-based method, `max_value` should
        be the exponent to raise to the power of 10 (e.g. a `max_value` value of 3
designates a `lam` value of 10**3).
Default is 8.
step : int or float, optional
The step size for iterating the parameter value from `min_value` to `max_value`.
If using a polynomial method, `step` must be an integer.
pad_kwargs : dict, optional
A dictionary of options to pass to :func:`.pad_edges` for padding
the edges of the data when adding the extended left and/or right sections.
Default is None, which will use an empty dictionary.
method_kwargs : dict, optional
A dictionary of keyword arguments to pass to the selected `method` function.
Default is None, which will use an empty dictionary.
**kwargs
Deprecated in version 0.7.0 and will be removed in version 0.10.0 or 1.0. Pass any
keyword arguments for the fitting function in the `method_kwargs` dictionary.
Returns
-------
baseline : numpy.ndarray, shape (N,)
The baseline calculated with the optimum parameter.
method_params : dict
A dictionary with the following items:
* 'optimal_parameter': int or float
The `lam` or `poly_order` value that produced the lowest
root-mean-squared-error.
* 'min_rmse': float
The minimum root-mean-squared-error obtained when using
the optimal parameter.
Additional items depend on the output of the selected method.
Raises
------
ValueError
Raised if `side` is not 'left', 'right', or 'both'.
TypeError
Raised if using a polynomial method and `min_value`, `max_value`, or
`step` is not an integer.
ValueError
Raised if using a Whittaker-smoothing-based method and `min_value`,
`max_value`, or `step` is greater than 100.
Notes
-----
Based on the extended range penalized least squares (erPLS) method from [1]_.
The method proposed by [1]_ was for optimizing lambda only for the aspls
method by extending only the right side of the spectrum. The method was
modified by allowing extending either side following [2]_, and for optimizing
lambda or the polynomial degree for all of the affected algorithms in
pybaselines.
References
----------
.. [1] <NAME>., et al. An Automatic Baseline Correction Method Based on
the Penalized Least Squares Method. Sensors, 2020, 20(7), 2015.
.. [2] <NAME>., et al. Range-independent background subtraction algorithm
for recovery of Raman spectra of biological tissue. Journal of Raman
Spectroscopy. 2012, 43(12), 1884-1894.
"""
side = side.lower()
if side not in ('left', 'right', 'both'):
raise ValueError('side must be "left", "right", or "both"')
y, fit_func, func_module, method_kws = _setup_optimizer(
data, method, (whittaker, polynomial, morphological, spline, classification),
method_kwargs, True, **kwargs
)
method = method.lower()
if func_module == 'polynomial' or method in ('dietrich', 'cwt_br'):
if any(not isinstance(val, int) for val in (min_value, max_value, step)):
raise TypeError((
'min_value, max_value, and step must all be integers when'
' using a polynomial method'
))
param_name = 'poly_order'
else:
if any(val > 100 for val in (min_value, max_value, step)):
raise ValueError((
'min_value, max_value, and step should be the power of 10 to use '
'(eg. min_value=2 denotes 10**2), not the actual "lam" value, and '
'thus should not be greater than 100'
))
param_name = 'lam'
_, x = _yx_arrays(y, x_data)
added_window = int(x.shape[0] * width_scale)
sort_x = x_data is not None
if sort_x:
sort_order = np.argsort(x, kind='mergesort') # to ensure x is increasing
x = x[sort_order]
y = y[sort_order]
if 'weights' in method_kws:
        # have to adjust weight length to accommodate the added sections; set weights
# to 1 to ensure the added sections are fit
method_kws['weights'] = np.pad(
method_kws['weights'][sort_order],
[0 if side == 'right' else added_window, 0 if side == 'left' else added_window],
'constant', constant_values=1
)
max_x = x.max()
min_x = x.min()
x_range = max_x - min_x
known_background = np.array([])
fit_x_data = x
fit_data = y
lower_bound = upper_bound = 0
if pad_kwargs is None:
pad_kwargs = {}
added_left, added_right = _get_edges(y, added_window, **pad_kwargs)
added_gaussian = gaussian(
np.linspace(-added_window / 2, added_window / 2, added_window),
height_scale * abs(y.max()), 0, added_window * sigma_scale
)
if side in ('right', 'both'):
added_x = np.linspace(
max_x, max_x + x_range * (width_scale / 2), added_window + 1
)[1:]
fit_x_data = np.concatenate((fit_x_data, added_x))
fit_data = np.concatenate((fit_data, added_gaussian + added_right))
known_background = added_right
upper_bound += added_window
if side in ('left', 'both'):
added_x = np.linspace(
min_x - x_range * (width_scale / 2), min_x, added_window + 1
)[:-1]
fit_x_data =
|
np.concatenate((added_x, fit_x_data))
|
numpy.concatenate
|
# file for
import numpy as np
import numpy.random as npr
import numpy.linalg as npl
from scipy.spatial.distance import pdist
from argparse import ArgumentParser
import pickle as pkl
import pathlib
import os
import os.path
# import kernel thinning
from goodpoints import kt # kt.thin is the main thinning function; kt.split and kt.swap are other important functions
from goodpoints.util import isnotebook # Check whether this file is being executed as a script or as a notebook
from goodpoints.util import fprint # for printing while flushing buffer
from goodpoints.tictoc import tic, toc # for timing blocks of code
# utils for generating samples, evaluating kernels, and mmds
from util_sample import sample, compute_params_p, sample_string
from util_k_mmd import kernel_eval, compute_params_k, compute_power_kernel_params_k
from util_k_mmd import p_kernel, ppn_kernel, pp_kernel, pnpn_kernel, squared_mmd, get_combined_results_filename
from util_parse import init_parser
# for partial functions, to use kernel_eval for kernel
from functools import partial
def run_kernel_thinning_experiment(m, params_p, params_k_split, params_k_swap, rep_ids,
thin_fun=kt.thin, thin_str="",
delta=None, store_K=False,
sample_seed=1234567, thin_seed=9876543,
compute_mmds = True, compute_fun_diff = True,
rerun=False, results_dir="results_new",
compute_last_mmd_only=True):
"""Runs kernel thinning experiment using samples from params_p for repetitions over rep_ids,
    saves coresets to disk, and saves and returns MMD evaluations.
Args:
m: Number of halving rounds (number of sample points n = 2^{2m})
params_p: Dictionary of distribution parameters recognized by sample()
params_k_split: Dictionary of kernel parameters recognized by kernel_eval()
params_k_swap: Dictionary of kernel parameters recognized by kernel_eval()
rep_ids: Which replication numbers of experiment to run; the replication
number determines the seeds set for reproducibility
delta: delta/(4^m) is the failure probability for
adaptive threshold sequence;
store_K: If False, runs O(nd) space version which does not store kernel
matrix; if True, stores n x n kernel matrix
sample_seed: (Optional) random seed is set to sample_seed + rep
prior to generating input sample for replication rep
thin_seed: (Optional) random seed is set to thin_seed + rep
prior to running thinning for replication rep
rerun: (Optional) If False and results have been previously saved to
disk, load results from disk instead of rerunning experiment
results_dir: (Optional) Directory in which results should be saved
compute_mmds: (Optional) Whether to compute mmds of coresets (using params_k_swap)
compute_fun_diff: (Optional) whether to compute (Pf - Pnf); default f = k(0, .), where k is defined via params_k_swap
compute_last_mmd_only: (Optional) whether to compute mmd for entire range(m+1), or just m; to speed up computation for large m
"""
# range of m for which mmd is evaluated
mmd_eval_ms = range(m, m+1) if compute_last_mmd_only else range(m+1)
# Create results directory if necessary
pathlib.Path(results_dir).mkdir(parents=True, exist_ok=True)
split_kernel = partial(kernel_eval, params_k=params_k_split)
swap_kernel = partial(kernel_eval, params_k=params_k_swap)
# Construct results filename template with placeholder for rep value
d = params_p["d"]
assert(d==params_k_split["d"])
assert(d==params_k_swap["d"])
sample_str = sample_string(params_p, sample_seed)
split_kernel_str = "{}_var{:.3f}_seed{}".format(params_k_split["name"], params_k_split["var"], thin_seed)
swap_kernel_str = "{}_var{:.3f}".format(params_k_swap["name"], params_k_swap["var"])
thresh_str = f"delta{delta}"
file_template = os.path.join(results_dir, f"kt{thin_str}-coresets-{sample_str}-split{split_kernel_str}-swap{swap_kernel_str}-d{d}-m{m}-{thresh_str}-rep{{}}.pkl")
# Create array to store MMD evaluations from P, and Sin
mmds_p = np.zeros((m+1, len(rep_ids)))
mmds_sin = np.zeros((m+1, len(rep_ids)))
# when Pnmax is changed; name changes only for mmd file names
if params_p["flip_Pnmax"]:
mmd_p_sample_str = sample_str + "_flip_Pnmax_"
else:
mmd_p_sample_str = sample_str
mmd_p_file_template = os.path.join(results_dir,
f"kt{thin_str}-mmd-{mmd_p_sample_str}-split{split_kernel_str}-swap{swap_kernel_str}-d{d}-m{m}-{thresh_str}-rep{{}}.pkl")
mmd_sin_file_template = os.path.join(results_dir,
f"kt{thin_str}-mmd-sin-{sample_str}-split{split_kernel_str}-swap{swap_kernel_str}-d{d}-m{m}-{thresh_str}-rep{{}}.pkl")
fun_diff_p = np.zeros((m+1, len(rep_ids)))
fun_diff_sin = np.zeros((m+1, len(rep_ids)))
fun_diff_p_file_template = os.path.join(results_dir,
f"kt{thin_str}-fundiff-{mmd_p_sample_str}-split{split_kernel_str}-swap{swap_kernel_str}-d{d}-m{m}-{thresh_str}-rep{{}}.pkl")
fun_diff_sin_template = os.path.join(results_dir,
f"kt{thin_str}-fundiff-sin-{sample_str}-split{split_kernel_str}-swap{swap_kernel_str}-d{d}-m{m}-{thresh_str}-rep{{}}.pkl")
split_kernel = partial(kernel_eval, params_k=params_k_split)
swap_kernel = partial(kernel_eval, params_k=params_k_swap)
# Number of sample points
n = int(2**(2*m))
fprint(f"Running kernel thinning {thin_str} experiment with template {file_template}.....")
tic()
for r_i, rep in enumerate(rep_ids):
tic()
# Include replication number in filename
filename = file_template.format(rep)
mmd_p_filename = mmd_p_file_template.format(rep)
mmd_sin_filename = mmd_sin_file_template.format(rep)
fun_diff_p_filename = fun_diff_p_file_template.format(rep)
fun_diff_sin_filename = fun_diff_sin_template.format(rep)
# Generate matrix of input sample points
X = sample(n, params_p, seed=sample_seed+rep)
if not rerun and os.path.exists(filename):
# Return previously saved results
with open(filename, 'rb') as file:
coresets = pkl.load(file)
else:
# Obtain sequence of thinned coresets
print(f"Kernel Thinning {thin_str} rep {rep}...", flush=True)
coresets = thin_fun(X, m, split_kernel, swap_kernel, delta=delta, seed=thin_seed+rep, store_K=store_K)
# Save coresets to disk
with open(filename, 'wb') as file:
pkl.dump(coresets, file, protocol=pkl.HIGHEST_PROTOCOL)
#toc()
# Evaluate final coreset MMD
if compute_mmds:
if not rerun and os.path.exists(mmd_sin_filename):
# Return previously saved results
with open(mmd_sin_filename, 'rb') as file:
mmds_sin[:, r_i] = pkl.load(file)
else:
print(f"Evaluating KT MMD_Sin for m = {mmd_eval_ms}", flush=True)
tic()
for j in mmd_eval_ms:
nj = int(2**j)
params_p_sin = dict()
params_p_sin["d"] = d
params_p_sin["name"] = params_p["name"]+ "_sin"
params_p_sin["Pnmax"] = X
params_p_sin["saved_samples"] = False
mmds_sin[j, r_i] = np.sqrt(squared_mmd(params_k_swap, params_p_sin, X[coresets[:nj]]))
toc()
# Save MMD results to disk
with open(mmd_sin_filename, 'wb') as file:
pkl.dump(mmds_sin[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if not rerun and os.path.exists(mmd_p_filename):
# Return previously saved results
with open(mmd_p_filename, 'rb') as file:
mmds_p[:, r_i] = pkl.load(file)
else:
print(f"Evaluating KT MMD_P for m = {mmd_eval_ms}", flush=True)
tic()
for j in mmd_eval_ms:
nj = int(2**j)
if params_k_swap["name"] == "gauss":
mmds_p[j, r_i] = np.sqrt(
squared_mmd(params_k_swap, params_p, X[coresets[:nj]]))
else:
mmds_p[j, r_i] = mmds_sin[j, r_i]
toc()
# Save MMD results to disk
with open(mmd_p_filename, 'wb') as file:
pkl.dump(mmds_p[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
# Evaluate final coreset fun diff
if compute_fun_diff:
if not rerun and os.path.exists(fun_diff_sin_filename):
# Return previously saved results
with open(fun_diff_sin_filename, 'rb') as file:
fun_diff_sin[:, r_i] = pkl.load(file)
else:
print(f"Evaluating KT fun diff results with P_in for m = {mmd_eval_ms}", flush=True)
tic()
for j in mmd_eval_ms:
nj = int(2**j)
pin_fun = np.mean(kernel_eval(np.zeros((1, d)), X, params_k_swap))
pout_fun = np.mean(kernel_eval(np.zeros((1, d)), X[coresets[:nj]], params_k_swap))
fun_diff_sin[j, r_i] = np.abs(pin_fun-pout_fun)
toc()
# Save results to disk
with open(fun_diff_sin_filename, 'wb') as file:
pkl.dump(fun_diff_sin[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if not rerun and os.path.exists(fun_diff_p_filename):
# Return previously saved results
with open(fun_diff_p_filename, 'rb') as file:
fun_diff_p[:, r_i] = pkl.load(file)
else:
print(f"Evaluating KT fun diff results with P for m = {mmd_eval_ms}", flush=True)
tic()
for j in mmd_eval_ms:
nj = int(2**j)
if params_k_swap["name"] == "gauss":
p_fun = p_kernel(np.zeros((1, d)), params_k=params_k_swap, params_p=params_p)[0] # fun is fixed to be k(0, .)
pout_fun = np.mean(kernel_eval(np.zeros((1, d)), X[coresets[:nj]], params_k_swap))
fun_diff_p[j, r_i] = np.abs(p_fun-pout_fun)
else:
fun_diff_p[j, r_i] = fun_diff_sin[j, r_i]
toc()
# Save results to disk
with open(fun_diff_p_filename, 'wb') as file:
pkl.dump(fun_diff_p[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
toc()
toc()
if compute_mmds and compute_fun_diff:
return(mmds_p, mmds_sin, fun_diff_p, fun_diff_sin)
if compute_mmds:
return(mmds_p, mmds_sin)
else:
return(mmds_p, mmds_sin, fun_diff_p, fun_diff_sin)
def run_standard_thinning_experiment(m, params_p, params_k_mmd, rep_ids, sample_seed=1234567,
rerun=False, results_dir="results_new", compute_mmds=True, compute_fun_diff=True,
compute_last_mmd_only=True):
"""Evaluates MMD of iid Monte Carlo draws, and saves it to disk
Args:
m: Number of halving rounds (defines number of sample points via n = 2^{2m})
params_p: Dictionary of distribution parameters recognized by sample()
params_k_mmd: Dictionary of kernel parameters for MMD evaluation
rep_ids: Which replication numbers of experiment to run; the replication
number determines the seeds set for reproducibility
sample_seed: (Optional) random seed is set to sample_seed + rep
prior to generating input sample for replication rep
rerun: (Optional) If False and results have been previously saved to
disk, load results from disk instead of rerunning experiment
results_dir: (Optional) Directory in which results should be saved
compute_mmds: (Optional) Whether to compute mmds of coresets (using params_k_mmd)
compute_fun_diff: (Optional) whether to compute (Pf - Pnf); default f = k(0, .), where k is defined via params_k_mmd
compute_last_mmd_only: (Optional) whether to compute mmd for entire range(m+1), or just m; to speed up computation for large m
"""
# range of m for which mmd is evaluated
mmd_eval_ms = range(m, m+1) if compute_last_mmd_only else range(m+1)
# Create results directory if necessary
pathlib.Path(results_dir).mkdir(parents=True, exist_ok=True)
# Create array to store MMD evaluations
mmds_p = np.zeros((m+1, len(rep_ids)))
mmds_sin = np.zeros((m+1, len(rep_ids)))
fun_diff_p = np.zeros((m+1, len(rep_ids)))
fun_diff_sin = np.zeros((m+1, len(rep_ids)))
# Construct results filename template with placeholder for rep value
d = params_p["d"]
assert(d == params_k_mmd["d"])
sample_str = sample_string(params_p, sample_seed)
kernel_str = "{}_var{:.3f}".format(params_k_mmd["name"], params_k_mmd["var"])
if params_p["flip_Pnmax"]:
mmd_p_sample_str = sample_str + "_flip_Pnmax_"
else:
mmd_p_sample_str = sample_str
mmd_p_file_template = os.path.join(results_dir, f"mc-mmd-{mmd_p_sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
mmd_sin_file_template = os.path.join(results_dir, f"mc-mmd-sin-{sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
fun_diff_p_file_template = os.path.join(results_dir, f"mc-fundiff-{mmd_p_sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
fun_diff_sin_file_template = os.path.join(results_dir, f"mc-fundiff-sin-{sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
# Number of sample points
n = int(2**(2*m))
fprint(f"Running standard thinning experiment for m={m} with template {mmd_p_file_template}")
tic()
if params_p["saved_samples"]:
rep_ids = np.zeros(len(rep_ids), dtype=int)
        # For MCMC data (saved_samples = True) the input sample is fixed, so other rep_ids
        # would give identical results; always load the rep=0 results instead of rerunning.
for r_i, rep in enumerate(rep_ids):
# Include replication number in filename
fprint(f"Standard thinning {r_i} (rep={rep})")
mmd_p_filename = mmd_p_file_template.format(rep)
mmd_sin_filename = mmd_sin_file_template.format(rep)
fun_diff_p_filename = fun_diff_p_file_template.format(rep)
fun_diff_sin_filename = fun_diff_sin_file_template.format(rep)
if compute_mmds:
if not rerun and os.path.exists(mmd_sin_filename):
# Return previously saved results
with open(mmd_sin_filename, 'rb') as file:
mmds_sin[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
print(f"Evaluating Monte Carlo MMD_Sin for m = {mmd_eval_ms}", flush=True)
tic()
for j in mmd_eval_ms:
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
step_size = coreset_size
end = input_size
# redefining target p as distribution on Sin
params_p_sin = dict()
params_p_sin["d"] = d
params_p_sin["name"] = params_p["name"]+"_sin"
params_p_sin["Pnmax"] = X
params_p_sin["saved_samples"] = False
mmds_sin[j, r_i] = np.sqrt(squared_mmd(params_k_mmd, params_p_sin, X[(step_size-1):end:step_size]))
toc()
# Save MMD results to disk
with open(mmd_sin_filename, 'wb') as file:
pkl.dump(mmds_sin[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if not rerun and os.path.exists(mmd_p_filename):
# Return previously saved results
with open(mmd_p_filename, 'rb') as file:
mmds_p[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
tic()
print(f"Evaluating Monte Carlo MMD_P for m = {mmd_eval_ms}", flush=True)
for j in mmd_eval_ms:
if params_k_mmd["name"] == "gauss":
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
step_size = coreset_size
end = input_size
mmds_p[j, r_i] = np.sqrt(squared_mmd(params_k_mmd, params_p, X[(step_size-1):end:step_size]))
else:
mmds_p[j, r_i] = mmds_sin[j, r_i]
toc()
# Save MMD results to disk
with open(mmd_p_filename, 'wb') as file:
pkl.dump(mmds_p[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if compute_fun_diff:
if not rerun and os.path.exists(fun_diff_sin_filename):
# Return previously saved results
with open(fun_diff_sin_filename, 'rb') as file:
fun_diff_sin[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
tic()
print(f"Evaluating Monte Carlo fun diff with Pin_f for m = {mmd_eval_ms}", flush=True)
for j in mmd_eval_ms:
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
step_size = coreset_size
end = input_size
pin_fun = np.mean(kernel_eval(np.zeros((1, d)), X, params_k_mmd))
pout_fun = np.mean(kernel_eval(np.zeros((1, d)), X[(step_size-1):end:step_size], params_k_mmd))
fun_diff_sin[j, r_i] = np.abs(pin_fun-pout_fun)
toc()
# Save MMD results to disk
with open(fun_diff_sin_filename, 'wb') as file:
pkl.dump(fun_diff_sin[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if not rerun and os.path.exists(fun_diff_p_filename):
# Return previously saved results
with open(fun_diff_p_filename, 'rb') as file:
fun_diff_p[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
tic()
print(f"Evaluating Monte Carlo fun diff Pf for m = {mmd_eval_ms}", flush=True)
for j in mmd_eval_ms:
if params_k_mmd["name"] == "gauss":
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
step_size = coreset_size
end = input_size
p_fun = p_kernel(np.zeros((1, d)), params_k=params_k_mmd, params_p=params_p)[0] # fun is fixed to be k(0, .)
pout_fun = np.mean(kernel_eval(np.zeros((1, d)), X[(step_size-1):end:step_size], params_k_mmd))
fun_diff_p[j, r_i] = np.abs(p_fun-pout_fun)
else:
fun_diff_p[j, r_i] = fun_diff_sin[j, r_i]
toc()
# Save MMD results to disk
with open(fun_diff_p_filename, 'wb') as file:
pkl.dump(fun_diff_p[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
toc()
return(mmds_p, mmds_sin, fun_diff_p, fun_diff_sin)
def run_standard_thinning_experiment(m, params_p, params_k_mmd, rep_ids, sample_seed=1234567,
rerun=False, results_dir="results_new", compute_mmds=True, compute_fun_diff=True,
compute_last_mmd_only=True):
"""Evaluates MMD of iid Monte Carlo draws, and saves it to disk
Args:
m: Number of halving rounds (defines number of sample points via n = 2^{2m})
params_p: Dictionary of distribution parameters recognized by sample()
params_k_mmd: Dictionary of kernel parameters for MMD evaluation
rep_ids: Which replication numbers of experiment to run; the replication
number determines the seeds set for reproducibility
sample_seed: (Optional) random seed is set to sample_seed + rep
prior to generating input sample for replication rep
rerun: (Optional) If False and results have been previously saved to
disk, load results from disk instead of rerunning experiment
results_dir: (Optional) Directory in which results should be saved
compute_mmds: (Optional) Whether to compute mmds of coresets (using params_k_mmd)
compute_fun_diff: (Optional) whether to compute (Pf - Pnf); default f = k(0, .), where k is defined via params_k_mmd
compute_last_mmd_only: (Optional) whether to compute mmd for entire range(m+1), or just m; to speed up computation for large m
"""
# range of m for which mmd is evaluated
mmd_eval_ms = range(m, m+1) if compute_last_mmd_only else range(m+1)
# Create results directory if necessary
pathlib.Path(results_dir).mkdir(parents=True, exist_ok=True)
# Create array to store MMD evaluations
mmds_p = np.zeros((m+1, len(rep_ids)))
mmds_sin = np.zeros((m+1, len(rep_ids)))
fun_diff_p = np.zeros((m+1, len(rep_ids)))
fun_diff_sin = np.zeros((m+1, len(rep_ids)))
# Construct results filename template with placeholder for rep value
d = params_p["d"]
assert(d == params_k_mmd["d"])
sample_str = sample_string(params_p, sample_seed)
kernel_str = "{}_var{:.3f}".format(params_k_mmd["name"], params_k_mmd["var"])
if params_p["flip_Pnmax"]:
mmd_p_sample_str = sample_str + "_flip_Pnmax_"
else:
mmd_p_sample_str = sample_str
mmd_p_file_template = os.path.join(results_dir, f"mc-mmd-{mmd_p_sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
mmd_sin_file_template = os.path.join(results_dir, f"mc-mmd-sin-{sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
fun_diff_p_file_template = os.path.join(results_dir, f"mc-fundiff-{mmd_p_sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
fun_diff_sin_file_template = os.path.join(results_dir, f"mc-fundiff-sin-{sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
# Number of sample points
n = int(2**(2*m))
fprint(f"Running standard thinning experiment for m={m} with template {mmd_p_file_template}")
tic()
if params_p["saved_samples"]:
rep_ids = np.zeros(len(rep_ids), dtype=int)
        # don't rerun the standard thinning experiment for MCMC data (saved_samples = True); always load the rep=0 results
        # this shortcut is safe because the data is fixed, so other rep_ids would not produce different results
for r_i, rep in enumerate(rep_ids):
# Include replication number in filename
fprint(f"Standard thinning {r_i} (rep={rep})")
mmd_p_filename = mmd_p_file_template.format(rep)
mmd_sin_filename = mmd_sin_file_template.format(rep)
fun_diff_p_filename = fun_diff_p_file_template.format(rep)
fun_diff_sin_filename = fun_diff_sin_file_template.format(rep)
if compute_mmds:
if not rerun and os.path.exists(mmd_sin_filename):
# Return previously saved results
with open(mmd_sin_filename, 'rb') as file:
mmds_sin[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
print(f"Evaluating Monte Carlo MMD_Sin for m = {mmd_eval_ms}", flush=True)
tic()
for j in mmd_eval_ms:
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
step_size = coreset_size
end = input_size
# redefining target p as distribution on Sin
params_p_sin = dict()
params_p_sin["d"] = d
params_p_sin["name"] = params_p["name"]+"_sin"
params_p_sin["Pnmax"] = X
params_p_sin["saved_samples"] = False
mmds_sin[j, r_i] = np.sqrt(squared_mmd(params_k_mmd, params_p_sin, X[(step_size-1):end:step_size]))
toc()
# Save MMD results to disk
with open(mmd_sin_filename, 'wb') as file:
pkl.dump(mmds_sin[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if not rerun and os.path.exists(mmd_p_filename):
# Return previously saved results
with open(mmd_p_filename, 'rb') as file:
mmds_p[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
tic()
print(f"Evaluating Monte Carlo MMD_P for m = {mmd_eval_ms}", flush=True)
for j in mmd_eval_ms:
if params_k_mmd["name"] == "gauss":
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
step_size = coreset_size
end = input_size
mmds_p[j, r_i] = np.sqrt(squared_mmd(params_k_mmd, params_p, X[(step_size-1):end:step_size]))
else:
mmds_p[j, r_i] = mmds_sin[j, r_i]
toc()
# Save MMD results to disk
with open(mmd_p_filename, 'wb') as file:
pkl.dump(mmds_p[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if compute_fun_diff:
if not rerun and os.path.exists(fun_diff_sin_filename):
# Return previously saved results
with open(fun_diff_sin_filename, 'rb') as file:
fun_diff_sin[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
tic()
print(f"Evaluating Monte Carlo fun diff with Pin_f for m = {mmd_eval_ms}", flush=True)
for j in mmd_eval_ms:
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
step_size = coreset_size
end = input_size
pin_fun = np.mean(kernel_eval(np.zeros((1, d)), X, params_k_mmd))
pout_fun = np.mean(kernel_eval(np.zeros((1, d)), X[(step_size-1):end:step_size], params_k_mmd))
fun_diff_sin[j, r_i] = np.abs(pin_fun-pout_fun)
toc()
# Save MMD results to disk
with open(fun_diff_sin_filename, 'wb') as file:
pkl.dump(fun_diff_sin[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if not rerun and os.path.exists(fun_diff_p_filename):
# Return previously saved results
with open(fun_diff_p_filename, 'rb') as file:
fun_diff_p[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
#toc()
tic()
print(f"Evaluating Monte Carlo fun diff Pf for m = {mmd_eval_ms}", flush=True)
for j in mmd_eval_ms:
if params_k_mmd["name"] == "gauss":
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
step_size = coreset_size
end = input_size
p_fun = p_kernel(np.zeros((1, d)), params_k=params_k_mmd, params_p=params_p)[0] # fun is fixed to be k(0, .)
pout_fun = np.mean(kernel_eval(np.zeros((1, d)), X[(step_size-1):end:step_size], params_k_mmd))
fun_diff_p[j, r_i] = np.abs(p_fun-pout_fun)
else:
fun_diff_p[j, r_i] = fun_diff_sin[j, r_i]
toc()
# Save MMD results to disk
with open(fun_diff_p_filename, 'wb') as file:
pkl.dump(fun_diff_p[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
toc()
return(mmds_p, mmds_sin, fun_diff_p, fun_diff_sin)
def run_iid_thinning_experiment(m, params_p, params_k_mmd, rep_ids, sample_seed=1234567, thin_seed=9876543,
rerun=False, results_dir="results_new", compute_mmds=True, compute_fun_diff=True,
compute_last_mmd_only=True):
"""Evaluates MMD of iid Monte Carlo draws, and saves it to disk
Args:
m: Number of halving rounds (defines number of sample points via n = 2^{2m})
params_p: Dictionary of distribution parameters recognized by sample()
params_k_mmd: Dictionary of kernel parameters for MMD evaluation
rep_ids: Which replication numbers of experiment to run; the replication
number determines the seeds set for reproducibility
      sample_seed: (Optional) random seed is set to sample_seed + rep
        prior to generating input sample for replication rep
      thin_seed: (Optional) random seed is set to thin_seed + rep
        prior to drawing the uniform random subsample for replication rep
rerun: (Optional) If False and results have been previously saved to
disk, load results from disk instead of rerunning experiment
results_dir: (Optional) Directory in which results should be saved
compute_mmds: (Optional) Whether to compute mmds of coresets (using params_k_mmd)
compute_fun_diff: (Optional) whether to compute (Pf - Pnf); default f = k(0, .), where k is defined via params_k_mmd
compute_last_mmd_only: (Optional) whether to compute mmd for entire range(m+1), or just m; to speed up computation for large m
"""
# range of m for which mmd is evaluated
mmd_eval_ms = range(m, m+1) if compute_last_mmd_only else range(m+1)
# Create results directory if necessary
pathlib.Path(results_dir).mkdir(parents=True, exist_ok=True)
# Create array to store MMD evaluations
mmds_p = np.zeros((m+1, len(rep_ids)))
mmds_sin = np.zeros((m+1, len(rep_ids)))
fun_diff_p = np.zeros((m+1, len(rep_ids)))
fun_diff_sin = np.zeros((m+1, len(rep_ids)))
# Construct results filename template with placeholder for rep value
d = params_p["d"]
assert(d == params_k_mmd["d"])
sample_str = sample_string(params_p, sample_seed)
kernel_str = "{}_var{:.3f}".format(params_k_mmd["name"], params_k_mmd["var"])
if params_p["flip_Pnmax"]:
mmd_p_sample_str = sample_str + "_flip_Pnmax_"
else:
mmd_p_sample_str = sample_str
mmd_p_file_template = os.path.join(results_dir, f"mc-iid-mmd-{mmd_p_sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
mmd_sin_file_template = os.path.join(results_dir, f"mc-iid-mmd-sin-{sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
fun_diff_p_file_template = os.path.join(results_dir, f"mc-iid-fundiff-{mmd_p_sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
fun_diff_sin_file_template = os.path.join(results_dir, f"mc-iid-fundiff-sin-{sample_str}-{kernel_str}-d{d}-m{m}-rep{{}}.pkl")
# Number of sample points
n = int(2**(2*m))
fprint(f"Running iid thinning experiment for m={m} with template {mmd_p_file_template}.....")
tic()
for r_i, rep in enumerate(rep_ids):
# Include replication number in filename
fprint(f"IID thinning {r_i} (rep={rep})")
mmd_p_filename = mmd_p_file_template.format(rep)
mmd_sin_filename = mmd_sin_file_template.format(rep)
fun_diff_p_filename = fun_diff_p_file_template.format(rep)
fun_diff_sin_filename = fun_diff_sin_file_template.format(rep)
if compute_mmds:
if not rerun and os.path.exists(mmd_sin_filename):
# Return previously saved results
with open(mmd_sin_filename, 'rb') as file:
mmds_sin[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
print(f"Evaluating Monte Carlo MMD_Sin for m = {mmd_eval_ms}", flush=True)
tic()
for j in mmd_eval_ms:
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
thin_idx = npr.default_rng(thin_seed+rep).choice(input_size,
coreset_size, replace=False)
# redefining target p as distribution on Sin
params_p_sin = dict()
params_p_sin["d"] = d
params_p_sin["name"] = params_p["name"]+"_sin"
params_p_sin["Pnmax"] = X
params_p_sin["saved_samples"] = False
mmds_sin[j, r_i] = np.sqrt(squared_mmd(params_k_mmd, params_p_sin,
X[thin_idx]))
toc()
# Save MMD results to disk
with open(mmd_sin_filename, 'wb') as file:
pkl.dump(mmds_sin[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if not rerun and os.path.exists(mmd_p_filename):
# Return previously saved results
with open(mmd_p_filename, 'rb') as file:
mmds_p[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
tic()
print(f"Evaluating Monte Carlo MMD_P for m = {mmd_eval_ms}", flush=True)
for j in mmd_eval_ms:
if params_k_mmd["name"] == "gauss":
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
thin_idx = npr.default_rng(thin_seed+rep).choice(input_size,
coreset_size, replace=False)
mmds_p[j, r_i] = np.sqrt(squared_mmd(params_k_mmd, params_p,
X[thin_idx]))
else:
mmds_p[j, r_i] = mmds_sin[j, r_i]
toc()
# Save MMD results to disk
with open(mmd_p_filename, 'wb') as file:
pkl.dump(mmds_p[:, r_i], file, protocol=pkl.HIGHEST_PROTOCOL)
if compute_fun_diff:
if not rerun and os.path.exists(fun_diff_sin_filename):
# Return previously saved results
with open(fun_diff_sin_filename, 'rb') as file:
fun_diff_sin[:, r_i] = pkl.load(file)
else:
X = sample(n, params_p, seed=sample_seed+rep)
tic()
print(f"Evaluating Monte Carlo fun diff with Pin_f for m = {mmd_eval_ms}", flush=True)
for j in mmd_eval_ms:
# Target coreset size
coreset_size = int(2**j)
input_size = int(coreset_size**2)
thin_idx = npr.default_rng(thin_seed+rep).choice(input_size,
coreset_size, replace=False)
pin_fun = np.mean(kernel_eval(
|
np.zeros((1, d))
|
numpy.zeros
|
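# A minimal, self-contained sketch (not from the source) of the pattern the
# preceding record completes with np.zeros((1, d)): averaging a kernel
# k(0, .) over a sample. The kernel and bandwidth below are assumed.
import numpy as np

def _gauss_kernel(Y, X, var=1.0):
    # k(y, x) = exp(-||y - x||^2 / (2 * var)) for the single row Y against each row of X
    return np.exp(-np.sum((X - Y) ** 2, axis=1) / (2.0 * var))

_d, _n = 2, 1024
_X = np.random.default_rng(0).standard_normal((_n, _d))
_pin_fun = np.mean(_gauss_kernel(np.zeros((1, _d)), _X))  # Pn f with f = k(0, .)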
import numpy as np
import soundfile as sf
from scipy.io.wavfile import read
from scipy import interpolate
from herpetologist import check_type
def resample(data, old_samplerate, new_samplerate):
"""
Resample signal.
Parameters
----------
data: np.array
old_samplerate: int
old sample rate.
new_samplerate: int
new sample rate.
Returns
-------
    result: np.array
        signal resampled to new_samplerate.
"""
old_audio = data
duration = data.shape[0] / old_samplerate
time_old = np.linspace(0, duration, old_audio.shape[0])
time_new = np.linspace(
0, duration, int(old_audio.shape[0] * new_samplerate / old_samplerate)
)
interpolator = interpolate.interp1d(time_old, old_audio.T)
data = interpolator(time_new).T
return data
def read_audio(data, old_samplerate, sample_rate=22050):
if len(data.shape) == 2:
data = data[:, 0]
if old_samplerate != sample_rate and sample_rate is not None:
data = resample(data, old_samplerate, sample_rate)
else:
sample_rate = old_samplerate
return data, sample_rate
@check_type
def load(file: str, sr=16000, scale: bool = True):
"""
Read sound file, any format supported by soundfile.read
Parameters
----------
file: str
sr: int, (default=16000)
new sample rate. If input sample rate is not same, will resample automatically.
scale: bool, (default=True)
Scale to -1 and 1.
Returns
-------
result: (y, sr)
"""
data, old_samplerate = sf.read(file)
y, sr = read_audio(data, old_samplerate, sr)
if scale:
y = y / (np.max(
|
np.abs(y)
|
numpy.abs
|
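# A minimal sketch (toy waveform, not from the source) of the normalization the
# preceding record completes with np.abs: dividing by the peak absolute value
# scales the signal into [-1, 1].
import numpy as np

_y = np.array([0.2, -0.5, 0.1, 0.4])
_y_scaled = _y / np.max(np.abs(_y))          # peak magnitude becomes exactly 1
assert np.isclose(np.max(np.abs(_y_scaled)), 1.0)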
#basics
from gensim import utils
import numpy as np
from numpy.lib.utils import deprecate
import pandas as pd
import re
from functools import wraps
from typing import Union
# pytorch
import torch
from torch import Tensor
from torch._C import dtype
from torch.nn.utils.rnn import pad_sequence
# segnlp
from .label_encoder import LabelEncoder
from .array import ensure_numpy
from .array import ensure_list
from .array import create_mask
from .array import np_cumsum_zero
from .overlap import find_overlap
from .misc import timer
class Batch:
def __init__(self,
df: pd.DataFrame,
label_encoder : LabelEncoder,
pretrained_features: dict = {},
device = None
):
self._df : pd.DataFrame = df
self._pred_df : pd.DataFrame = df.copy(deep=True)
self.label_encoder : LabelEncoder = label_encoder
self._task_regexp = re.compile("seg|link|label|link_label")
self._pretrained_features = pretrained_features
self.device = device
self.__ok_levels = set(["seg", "token", "span", "pair"])
self.use_target_segs : bool = False
self._size = self._df["sample_id"].nunique()
# cache
self.__cache = {}
if "am_id" in self._df.columns:
self.__ok_levels.update(["am", "adu"])
if "seg" in label_encoder.task_labels:
self._pred_df["seg_id"] = None
self._pred_df["target_id"] = None
for task in label_encoder.task_labels:
self._pred_df[task] = None
# #remove columns we dont need
# for c in list(self._pred_df.columns):
# if self._task_regexp.search(c) and c not in label_encoder.task_labels:
# del self._df[c]
# del self._pred_df[c]
def __len__(self):
return self._size
def __sampling_wrapper(func):
@wraps(func)
def wrapped_get(self, *args, **kwargs):
if self.use_target_segs:
kwargs["pred"] = False
return func(self, *args, **kwargs)
return wrapped_get
def __get_column_values(self, df: pd.DataFrame, level: str, key:str):
if level == "token":
flat_values = df.loc[:, key].to_numpy()
else:
flat_values = df.groupby(f"{level}_id", sort = False).first().loc[:, key].to_numpy()
if isinstance(flat_values[0], str):
return flat_values
else:
return torch.LongTensor(flat_values)
def __get_span_idxs(self, df: pd.DataFrame, level:str ):
if level == "am":
ADU_start = df.groupby("adu_id", sort=False).first()["sample_token_id"].to_numpy()
ADU_end = df.groupby("adu_id", sort=False).last()["sample_token_id"].to_numpy() + 1
AC_lens = df.groupby("seg_id", sort=False).size().to_numpy()
AM_start = ADU_start
AM_end = ADU_end - AC_lens
return torch.LongTensor(np.column_stack((AM_start, AM_end)))
else:
start_tok_ids = df.groupby(f"{level}_id", sort=False).first()["sample_token_id"].to_numpy()
end_tok_ids = df.groupby(f"{level}_id", sort=False).last()["sample_token_id"].to_numpy() + 1
return torch.LongTensor(np.column_stack((start_tok_ids, end_tok_ids)))
def __get_mask(self, level:str, pred : bool = False):
return create_mask(self.get(level, "lengths", pred = pred), as_bool = True)
# def __seg_tok_lengths(self, df: pd.DataFrame, level:str):
# return df.groupby(level, sort=False).size().to_numpy()
def __get_lengths(self, df: pd.DataFrame, level:str):
if level == "token":
return torch.LongTensor(df.groupby(level=0, sort = False).size().to_numpy())
else:
return torch.LongTensor(df.groupby(level=0, sort=False)[f"{level}_id"].nunique().to_numpy())
def __get_pretrained_embeddings(self, df:pd.DataFrame, level:str, flat:bool):
if level == "token":
embs = self._pretrained_features["word_embs"]
else:
embs = self._pretrained_features["seg_embs"]
embs = embs[:, :max(self.__get_lengths(df, level)), :]
if flat:
embs = embs[self.__get_mask("level")]
return torch.tensor(embs, dtype = torch.float)
def __add_link_matching_info(self, pair_df:pd.DataFrame, j2i:dict):
def check_true_pair(row, mapping):
p1 = row["p1"]
p2 = row["p2"]
dir = row["direction"]
source = p2 if dir == 2 else p1
target = p1 if dir == 2 else p2
if source not in mapping:
return False
else:
correct_target = mapping[source]
return correct_target == target
j_jt = self._df.loc[:, ["seg_id", "target_id"]].dropna()
# maps a true source to the correct target using the ids of predicted pairs
source2target = {
j2i.get(j, "NONE"): j2i.get(jt, "NONE")
for j,jt in zip(j_jt["seg_id"], j_jt["target_id"])
}
if "NONE" in source2target:
source2target.pop("NONE")
if not source2target:
pair_df["true_link"] = False
return
pair_df["true_link"] = pair_df.apply(check_true_pair, axis = 1, args = (source2target, ))
def __create_pair_df(self, df: pd.DataFrame, pred :bool):
def set_id_fn():
pair_dict = dict()
def set_id(row):
p = tuple(sorted((row["p1"], row["p2"])))
if p not in pair_dict:
pair_dict[p] = len(pair_dict)
return pair_dict[p]
return set_id
        # we also have information about whether the seg_id is a true segment
        # and if so, which TRUE segment id it overlaps with, and how much
i2ratio, j2ratio, i2j, j2i = find_overlap(
target_df = self._df,
pred_df = self._pred_df
)
first_df = df.groupby("seg_id", sort=False).first()
first_df.reset_index(inplace=True)
last_df = df.groupby("seg_id", sort=False).last()
last_df.reset_index(inplace=True)
if pred:
first_target_df = self._df.groupby("seg_id", sort=False).first()
j2link_label = {j:row["link_label"] for j, row in first_target_df.iterrows()}
link_labels = [-1 if i not in i2j else j2link_label.get(i2j[i], -1) for i in first_df.index.to_numpy()]
first_df["link_label"] = link_labels
        # we create ids for each member of the pairs
        # the segments in the batch will have unique ids starting from 0 to
        # the total number of segments
p1, p2 = [], []
j = 0
for _, gdf in df.groupby("sample_id", sort = False):
n = len(gdf.loc[:, "seg_id"].dropna().unique())
sample_seg_ids = np.arange(
start= j,
stop = j+n
)
p1.extend(np.repeat(sample_seg_ids, n).astype(int))
p2.extend(np.tile(sample_seg_ids, n))
j += n
# setup pairs
pair_df = pd.DataFrame({
"p1": p1,
"p2": p2,
})
if not len(pair_df.index):
return pd.DataFrame()
# create ids for each NON-directional pair
pair_df["id"] = pair_df.apply(set_id_fn(), axis=1)
#set the sample id for each pair
pair_df["sample_id"] = first_df.loc[pair_df["p1"], "sample_id"].to_numpy()
        #set the true link_label
#pair_df["link_label"] = first_df.loc[pair_df["p1"], "link_label"].to_numpy()
#set start and end token indexes for p1 and p2
pair_df["p1_start"] = first_df.loc[pair_df["p1"], "sample_token_id"].to_numpy()
pair_df["p1_end"] = last_df.loc[pair_df["p1"], "sample_token_id"].to_numpy()
pair_df["p2_start"] = first_df.loc[pair_df["p2"], "sample_token_id"].to_numpy()
pair_df["p2_end"] = last_df.loc[pair_df["p2"], "sample_token_id"].to_numpy()
# set directions
pair_df["direction"] = 0 #self
pair_df.loc[pair_df["p1"] < pair_df["p2"], "direction"] = 1 # ->
pair_df.loc[pair_df["p1"] > pair_df["p2"], "direction"] = 2 # <-
# mask for where p1 is a source
p1_source_mask = np.logical_or(pair_df["direction"] == 0 , pair_df["direction"] == 1)
pair_df.loc[p1_source_mask, "link_label"] = first_df.loc[pair_df.loc[p1_source_mask, "p1"], "link_label"].to_numpy()
#where p2 is a source
p2_source_mask = pair_df["direction"] == 2
pair_df.loc[p2_source_mask, "link_label"] = first_df.loc[pair_df.loc[p2_source_mask, "p2"], "link_label"].to_numpy()
self.__add_link_matching_info(pair_df, j2i)
if pred:
pair_df["p1-ratio"] = pair_df["p1"].map(i2ratio)
pair_df["p2-ratio"] = pair_df["p2"].map(i2ratio)
else:
pair_df["p1-ratio"] = 1
pair_df["p2-ratio"] = 1
return pair_df
def __get_df_data(self,
level : str,
key : str,
flat : bool = False,
pred : bool = False,
) -> Union[Tensor, list, np.ndarray]:
df = self._pred_df if pred else self._df
if key == "lengths":
data = self.__get_lengths(df, level)
# elif key == "lengths_tok":
# data = self.__seg_tok_lengths(df, level)
elif key == "embs":
data = self.__get_pretrained_embeddings(df, level, flat = flat)
elif key == "mask":
data = self.__get_mask(level, pred = pred)
else:
if key == "span_idxs":
data = self.__get_span_idxs(df, level)
else:
data = self.__get_column_values(df, level, key)
if len(data) == 0:
return data
if isinstance(data[0], str):
return data
if not flat:
if level == "am" and key == "<KEY>":
level = "adu"
lengths = ensure_list(self.get(level, "lengths", pred = pred))
data = pad_sequence(
torch.split(
data,
lengths
),
batch_first = True,
padding_value = -1 if self._task_regexp.search(key) else 0,
)
return data
def __get_pair_df_data(self,
key : str,
bidir : bool = True,
) -> Union[Tensor, list, np.ndarray]:
if not hasattr(self, "_pair_df"):
pred = not self.use_target_segs
self._pair_df = self.__create_pair_df(
df = self._pred_df if pred else self._df,
pred = pred
)
pair_df = self._pair_df
if not len(self._pair_df.index):
return []
if not bidir:
pair_df = pair_df[pair_df["direction"].isin([0,1]).to_numpy()]
if key == "lengths":
sample_ids = list(self._df.groupby("sample_id", sort = False).groups.keys())
sample_pair_lens = pair_df.groupby("sample_id", sort = False).size().to_dict()
data = [sample_pair_lens.get(i, 0) for i in sample_ids]
else:
data = torch.LongTensor(pair_df[key].to_numpy())
return data
@__sampling_wrapper
def get(self,
level : str,
key : str,
flat : bool = False,
pred : bool = False,
bidir : bool = True
):
# create a key for string in cache
cache_key = (level, key, flat, pred, bidir)
# fetched cached data
if cache_key in self.__cache:
return self.__cache[cache_key]
if level not in self.__ok_levels:
raise KeyError
        # for level == "pair" we only have one pair_df, as we are using the predicted or TARGET segments
        # to create candidate pairs.
        # For other levels we have separate dfs for TARGET and PREDICTIONS
if level == "pair":
data = self.__get_pair_df_data(
key = key,
bidir = bidir,
)
else:
data = self.__get_df_data(
level = level,
key = key,
flat = flat,
pred = pred,
)
if isinstance(data, Tensor):
data = data.to(self.device)
return data
def add(self, level:str, key:str, value:str):
if key not in self.label_encoder.task_labels:
raise KeyError(f"cannot add values to key ='{key}'")
        # if we are using TARGET segmentation results we overwrite the
        # columns of seg_id with TARGET seg_id as well as TARGET labels for each
        # task done in segmentation
if "seg" in key and self.use_target_segs:
self._pred_df["seg_id"] = self._df["seg_id"].to_numpy()
for subtask in key.split("+"):
self._pred_df[subtask] = self._df[subtask].to_numpy()
#self.__add_overlap_info()
return
if level == "token":
mask = ensure_numpy(self.get("token", "mask")).astype(bool)
self._pred_df.loc[:, key] = ensure_numpy(value)[mask]
elif level == "seg":
mask = ensure_numpy(self.get("seg", "mask")).astype(bool)
seg_preds = ensure_numpy(value)[mask]
# get the length of tokens for each seg
tok_lens = self._pred_df.groupby("seg_id", sort=False).size().to_numpy()
# we spread the predictions on segments over tokens in TARGET segments
cond = ~self._pred_df["seg_id"].isna()
# expand the segment prediction for all their tokens
token_preds = np.repeat(seg_preds, tok_lens)
#set the predictions for all rows which belong to a TARGET segment
self._pred_df.loc[cond, key] = token_preds
elif level == "p_seg":
#get the lengths of each segment
seg_lengths = self._pred_df.groupby("seg_id", sort=False).size().to_numpy()
#expand the predictions over the tokens in the segments
token_preds =
|
np.repeat(value, seg_lengths)
|
numpy.repeat
|
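# A minimal sketch (toy labels, not from the source) of the np.repeat pattern
# the preceding record completes: one prediction per segment is expanded to
# one value per token using the per-segment token counts.
import numpy as np

_seg_preds = np.array([3, 1, 2])        # one label per predicted segment
_seg_lengths = np.array([2, 1, 3])      # tokens in each segment
_token_preds = np.repeat(_seg_preds, _seg_lengths)
assert (_token_preds == np.array([3, 3, 1, 2, 2, 2])).all()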
# pylint: disable = redefined-outer-name
import pytest
import numpy as np
from scipy.sparse import csr_matrix
import ptfidf.utils as ut
@pytest.fixture
def unnormalized_probas():
proba = np.array([
[2., 1.00001, 0., 0.],
[0.01, 2., 1.00001, 0.5],
[0., 0., 0., 0.],
])
prior = np.array([1., 3., 6.])
log_proba = np.zeros_like(proba)
idx = np.where(proba > 0)
log_proba[idx] = np.log(proba[idx])
log_proba = csr_matrix(log_proba)
log_prior = np.zeros_like(prior)
idx = np.where(prior > 0)
log_prior[idx] = np.log(prior[idx])
return proba, prior, log_proba, log_prior
def test_get_normalized_proba_with_prior(unnormalized_probas):
proba, prior, log_proba, log_prior = unnormalized_probas
expected = np.c_[proba, prior]
expected /= expected.sum(axis=1, keepdims=True)
expected = expected[:, :-1]
actual = ut.get_normalized_proba(log_proba, log_prior).toarray()
assert np.allclose(actual, expected)
def test_get_normalized_proba_without_prior(unnormalized_probas):
proba, _, log_proba, _ = unnormalized_probas
idx =
|
np.sum(proba, axis=1)
|
numpy.sum
|
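# A minimal sketch (toy matrix, not from the source) of np.sum(..., axis=1) as
# used in the test completion above: row sums, e.g. to flag rows with any mass.
import numpy as np

_proba = np.array([[2.0, 1.0, 0.0],
                   [0.0, 0.0, 0.0]])
_row_sums = np.sum(_proba, axis=1)       # array([3., 0.])
_nonzero_rows = _row_sums > 0            # array([ True, False])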
import numpy as np
from matplotlib import pyplot as plt
def dataset1(period=200, N_tot=1000):
mu = 0
sigma = 1.
N = 1
T = [0, 1]
X = [np.random.normal(mu, sigma, 1)[0], np.random.normal(mu, sigma, 1)[0]]
for i in range(2, N_tot):
if i % period == 0:
N += 1
mu += 0.5 * N
T += [i]
ax = 0.6 * X[i - 1] - 0.5 * X[i - 2] + np.random.normal(mu, sigma, 1)[0]
X += [ax]
return np.array(X).reshape(-1, 1)
def plot(X, Y, title, x_label, y_label):
plt.figure(figsize=(12, 3.))
plt.plot(X, Y, linewidth=3)
plt.xlabel(x_label, size=14)
plt.ylabel(y_label, size=14)
plt.grid(b=1)
plt.title(title, size=14)
# plt.legend(loc='best')
plt.tight_layout()
plt.xlim(0)
# plt.show()
def plot2(title, x_label, y_label):
fig = plt.figure(figsize=(15, 6))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.grid(b=1)
# ax.plot([1, 2, 3], [2, 4, 6])
ax.set_title(title)
plt.tight_layout()
# ax.set_xlim(0)
return ax
def draw_projection(ax, score, period, ws, n):
min_height, max_height = np.min(score), np.max(score)
for line in range(1, n):
current_period = period * line
c_ws = current_period + ws
ax.plot([current_period, current_period], [min_height, max_height], '--', alpha=1, lw=1.5, c='k')
ax.plot([c_ws, c_ws], [min_height, max_height], '-.', alpha=0.5, lw=1, c='k')
return ax
def SMA(scores, N):
new_scores = []
for i in range(0, len(scores)):
s = i - N if i - N >= 0 else 0
new_scores.append(np.mean(scores[s:i + 1], axis=0))
return np.array(new_scores)
def EMA(scores, N, smooth):
alpha = smooth / (1. + N)
new_scores = [scores[0]]
for i in range(1, len(scores)):
new_score = alpha * scores[i] + (1 - alpha) * new_scores[i - 1]
new_scores.append(new_score)
return new_scores
def cum_sum(scores):
new_scores = [0.]
for i in range(1, len(scores)):
new_scores.append(np.maximum(0., new_scores[i - 1] + scores[i]))
return new_scores
def generate_simplest_data(period=200, N=2):
X = []
for i in range(N):
temp = [i for j in range(period)]
X += temp
return np.array(X).reshape(-1, 1)
def generate_normal_simple_data(period=200, N_tot=1000):
mu = 0
sigma = 0.1
N = 1
T = [0, 1]
X = [np.random.normal(mu, sigma, 1)[0], np.random.normal(mu, sigma, 1)[0]]
for i in range(2, N_tot):
if i % period == 0:
N += 1
mu += 0.5 * N
T += [i]
ax = 0.6 * X[i - 1] - 0.5 * X[i - 2] + np.random.normal(mu, sigma, 1)[0]
X += [ax]
return
|
np.array(X)
|
numpy.array
|
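# A minimal sketch (toy values, not from the source) of the np.array(...).reshape(-1, 1)
# idiom used above: a Python list becomes an (N, 1) column array.
import numpy as np

_X = [0.1, 0.4, 0.3]
_X_col = np.array(_X).reshape(-1, 1)
assert _X_col.shape == (3, 1)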
#!/usr/bin/env python3
#
from __future__ import division, print_function
import numpy as np
import pints
class LogPrior(pints.LogPrior):
"""
Boundary constraints on the parameters
"""
def __init__(self, no_cells=1, which_model=1, transformation=None):
super(LogPrior, self).__init__()
self.which_model = which_model
# Conductance limits
self.lower_conductance = 2e-2
self.upper_conductance = 2e-1
# Limits on parameters and rates
self.lower_alpha = 1e-7
self.upper_alpha = 1e3
self.lower_beta = 1e-7
self.upper_beta = 0.4
self.lower_rate = 1.67e-5
self.upper_rate = 1000
# Lower and upper bounds for all parameters
self.lower = np.array([
self.lower_alpha,
self.lower_beta,
self.lower_alpha,
self.lower_beta,
self.lower_alpha,
self.lower_beta,
self.lower_alpha,
self.lower_beta,
self.lower_alpha,
self.lower_beta,
self.lower_alpha,
self.lower_beta
])
self.upper = np.array([
self.upper_alpha,
self.upper_beta,
self.upper_alpha,
self.upper_beta,
self.upper_alpha,
self.upper_beta,
self.upper_alpha,
self.upper_beta,
self.upper_alpha,
self.upper_beta,
self.upper_alpha,
self.upper_beta
])
self.minf = -float('inf')
# Limits on maximum reaction rates
self.rmin = 1.67e-5
self.rmax = 1000
# Voltages used to calculate maximum rates
self.vmin = -120
self.vmax = 60
# Optional transformation
self.transformation = transformation
# Number of parameters
n_kparams = 12
self.no_cells = no_cells
self.n_kparams = n_kparams
self.n_params = 2*self.n_kparams + self.no_cells
self.lower = np.append(self.lower, self.lower)
self.upper = np.append(self.upper, self.upper)
self.no_cells = no_cells
for i in range(self.no_cells):
self.lower = np.append(self.lower, self.lower_conductance)
self.upper = np.append(self.upper, self.upper_conductance)
def n_parameters(self):
return self.n_params
def __call__(self, parameters):
debug = False
# Transform parameters back to model space
if self.transformation is not None:
parameters = self.transformation.detransform(parameters, self.which_model)
# Check parameter boundaries
if np.any(parameters < self.lower):
if debug: print('Lower')
return self.minf
if np.any(parameters > self.upper):
if debug: print('Upper')
return self.minf
# Check maximum rate constants
p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, \
p13, p14, p15, p16, p17, p18, p19, p20, p21, p22, p23, p24 = parameters[:2*self.n_kparams]
# Check positive signed rates
r = p1 * np.exp(p2 * self.vmax)
if r < self.rmin or r > self.rmax:
if debug: print('r1')
return self.minf
r = p5 * np.exp(p6 * self.vmax)
if r < self.rmin or r > self.rmax:
if debug: print('r2')
return self.minf
r = p9 * np.exp(p10 * self.vmax)
if r < self.rmin or r > self.rmax:
if debug: print('r3')
return self.minf
# Check negative signed rates
r = p3 * np.exp(-p4 * self.vmin)
if r < self.rmin or r > self.rmax:
if debug: print('r4')
return self.minf
r = p7 * np.exp(-p8 * self.vmin)
if r < self.rmin or r > self.rmax:
if debug: print('r5')
return self.minf
r = p11 * np.exp(-p12 * self.vmin)
if r < self.rmin or r > self.rmax:
if debug: print('r6')
return self.minf
# Check positive signed rates
r = p13 * np.exp(p14 * self.vmax)
if r < self.rmin or r > self.rmax:
if debug: print('r7')
return self.minf
r = p17 * np.exp(p18 * self.vmax)
if r < self.rmin or r > self.rmax:
if debug: print('r8')
return self.minf
r = p21 * np.exp(p22 * self.vmax)
if r < self.rmin or r > self.rmax:
if debug: print('r9')
return self.minf
# Check negative signed rates
r = p15 * np.exp(-p16 * self.vmin)
if r < self.rmin or r > self.rmax:
if debug: print('r10')
return self.minf
r = p19 * np.exp(-p20 * self.vmin)
if r < self.rmin or r > self.rmax:
if debug: print('r11')
return self.minf
r = p23 *
|
np.exp(-p24 * self.vmin)
|
numpy.exp
|
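# A minimal sketch (illustrative parameter values, not from the source) of the
# rate-bound check the preceding record completes with np.exp:
# r = p * exp(b * V) must fall inside [rmin, rmax].
import numpy as np

_rmin, _rmax = 1.67e-5, 1000.0
_p, _b, _vmax = 2.26e-4, 0.0699, 60.0    # assumed values for illustration only
_r = _p * np.exp(_b * _vmax)             # roughly 1.5e-2, inside the bounds
assert _rmin <= _r <= _rmax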
import numpy as np
import pypolyagamma
from scipy.special._ufuncs import expit, logit
from scipy.linalg import solve_triangular
def multivariate_multinomial(N_m, Pi):
"""
Draws count data X given Pi.
Parameters
----------
N_m: M dimensional vector with total number of samples per M
Pi: Probability matrix used for creating the data
Returns
-------
X: [M x K] M dimensional number of counts per category
"""
N_m = np.array(N_m)
Pi = np.array(Pi)
M = N_m.__len__()
X = np.array([np.random.multinomial(N_m[m], Pi[m, :]) for m in range(M)])
return X
def stick_breaking(Psi):
"""
    Calculates the stick-breaking construction of the Gaussian variables Psi
Parameters
----------
Psi: [M x K-1] Gaussian Variables used for the stick breaking
Returns
-------
Pi: [M x K] Probability matrix using the logistic function and stick breaking
"""
Pi = np.zeros((np.shape(Psi)[0], np.shape(Psi)[1] + 1))
Pi[:, 0] = expit(Psi[:, 0])
Pi[:, 1:-1] = expit(Psi[:, 1:]) * np.cumprod(1 - expit(Psi[:, 0:-1]), axis=1)
Pi[:, -1] = 1 - np.sum(Pi[:, 0:-1], axis=1)
# Check for numerical instability
if np.any(Pi[:, -1] < 0):
Pi[Pi[:, -1] < 0, -1] = 0 # Set last weight to 0
        Pi /= np.sum(Pi, axis=1)[:, None]  # Renormalize rows to sum to 1
return Pi
def inv_stick_breaking(Pi):
"""
Calculates the inverse stick breaking construction for the probability matrix Pi
Parameters
----------
Pi: [M x K] Probability matrix using the logistic function and stick breaking
Returns
-------
Psi: [M x K-1] Gaussian Variables used for the stick breaking
"""
Psi = np.zeros((np.shape(Pi)[0], np.shape(Pi)[1] - 1))
Psi[:, 0] = logit(Pi[:, 0])
Psi[:, 1:] = logit(Pi[:, 1:-1] / (1 - np.cumsum(Pi[:, 0:-2], axis=1)))
Psi[np.isnan(Psi)] = -np.Inf
return Psi
def moment_matching(alpha, N_MC=False):
"""
Finds the parameters for the Gaussian (mu, Sigma) by matching the moments
of a dirichlet distribution with parameters alpha
Parameters
----------
alpha: [MxK] parameters of the M dirichlet distributions
N_MC: Number of Monte Carlo samples used for estimating the expectations
Returns
-------
mu: [MxK-1] M mean vectors of the Gaussians
    Sigma: [MxM] Covariance matrix across the M histograms
"""
M, K = np.shape(alpha)
if isinstance(N_MC, bool):
N_MC = int(1e5)
Pi_samples = np.array([np.random.dirichlet(alpha[m, :], N_MC) for m in range(M)]).swapaxes(0, 1)
Psi_samples = np.array([inv_stick_breaking(Pi_samples[n, :, :]) for n in range(N_MC)])
mu_k = np.zeros((M, K - 1))
for k in range(K - 1):
mu_k[:, k] = np.mean(Psi_samples[:, :, k], axis=0)
Sigma = np.cov(Psi_samples.swapaxes(1, 2).reshape(-1, M).T)
return mu_k, Sigma
def suff_stats_mult(X):
"""
Transforms the matrix X into a matrix T which corresponds to the counting matrix presented in
https://arxiv.org/abs/1506.05843
Parameters
----------
X: [M x K] count data for K categories
Returns
-------
T: [M x K-1] sufficient statistics
"""
N_m = np.sum(X, axis=1)
T = np.c_[N_m, N_m[:, None] - np.cumsum(X[:, 0:-2], axis=1)]
return T
def poly_gamma_rand(n, Psi):
"""
    Returns Polya-Gamma random variables.
    Parameters
    ----------
n: [M x K-1] count matrix
Psi: [M x K-1] Gaussian variables
Returns
-------
omega: [MxK-1] polya gamma variables conditioned on Psi and data n (sufficient statistics of X)
"""
# f = np.vectorize(pg.pgdraw)
# return f(n, Psi)
pg = pypolyagamma.PyPolyaGamma(np.random.randint(0, 2 ** 63, 1))
return np.reshape([pg.pgdraw(i, j) for i, j in zip(n.ravel(), Psi.ravel())], n.shape)
def stickBreaking(fractions, axis=0):
"""
Performs stick breaking for the given collection of breaking fractions.
Parameters
----------
fractions: array of numbers in [0,1]
axis: axis along which the sticks are oriented
Returns
-------
    array of probability vectors (points on the simplex) oriented along the same axis
"""
# check arguments
f = np.array(fractions)
assert np.all(np.logical_and(f >= 0, f <= 1))
assert np.ndim(f) >= axis
# temporarily permute axis
f = np.swapaxes(f, 0, axis)
# stick breaking
pi = np.zeros((f.shape[0] + 1, *f.shape[1:]))
pi[0, ...] = f[0, ...]
pi[1:-1] = f[1:, ...] * np.cumprod(1 - f[0:-1, ...], axis=0)
pi[-1, ...] = 1 - np.sum(pi[0:-1, ...], axis=0)
# undo permutation
pi = np.swapaxes(pi, 0, axis)
return pi
def reverseStickBreaking(probabilities, axis=0, short=False):
"""
Converts a given collection of probability vectors into the corresponding stick breaking representations.
Parameters
----------
probabilities: array of probability vectors (points on the simplex)
axis: axis along which the probability vectors are oriented
short: if true it is assumed that the last (dependent) entry of each vector has already been dropped
Returns
-------
array of stick breaking fractions (oriented along the same axis) encoding the probability vectors
"""
# check arguments
pi = np.array(probabilities)
assert np.all(np.logical_and(pi >= 0, pi <= 1))
assert np.ndim(pi) >= axis
# temporarily permute axis
pi = np.swapaxes(pi, 0, axis)
# if complete probability vectors are provided, drop last entry
if not short:
assert np.allclose(np.sum(pi, axis=0), 1)
pi = pi[0:-1, ...]
# reverse stick breaking
den = 1 - np.r_[np.zeros((1, *pi.shape[1:])), np.cumsum(pi[0:-1, ...], axis=0)]
fracs = np.divide(pi, den, out=np.zeros_like(pi), where=den != 0)
# fractions following a 1 in the stick breaking representation can not be recovered from the probability
# vector since they all map to the same result:
# e.g. stickBreaking([0.5, 1, a, b, c, ...]) yields [0.5, 0.5, 0, 0, 0, 0, ...] for all a, b, c, ...
# hence, if more than K trailing zeros are provided (K=1 if short=True, K=0 if short=False), all undetermined
# values are set to np.nan
# undo permutation
fracs =
|
np.swapaxes(fracs, 0, axis)
|
numpy.swapaxes
|
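# A minimal sketch (toy array, not from the source) of np.swapaxes as used above
# to move the stick-orientation axis back to its original position.
import numpy as np

_a = np.arange(6).reshape(2, 3)          # shape (2, 3)
_b = np.swapaxes(_a, 0, 1)               # shape (3, 2); _b[i, j] == _a[j, i]
assert _b.shape == (3, 2) and _b[2, 1] == _a[1, 2]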
import numpy as np
from numpy.linalg import inv
import numpy.ma as ma
def leer_data_pantheon(archivo_pantheon,masked=False,min_z = 0,max_z = 30):
    '''Takes the Pantheon data and extracts the redshifts zcmb and zhel,
    their error dz, as well as the apparent magnitude data with its error:
    mb and dmb. The apparent magnitude errors are used to build the
    associated correlation matrix. The function returns the redshift
    information, the apparent magnitude, and the inverse correlation
    matrix.'''
    # read the data table:
zcmb,zhel,dz,mb,dmb=np.loadtxt(archivo_pantheon
, usecols=(1,2,3,4,5),unpack=True)
    #build the diagonal matrix with the mB errors. Careful! this depends on alpha and beta:
Dstat=np.diag(dmb**2.)
    # read the NxN systematic-error matrix
sn=len(zcmb)
Csys=np.loadtxt('lcparam_full_long_sys.txt',unpack=True)
Csys=Csys.reshape(sn,sn)
    #assemble the final covariance matrix and invert it:
Ccov=Csys+Dstat
    if masked == True: #NOTE: this was wrong before: Ccov must be reshaped and only then inverted.
mask = ma.masked_where((zcmb <= max_z) & ((zcmb >= min_z)) , zcmb).mask
mask_1 = mask[np.newaxis, :] & mask[:, np.newaxis]
zhel = zhel[mask]
mb = mb[mask]
Ccov = Ccov[mask_1]
Ccov = Ccov.reshape(len(zhel),len(zhel))
zcmb = zcmb[mask]
Cinv=inv(Ccov)
return zcmb, zhel, Cinv, mb
def leer_data_pantheon_2(archivo_pantheon,archivo_pantheon_2):
    '''Same as leer_data_pantheon, but also loads the nuisance parameters.'''
    # read the data table:
zcmb0,zhel0,dz0,mb0,dmb0=np.loadtxt(archivo_pantheon
, usecols=(1,2,3,4,5),unpack=True)
zcmb_1,hmass,x1,cor=np.loadtxt(archivo_pantheon_2,usecols=(7,13,20,22),
unpack=True)
    #build the diagonal matrix with the mB errors. Careful! this depends on alpha and beta:
Dstat=np.diag(dmb0**2.)
    # read the NxN systematic-error matrix
sn=len(zcmb0)
Csys=np.loadtxt('lcparam_full_long_sys.txt',unpack=True)
Csys=Csys.reshape(sn,sn)
    #assemble the final covariance matrix and invert it:
Ccov=Csys+Dstat
Cinv=inv(Ccov)
return zcmb0, zcmb_1, zhel0, Cinv, mb0, x1, cor, hmass
def leer_data_cronometros(archivo_cronometros):
    '''Takes the cosmic chronometer data and extracts the redshifts z,
    the Hubble parameter measurements h and their errors dh. The function
    returns these three arrays.'''
    # read the data table:
z, h, dh =
|
np.loadtxt(archivo_cronometros, usecols=(0,1,2), unpack=True)
|
numpy.loadtxt
|
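# A minimal sketch (hypothetical file contents, not from the source) of the
# np.loadtxt call completed above: selected columns are unpacked directly into
# separate 1-D arrays.
import io
import numpy as np

_fake_table = io.StringIO("0.07 69.0 19.6\n0.12 68.6 26.2\n")
_z, _h, _dh = np.loadtxt(_fake_table, usecols=(0, 1, 2), unpack=True)
assert _z.shape == _h.shape == _dh.shape == (2,)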
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
util --- Short and sweet functions, generic algorithms
======================================================
Registers two astropy table readers: 'horizons csv' and 'mpc comet
obs'.
.. autosummary::
:toctree: generated/
Mathematical
------------
archav
cartesian
davint
gaussian
gaussian2d
deriv
hav
rotmat
FITS images and WCS
-------------------
basicwcs
fitslog
getrot
Optimizations
-------------
gaussfit
glfit
linefit
planckfit
Searching, sorting
------------------
between
clusters
groupby
leading_num_key
nearest
takefrom
stat_avg
whist
Spherical/Celestial/vectorial geometry
--------------------------------------
delta_at_rh
ec2eq
lb2xyz
mhat
projected_vector_angle
spherical_coord_rotate
spherical_distribution
state2orbit
vector_rotate
xyz2lb
Statistics
----------
kuiper
kuiperprob
mean2minmax
meanclip
midstep
minmax
nanmedian
nanminmax
randpl
sigma
spearman
uclip
"Special" functions
-------------------
bandpass
constant_spectral_resolution
deresolve
phase_integral
planck
#redden
polcurve
savitzky_golay
Time
----
cal2doy
cal2iso
cal2time
date_len
date2time
dh2hms
doy2md
drange
hms2dh
jd2doy
jd2time
timestamp
tz2utc
Other
-----
asAngle
asQuantity
asValue
autodoc
file2list
horizons_csv
mpc_comet_obs
spectral_density_sb
timesten
write_table
"""
import re
import datetime
from functools import singledispatch
import numpy as np
from astropy.time import Time
from astropy.table import Table
from astropy.io import ascii
import astropy.units as u
import astropy.io.registry as astropy_io_registry
__all__ = [
'archav',
'cartesian',
'davint',
'deriv',
'gaussian',
'gaussian2d',
'hav',
'rotmat',
'basicwcs',
'fitslog',
'getrot',
'gaussfit',
'glfit',
'linefit',
'planckfit',
'between',
'clusters',
'groupby',
'leading_num_key',
'nearest',
'stat_avg',
'takefrom',
'whist',
'delta_at_rh',
'ec2eq',
'lb2xyz',
'mhat',
'projected_vector_angle',
'spherical_coord_rotate',
'spherical_distribution',
'state2orbit',
'vector_rotate',
'xyz2lb',
'kuiper',
'kuiperprob',
'mean2minmax',
'meanclip',
'midstep',
'minmax',
'nanmedian',
'nanminmax',
'randpl',
'sigma',
'spearman',
'uclip',
'bandpass',
'constant_spectral_resolution',
'deresolve',
'phase_integral',
'planck',
# 'redden',
'polcurve',
'savitzky_golay',
'cal2doy',
'cal2iso',
'cal2time',
'date_len',
'date2time',
'dh2hms',
'doy2md',
'drange',
'hms2dh',
'jd2doy',
'jd2time',
'timestamp',
'tz2utc',
'asAngle',
'asQuantity',
'asValue',
'autodoc',
'file2list',
'spectral_density_sb',
'timesten',
'write_table'
]
def archav(y):
"""Inverse haversine.
Haversine is (1 - cos(th)) / 2 = sin**2(th/2)
Parameters
----------
y : float or array
The value.
Returns
-------
th : float or ndarray
The inverse haversine. [radians]
"""
return 2.0 * np.arcsin(np.sqrt(y))
def cartesian(*arrays):
"""Cartesian product of the input arrays.
Parameters
----------
arrays : array
The arrays on which to operate.
Returns
-------
result : ndarray
The Cartesian product of (array[0] and array[1]) and array[2],
etc.
Examples
--------
    >>> cartesian([1, 2, 3], [4, 5], [6, 7])
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
from itertools import product
return np.array(list(product(*arrays)))
_davint_err = dict()
_davint_err[2] = 'x1 was less than x0'
_davint_err[3] = 'the number of x between x0 and x1 (inclusive) was less than 3 and neither of the two special cases described in the abstract occurred. No integration was performed.'
_davint_err[4] = 'the restriction x(i+1) > x(i) was violated.'
_davint_err[5] = 'the number of function values was < 2'
def davint(x, y, x0, x1, axis=0):
"""Integrate an array using overlapping parabolas.
Interface to davint.f from SLATEC at netlib.org.
DAVINT integrates a function tabulated at arbitrarily spaced
abscissas. The limits of integration need not coincide with the
tabulated abscissas.
A method of overlapping parabolas fitted to the data is used
provided that there are at least 3 abscissas between the limits of
integration. DAVINT also handles two special cases. If the
limits of integration are equal, DAVINT returns a result of zero
regardless of the number of tabulated values. If there are only
two function values, DAVINT uses the trapezoid rule.
Parameters
----------
x : ndarray
Abscissas, must be in increasing order.
y : ndarray
Function values.
x0 : float
Lower limit of integration.
x1 : float
Upper limit of integration.
axis : int
If `y` is a 2D array, then integrate over axis `axis` for each
element of the other axis.
Returns
-------
float
The result.
"""
from .lib import davint as _davint
y = np.array(y)
if y.ndim == 1:
r, ierr = _davint(x, y, len(x), x0, x1)
if ierr != 1:
raise RuntimeError(
"DAVINT integration error: {}".format(err[ierr]))
elif y.ndim == 2:
r = np.zeros(y.shape[axis])
for i, yy in enumerate(np.rollaxis(y, axis)):
r[i] = davint(x, yy, x0, x1)
else:
raise ValueError("y must have 1 or 2 dimensions.")
return r
def deriv(y, x=None):
"""The numerical derivative using 3-point Lagrangian interpolation.
Parameters
----------
y : array
Variable to be differentiated, there must be at least 3 points
x : array, optional
Variable to differentiate with respect to; if equal to None,
then use unit spacing
Returns
-------
d : ndarray
dy/dx
Notes
-----
Based on deriv.pro from RSI/IDL, which is based on Hildebrand,
1956, Introduction to Numerical Analysis.
"""
if y.shape[0] < 3:
raise ValueError("y must have at least 3 elements")
if x is None:
dydx = (np.roll(y, -1) - np.roll(y, 1)) / 2.0
dydx[0] = (-3.0 * y[0] + 4.0 * y[1] - y[2]) / 2.0
dydx[-1] = (3.0 * y[-1] - 4.0 * y[-2] + y[-3]) / 2.0
return dydx
    if x.shape != y.shape:
        raise ValueError("y and x must have the same number of elements")
xx = x.astype(float)
x12 = xx - np.roll(xx, -1) # x1 - x2
x01 = np.roll(xx, 1) - xx # x0 - x1
x02 = np.roll(xx, 1) - np.roll(xx, -1) # x0 - x2
# mid points
dydx = (np.roll(y, 1) * (x12 / (x01 * x02)) +
y * (1.0 / x12 - 1.0 / x01) -
np.roll(y, -1) * (x01 / (x02 * x12)))
# end points
dydx[0] = (y[0] * (x01[1] + x02[1]) / (x01[1] * x02[1]) -
y[1] * x02[1] / (x01[1] * x12[1]) +
y[2] * x01[1] / (x02[1] * x12[1]))
dydx[-1] = (-y[-3] * x12[-2] / (x01[-2] * x02[-2]) +
y[-2] * x02[-2] / (x01[-2] * x12[-2]) -
y[-1] * (x02[-2] + x12[-2]) / (x02[-2] * x12[-2]))
return dydx
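# A quick worked check (illustrative, not part of the original module): for
# y = x**2 sampled at unit spacing, deriv() reproduces dy/dx = 2x exactly:
#     deriv(np.array([0., 1., 4., 9., 16.]))  ->  array([0., 2., 4., 6., 8.])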
def gaussian(x, mu, sigma):
"""A normalized Gaussian curve.
Parameters
----------
x : array
Dependent variable.
mu : float
Position of the peak.
sigma : float
Width of the curve (sqrt(variance)).
Returns
-------
G : ndarray
The Gaussian function.
"""
return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /
np.sqrt(2.0 * np.pi) / sigma)
def gaussian2d(shape, sigma, theta=0):
"""A normalized 2-D Gaussian function.
Take care to make sure the result is normalized, if needed.
Parameters
----------
shape : tuple
The shape of the resultant array. The Gaussian will be centered
at y = (shape[0] - 1) / 2, x = (shape[1] - 1) / 2.
sigma : float or array
Width of the Gaussian (sqrt(variance)). If sigma is a
two-element array, the first element will be the width along the
first axis, and the second along the second axis.
theta : float
The angle for an elliptical Gaussian. [degrees]
Returns
-------
G : ndarray
The 2D Gaussian function.
"""
if not np.iterable(sigma):
sy = sigma
sx = sigma
else:
sy = sigma[0]
sx = sigma[1]
thr = np.radians(theta)
a = np.cos(thr)**2 / 2.0 / sx**2 + np.sin(thr)**2 / 2.0 / sy**2
b = np.sin(2 * thr) / 4.0 / sx**2 + np.sin(2 * thr) / 4.0 / sy**2
c = np.sin(thr)**2 / 2.0 / sx**2 + np.cos(thr)**2 / 2.0 / sy**2
y, x = np.indices(shape)
y = y - (shape[0] - 1) / 2
x = x - (shape[1] - 1) / 2
G = np.exp(-(a * x**2 + 2 * b * x * y + c * y**2))
G /= 2.0 * np.pi * sx * sy
return G
def hav(th):
"""Haversine of an angle.
Haversine is (1 - cos(th)) / 2 = sin**2(th/2)
Parameters
----------
th : float or array
The angle. [radians]
Returns
-------
y : float or ndarray
The haversine.
"""
return np.sin(th / 2.0)**2
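# Illustrative identity (not part of the original module): hav(np.pi / 2) equals
# sin(pi/4)**2 = 0.5, and archav(0.5) = 2 * arcsin(sqrt(0.5)) recovers pi/2.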
def rotmat(th):
"""Returns a rotation matrix.
The matrix rotates the vector [x, y] by the amount a.
Parameters
----------
th : float
The amount to rotate. [radians]
Returns
-------
r : np.matrix
Rotation matrix.
Examples
--------
import numpy as np
from mskpy import rotmat
print(np.array([1, 0]) * rotmat(np.radians(90.0)))
--> matrix([[ 6.12323400e-17, 1.00000000e+00]])
print(np.array([0, 1]) * rotmat(np.pi))
--> matrix([[ -1.00000000e+00, 6.12323400e-17]])
"""
c = np.cos(th)
s = np.sin(th)
return np.matrix([[c, s], [-s, c]])
def basicwcs(crpix, crval, cdelt, pa, projection='TAN'):
"""A basic world coordinate system (WCS) object.
Parameters
----------
crpix : array
The center of the WCS projection: [x, y].
crval : array
The coordinates at CRPIX: [ra, dec]. [degrees]
cdelt : double or array
The image scale in arcsecons per pixel. If cdelt is a scalar
value then the WCS CDELT will be [-1, 1] * cdelt.
pa : double
The position angle of N from the y-axis. [degrees]
Returns
-------
wcs : astropy wcs
Your new WCS.
"""
import astropy.wcs
wcs = astropy.wcs.wcs.WCS()
wcs.wcs.crpix = crpix
wcs.wcs.crval = crval
if np.iterable(cdelt):
wcs.wcs.cdelt = cdelt / 3600.0
else:
wcs.wcs.cdelt = np.array([-1, 1]) * cdelt / 3600.0
par = np.radians(pa)
wcs.wcs.pc = np.array([[-np.cos(par), -np.sin(par)],
[-np.sin(par), np.cos(par)]])
return wcs
def fitslog(keywords, files=None, path='.', format=None, csv=True):
"""One-line descriptions of a list of FITS files.
By default, `fitslog` will summarize *.fit{,s} files in the
current directory.
Parameters
----------
keywords : array or str
A list of FITS keywords to extract from each header. Keywords
may also be the name of a template: Bigdog, Guidedog, MIRSI.
files : array, optional
A list of files to summarize. Overrides path.
path : str, optional
Summarize all FITS files in this location.
format : str, optional
The output format string. A newline character will be appended.
csv : bool, optional
Set to `True` to separate output fields with commas. Ignored
for user defined formats.
Returns
-------
log : str
The summary of the FITS files as a string.
"""
from glob import glob
from astropy.io import fits
if files is None:
files = glob("{0}/*.fit".format(path))
files.extend(glob("{0}/*.fits".format(path)))
files.sort()
if type(keywords) is str:
if keywords.lower() == 'bigdog':
keywords = ['TIME_OBS', 'ITIME', 'CO_ADDS', 'CYCLES',
'AIRMASS', 'GRAT', 'OBJECT']
format = ["{0:16}", "{1:18}", "{2:6.2f}", "{3:4d}"
"{4:4d}", "{5:7.3f}", "{6:<12}", "{7:<25}"]
elif keywords.lower() == 'old-guidedog':
keywords = ['TIME_OBS', 'ITIME', 'CO_ADDS', 'CYCLES',
'AIRMASS', 'GFLT', 'OBJECT']
format = ["{0:16}", "{1:18}", "{2:6.2f}", "{3:4d}"
"{4:4d}", "{5:7.3f}", "{6:<12}", "{7:<25}"]
elif keywords.lower() == 'guidedog':
keywords = ['TIME_OBS', 'ITIME', 'CO_ADDS', 'CYCLES',
'TCS_AM', 'GFLT', 'OBJECT']
format = ["{0:16}", "{1:18}", "{2:6.2f}", "{3:4d}"
"{4:4d}", "{5:7.3f}", "{6:<12}", "{7:<25}"]
elif keywords.lower() == 'mirsi':
keywords = ['UTC_TIME', 'OBS-MODE', 'EXPTIME', 'FRAME-T',
'NCOADS', 'AIRMASS', 'WHEEL1', 'WHEEL2', 'WHEEL3',
'OBJECT']
format = ['{0:12}', '{1:13}', '{2:7.3f}', '{3:>7}', '{4:3d}',
'{5:>6}', '{6:>20}', '{7:>20}', '{8:>20}',
'{9}']
else:
print("{0} not a recognized template".format(keywords))
return None
if csv:
format = ", ".join(format)
else:
format = " ".join(format)
else:
if format is None:
format = []
for i in range(len(keywords)):
format.append("{{{0}}}".format(i))
if csv:
format = ", ".join(format)
else:
format = " ".join(format)
log = ""
s = max([len(x.replace('.fits', '').replace('.fit', '').split('/')[-1])
for x in files])
for f in files:
log += '{0:{1}}'.format(
f.replace('.fits', '').replace('.fit', '').split('/')[-1], s)
if csv:
log += ','
log += ' '
h = fits.getheader(f)
values = ()
for k in keywords:
values += (h[k], )
log += format.format(*values)
log += "\n"
return log
def getrot(h):
"""Image rotation and pixel scale from a FITS header.
Based on the IDL Astronomy routine getrot.pro (<NAME>).
Parameters
----------
h : astropy.io.fits header or string
A FITS header or the name of a file with a defined world
coordinate system. The file name will be passed to
`fits.getheader`.
Returns
-------
cdelt : ndarray
Two-element array of the pixel scale (x, y). [arcseconds/pixel]
rot : float
The image orientation (position angle of north). [degrees]
"""
from astropy.io import fits
if isinstance(h, str):
h = fits.getheader(h)
# Does CDELTx exist?
cdelt = np.zeros(2)
cdeltDefined = False
if (('CDELT1' in h) and ('CDELT2' in h)):
# these keywords take precedence over the CD matrix
cdeltDefined = True
cdelt = np.array([h['CDELT1'], h['CDELT2']])
# Transformation matrix?
tmDefined = False
if (('CD1_1' in h) and ('CD1_2' in h) and
('CD2_1' in h) and ('CD2_2' in h)):
tmDefined = True
cd = np.array(((h['CD1_1'], h['CD1_2']), (h['CD2_1'], h['CD2_2'])))
if (('PC1_1' in h) and ('PC1_2' in h) and
('PC2_1' in h) and ('PC2_2' in h)):
tmDefined = True
cd = np.array(((h['PC1_1'], h['PC1_2']), (h['PC2_1'], h['PC2_2'])))
if not tmDefined:
# if CDELT is defined but the transformation matrix isn't,
# then CROT should be defined
if cdeltDefined and ('CROTA2' in h):
rot = h['CROTA2']
return cdelt, rot
raise ValueError("WCS has CDELTx but is missing CROTA2,"
" and CDi_j or PCi_j")
if (h['CTYPE1'].find('DEC-') >= 0) or (h['CTYPE1'].find('LAT') >= 0):
newcd = cd.copy()
newcd[0, :] = cd[1, :]
newcd[1, :] = cd[0, :]
cd = newcd.copy()
if np.linalg.det(cd) < 0:
sgn = -1.0
else:
sgn = 1.0
if (cd[1, 0] == 0) and (cd[0, 1] == 0):
# unrotated coordinate system
rot1 = 0
rot2 = 0
if not cdeltDefined:
cdelt[0] = cd[0, 0]
cdelt[1] = cd[1, 1]
else:
rot1 = np.arctan2(sgn * np.radians(cd[0, 1]),
sgn * np.radians(cd[0, 0]))
rot2 = np.arctan2(np.radians(-cd[1, 0]),
np.radians(cd[1, 1]))
if not cdeltDefined:
cdelt[0] = sgn * np.sqrt(cd[0, 0]**2 + cd[0, 1]**2)
cdelt[1] = np.sqrt(cd[1, 1]**2 + cd[1, 0]**2)
return cdelt * 3600.0, np.degrees(rot1)
def gaussfit(x, y, err, guess, covar=False, **kwargs):
"""A quick Gaussian fitting function, optionally including a line.
Parameters
----------
x, y : array
The independent and dependent variables.
err : array
`y` errors, set to `None` for unweighted fitting.
guess : tuple
Initial guess. The length of the guess determines the fitting
function:
`(amplitude, mu, sigma)` - pure Gaussian
`(amplitude, mu, sigma, b)` - Gaussian + constant offset `b`
`(amplitude, mu, sigma, m, b)` - Gaussian + linear term `m x + b`
covar : bool, optional
Set to `True` to return the covariance matrix rather than the
error.
**kwargs
Keyword arguments to pass to `scipy.optimize.leastsq`.
Returns
-------
fit : tuple
Best-fit parameters.
err or cov : tuple or ndarray
Errors on the fit or the covariance matrix of the fit (see
`covar` keyword).
"""
from scipy.optimize import leastsq
def gauss_chi(p, x, y, err):
A, mu, sigma = p
model = A * gaussian(x, mu, sigma)
chi = (np.array(y) - model) / np.array(err)
return chi
def gauss_offset_chi(p, x, y, err):
A, mu, sigma, b = p
model = A * gaussian(x, mu, sigma) + b
chi = (np.array(y) - model) / np.array(err)
return chi
def gauss_line_chi(p, x, y, err):
A, mu, sigma, m, b = p
model = A * gaussian(x, mu, sigma) + m * x + b
chi = (np.array(y) - model) / np.array(err)
return chi
if err is None:
err = np.ones(len(y))
assert len(guess) in (3, 4, 5), "guess must have length of 3, 4, or 5."
opts = dict(args=(x, y, err), full_output=True, epsfcn=1e-4,
xtol=1e-4, ftol=1e-4)
opts.update(**kwargs)
if len(guess) == 3:
output = leastsq(gauss_chi, guess, **opts)
elif len(guess) == 4:
output = leastsq(gauss_offset_chi, guess, **opts)
elif len(guess) == 5:
output = leastsq(gauss_line_chi, guess, **opts)
fit = output[0]
cov = output[1]
if cov is None:
print(output[3])
err = None
else:
err = np.sqrt(np.diag(cov))
if covar:
return fit, cov
else:
return fit, err
def glfit(x, y, err, guess, covar=False):
"""A quick Gaussian + line fitting function.
Parameters
----------
x, y : array
The independent and dependent variables.
err : array
`y` errors, set to `None` for unweighted fitting.
guess : tuple
Initial guess: `(amplitude, mu, sigma, m, b)`.
covar : bool, optional
Set to `True` to return the covariance matrix rather than the
error.
Returns
-------
fit : tuple
Best-fit parameters.
err or cov : tuple or ndarray
Errors on the fit or the covariance matrix of the fit (see
`covar` keyword).
"""
from scipy.optimize import leastsq
def chi(p, x, y, err):
A, mu, sigma, m, b = p
model = A * gaussian(x, mu, sigma) + m * x + b
chi = (np.array(y) - model) / np.array(err)
return chi
if err is None:
err = np.ones(len(y))
output = leastsq(chi, guess, args=(x, y, err), full_output=True,
epsfcn=1e-4)
fit = output[0]
cov = output[1]
err = np.sqrt(np.diag(cov))
if covar:
return fit, cov
else:
return fit, err
def linefit(x, y, err, guess, covar=False):
"""A quick line fitting function.
Parameters
----------
x, y : array
The independent and dependent variables.
err : array
`y` errors, set to `None` for unweighted fitting.
guess : tuple (double, double)
`(m, b)` a guess for the slope, `m`, and y-axis intercept `b`.
covar : bool, optional
Set to `True` to return the covariance matrix rather than the
error.
Returns
-------
fit : tuple (double, double)
`(m, b)` the best-fit slope, `m`, and y-axis intercept `b`.
err or cov : tuple (double, double) or ndarray
Errors on the fit or the covariance matrix of the fit (see
`covar` keyword). `None` if the scipy's `leastsq` did not
return a covariance matrix.
"""
from scipy.optimize import leastsq
def chi(p, x, y, err):
m, b = p
model = m * np.array(x) + b
chi = (np.array(y) - model) / np.array(err)
return chi
if err is None:
err = np.ones(len(y))
output = leastsq(chi, guess, args=(x, y, err), full_output=True,
epsfcn=1e-3)
fit = output[0]
cov = output[1]
try:
err = np.sqrt(np.diag(cov))
except ValueError:
err = None
if covar:
return fit, cov
else:
return fit, err
def planckfit(wave, fluxd, err, guess, covar=False, epsfcn=1e-3, **kwargs):
"""A quick scaled Planck fitting function.
The scale factor includes a factor of pi for the conversion from
specific surface brightness to flux density.
Parameters
----------
wave, fluxd : Quantity
The wavelength and flux density.
err : Quantity
Flux density uncertainties; set to `None` for unweighted fitting.
guess : tuple (double, double)
        `(scale, T)`: a guess for the scale factor and temperature `T`.
covar : bool, optional
Set to `True` to return the covariance matrix rather than the
error.
**kwargs
`scipy.optimize.leastsq` keyword arguments.
Returns
-------
fit : tuple (double, double)
`(scale, T)` the best-fit parameters.
err or cov : tuple (double, double) or ndarray
Errors on the fit or the covariance matrix of the fit (see
`covar` keyword).
"""
from scipy.optimize import leastsq
def chi(p, wave, fluxd, err):
scale, T = p
model = scale * planck(wave, T, unit=fluxd.unit / u.sr) * u.sr
chi = (fluxd - model) / err
return chi.decompose().value
def dchi(p, wave, fluxd, err):
scale, T = p
d = np.empty((2, len(wave)))
model = planck(wave, T, unit=fluxd.unit / u.sr) * u.sr
d[0] = (model / err).decompose().value
model = scale * planck(wave, T, unit=fluxd.unit /
u.sr, deriv='T') * u.sr
d[1] = (model / err).decompose().value
return d
if err is None:
err = np.ones_like(fluxd)
output = leastsq(chi, guess, args=(wave, fluxd, err), full_output=True,
# Dfun=dchi, col_deriv=True,
epsfcn=epsfcn, **kwargs)
print(output[-2])
fit = output[0]
cov = output[1]
if covar:
return fit, cov
else:
if cov is None:
return fit, None
else:
return fit, np.sqrt(np.diag(cov))
def between(a, limits, closed=True):
"""Return True for elements within the given limits.
Parameters
----------
a : array
Array to test.
limits : array
        A 2-element array of lower and upper limits, or an Nx2 array
        where limits[i] is one set of lower and upper limits.
    closed : bool, optional
        If `True`, the interval is closed (i.e., uses <= and >= at the
        limits).
Returns
-------
i : ndarray
True where a is between each set of limits.
"""
b = np.array(a)
lim = np.array(limits)
if len(lim.shape) == 1:
        if closed:
            i = (b >= lim[0]) * (b <= lim[1])
        else:
            i = (b > lim[0]) * (b < lim[1])
else:
i = np.zeros(b.shape)
for j in range(lim.shape[0]):
i += between(a, lim[j, :])
return i.astype(bool)
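def _between_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Select elements inside one interval or several.
    a = np.arange(10)
    single = a[between(a, [2, 5])]            # array([2, 3, 4, 5])
    multi = a[between(a, [[0, 1], [7, 9]])]   # array([0, 1, 7, 8, 9])
    return single, multi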
def clusters(test):
"""Define array slices based on a test value.
Parameters
----------
test : array
The test result.
Returns
-------
objects : tuple of slices
An array of slices that return each cluster of `True` values in
`test`.
"""
import scipy.ndimage as nd
labels, n = nd.label(test)
print("{} clusters found".format(n))
return nd.find_objects(labels)
def groupby(key, *lists):
"""Sort elements of `lists` by `unique(key)`.
Note: this is not the same as `itertools.groupby`.
Parameters
----------
key : array
A set of keys that indicate how to group the elements of each
list.
lists : array
Lists to sort.
Returns
-------
groups : dictionary
        A dictionary whose keys are `unique(key)` and whose values are
        tuples of lists holding the correspondingly grouped entries from
        `lists`.
Examples
--------
>>> import numpy as np
>>> from mskpy.util import groupby
    >>> keys = (np.random.rand(20) * 3).astype(int)
>>> print(keys)
[1 2 2 0 1 1 1 1 1 1 2 1 2 1 0 0 0 1 2 2]
>>> lists = (list('abcdefghijklmnopqrstuvwxyz'), range(26))
>>> groupby(keys, *lists)
{0: (['d', 'o', 'p', 'q'], [3, 14, 15, 16]),
1: (['a', 'e', 'f', 'g', 'h', 'i', 'j', 'l', 'n', 'r'],
[0, 4, 5, 6, 7, 8, 9, 11, 13, 17]),
2: (['b', 'c', 'k', 'm', 's', 't'], [1, 2, 10, 12, 18, 19])}
"""
groups = dict()
key = np.asarray(key)
for k in np.unique(key):
i = np.flatnonzero(key == k)
groups[k] = ()
for l in lists:
groups[k] += (list(np.asarray(l)[i]),)
return groups
def leading_num_key(s):
"""Keys for sorting strings, based on leading multidigit numbers.
    A normal string comparison compares the strings character by
    character, e.g., "101P" is less than "1P" because "0" < "P".
    `leading_num_key` generates keys so that `sorted` or `list.sort`
    can consider the leading multidigit integer, e.g., "101P" > "1P"
    because 101 > 1.
Parameters
----------
s : string
Returns
-------
keys : tuple
        The keys to sort by for this string: `keys[0]` is the leading
number, `keys[1]` is the rest of the string.
"""
    pfx = ''
    for i in range(len(s)):
        if not s[i].isdigit():
            break
        pfx += s[i]
    # the suffix is everything after the leading digits
    sfx = s[len(pfx):]
if len(pfx) > 0:
pfx = int(pfx)
else:
pfx = 0
return pfx, sfx
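def _leading_num_key_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Sort comet designations by their leading number.
    names = ['101P', '1P', '10P', '2P']
    return sorted(names, key=leading_num_key)  # ['1P', '2P', '10P', '101P']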
def nearest(array, v):
"""Return the index of `array` where the value is nearest `v`.
Parameters
----------
array : array
An array.
v : scalar
The requested value.
Returns
-------
result : int
The index.
"""
return np.abs(np.array(array) - v).argmin()
def stat_avg(x, y, u, N):
"""Bin an array, weighted by measurement errors.
Parameters
----------
x : array
The independent variable.
y : array
The parameter to average.
u : array
        The 1-sigma uncertainties on `y`, used as weights (`1 / u**2`).
N : int
The number of points to bin. The right-most bin may contain
fewer than `N` points.
Returns
-------
bx, by, bu : ndarray
The binned data. The `x` data is straight averaged (unweighted).
n : ndarray
The number of points in each bin.
"""
    nbins = x.size // N
    remainder = x.size % N
    shape = (nbins, N)
    w = 1.0 / np.array(u)**2
    # the first nbins * N points fill the complete bins
    m = nbins * N
    _w = w[:m].reshape(shape)
    _x = np.array(x)[:m].reshape(shape)
    _y = np.array(y)[:m].reshape(shape)
    _x = _x.mean(1)
    _y = (_y * _w).sum(1) / _w.sum(1)
    _u = np.sqrt(1.0 / _w.sum(1))
    n = np.ones(len(_x)) * N
    if remainder > 0:
        # weighted mean of the left-over points in the final, partial bin
        _x = np.r_[_x, np.mean(x[-remainder:])]
        _y = np.r_[_y, (np.array(y[-remainder:]) * w[-remainder:]).sum()
                   / w[-remainder:].sum()]
        _u = np.r_[_u, np.sqrt(1.0 / w[-remainder:].sum())]
        n = np.r_[n, remainder]
return _x, _y, _u, n
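def _stat_avg_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Bin 10 noisy points in groups of 3; the final bin
    # holds the single left-over point.
    x = np.arange(10.0)
    y = 2 * x + np.random.randn(10) * 0.1
    u = np.full(10, 0.1)
    bx, by, bu, n = stat_avg(x, y, u, 3)
    return bx, by, bu, n  # n is array([3., 3., 3., 1.])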
def takefrom(arrays, indices):
"""Return elements from each array at the given indices.
Parameters
----------
arrays : tuple of arrays
The arrays to index.
indices : array
The indices to return from each array in `a`.
Returns
-------
r : tuple of arrays
a[0][indices], a[1][indices], etc.
"""
r = ()
for a in arrays:
newa = np.array(a)[indices]
if not isinstance(newa, type(a)):
newa = type(a)(newa)
r += (newa,)
return r
def whist(x, y, w, errors=True, **keywords):
"""A weighted histogram binned by an independent variable.
Parameters
----------
x : array
The independent variable.
y : array
The parameter to average.
w : array
        The weights for each `y`. If `errors` is `True`, then `y` will
        be weighted by `1 / w**2`.
    errors : bool, optional
        Set to `True` if `w` is an array of uncertainties on `y`, and
        not the actual weights.
**keywords : optional
Any `numpy.histogram` keyword, except `weights`.
Returns
-------
h : ndarray
The weighted mean of `y`, binned by `x`.
err : ndarray
When `errors` is `True`, `err` will be the uncertainty on `h`,
otherwise it will be `None`.
n : ndarray
The number of `x`'s in each bin.
edges: ndarray
The bin edges.
"""
if 'weights' in keywords:
raise RuntimeError('weights not allowed in keywords')
_x = np.array(x)
_y = np.array(y)
_w = np.array(w)
if errors:
_w = 1.0 / _w**2
n, edges = np.histogram(x, **keywords)
n = n.astype(float)
num = np.histogram(x, weights=_y * _w, **keywords)[0]
den = np.histogram(x, weights=_w, **keywords)[0]
m = num / den
if errors:
err = 1.0 / np.sqrt(den)
else:
err = None
return m, err, n, edges
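def _whist_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Error-weighted mean of `y` in 5 bins of `x`.
    x = np.linspace(0, 10, 50)
    y = np.sin(x) + np.random.randn(50) * 0.1
    w = np.full(50, 0.1)  # 1-sigma uncertainties on y
    return whist(x, y, w, errors=True, bins=5)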
def delta_at_rh(rh, selong, observer=1 * u.au):
"""Target distance and phase angle at heliocentric distance and solar elongation.
Parameters
----------
rh : Quantity or float
Heliocentric distance.
selong : Quantity, Angle, or float
Solar elongation. If a float, it must be in radians.
observer : Quantity or float, optional
Observer-sun distance.
Returns
-------
delta : Quantity or array
phase : Quantity or array
"""
delta_unit = rh.unit if isinstance(rh, u.Quantity) else 1
phase_unit = selong.unit if isinstance(selong, u.Quantity) else 1
rh = u.Quantity(rh, 'au')
observer = u.Quantity(observer, 'au')
selong = u.Quantity(selong, 'rad')
delta = np.zeros_like(rh)
for i in range(len(rh)):
d = np.roots((
1,
-2 * observer.value * np.cos(selong),
(observer**2 - rh[i]**2).value
))
j = 0 if d[0] >= 0 else 1
delta[i] = d[j] * u.au
phase = np.arccos((rh**2 + delta**2 - observer**2)
/ 2 / rh / delta)
if delta_unit == 1:
delta = delta.value
if phase_unit == 1:
phase = phase.value
else:
phase = phase.to(phase_unit)
return delta, phase
def ec2eq(lam, bet):
"""Ecliptic coordinates to equatorial (J2000.0) coordinates.
Parameters
----------
lam, bet : float or array
Ecliptic longitude and latitude. [degrees]
Returns
-------
ra, dec : float or ndarray
Equatorial (J2000.0) longitude and latitude. [degrees]
Notes
-----
Based on euler.pro in the IDL Astro library (<NAME>).
"""
# using the mean obliquity of the ecliptic at the J2000.0 epoch
# eps = 23.439291111 degrees (Astronomical Almanac 2008)
ceps = 0.91748206207 # cos(eps)
seps = 0.39777715593 # sin(eps)
# convert to radians
lam = np.radians(lam)
bet = np.radians(bet)
cbet = np.cos(bet)
sbet = np.sin(bet)
clam = np.cos(lam)
slam = np.sin(lam)
ra = np.arctan2(ceps * cbet * slam - seps * sbet, cbet * clam)
sdec = seps * cbet * slam + ceps * sbet
if np.iterable(sdec):
sdec[sdec > 1.0] = 1.0
else:
if sdec > 1.0:
sdec = 1.0
dec = np.arcsin(sdec)
# make sure 0 <= ra < 2pi
ra = (ra + 4.0 * np.pi) % (2.0 * np.pi)
return np.degrees(ra), np.degrees(dec)
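def _ec2eq_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  The north ecliptic pole (beta = +90 deg) maps to
    # RA = 270 deg, Dec = +66.56 deg (J2000.0).
    ra, dec = ec2eq(0.0, 90.0)
    return ra, dec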
def lb2xyz(lam, bet=None):
"""Transform longitude and latitude to a unit vector.
Parameters
----------
lam : float, array, or 2xN array
The longitude(s), or an array of longitudes and
latitudes. [degrees]
bet : float or array, optional
The latitude(s). [degrees]
Returns
-------
xyz : array or 3xN array
The unit vectors.
"""
_lam = np.array(lam).squeeze()
if bet is None:
return lb2xyz(_lam[0], _lam[1])
lamr = np.radians(_lam)
betr = np.radians(np.array(bet).squeeze())
return np.array((np.cos(betr) * np.cos(lamr),
np.cos(betr) * np.sin(lamr),
np.sin(betr)))
def mhat(a, axis=-1):
"""Mangitude and unit vector decomposition.
Parameters
----------
a : array-like
An array.
axis : int, optional
The axis to decompose. Default is the last axis.
Returns
-------
m : ndarray
The magnitudes.
hat : ndarray
The unit vectors.
"""
_a = np.array(a)
axis = (_a.ndim - 1) if axis is None else axis
if axis < 0:
axis += _a.ndim
m = np.sqrt(np.sum(_a**2, axis))
hat = np.rollaxis(np.rollaxis(_a, axis) / m, 0, axis + 1)
return m, hat
def projected_vector_angle(r, rot, ra, dec):
"""Position angle of a vector projected onto the observing plane.
Parameters
----------
r : array
The vector to project, in heliocentric ecliptic
coordinates. [km]
rot : array
The observer-target vector. [km]
ra, dec : float
        The right ascension and declination of the target, as seen by
the observer. [deg]
Returns
-------
angle : float
        The position angle w.r.t. equatorial north. [deg]
"""
r0 = np.sqrt((r**2).sum()) # magnitude of r
dv = rot + r / r0 # delta vector
# find the projected vectors in RA, Dec
lam2 = np.degrees(np.arctan2(dv[1], dv[0]))
bet2 = np.degrees(np.arctan2(dv[2], np.sqrt(dv[0]**2 + dv[1]**2)))
ra2, dec2 = ec2eq(lam2, bet2)
x2 = (ra2 - ra) * np.cos(np.radians(dec2))
y2 = (dec2 - dec)
th = np.degrees(np.arctan2(y2, x2))
pa = 90.0 - th
return pa
def spherical_coord_rotate(lon0, lat0, lon1, lat1, lon, lat):
"""Rotate about an axis defined by two reference points.
Given two reference points (lon0, lat0), and (lon1, lat1), rotate
(lon, lat) in the same manner that (lon0, lat0) needs to be
rotated to match (lon1, lat1).
Parameters
-----------
lon0, lat0 : float
The reference point. [degrees]
lon1, lat1 : float
A second reference point that defines the rotation axis and
direction. [degrees]
lon, lat : float or array-like
The point(s) to rotate [degrees]
Returns
-------
lon_new, lat_new : float or array-like
lon, lat rotated in the sense as lon0, lat0 must be rotated to
produce lon1, lat1. [degrees]
Notes
-----
Based on the IDL routine spherical_coord_rotate.pro written by
<NAME>, and distributed with CUBISM.
"""
if (lon0 == lon1) and (lat0 == lat1):
return (lon, lat)
def rd2cartesian(lon, lat):
# convert to cartesian coords
clat = np.cos(lat)
return np.array([clat * np.cos(lon),
clat * np.sin(lon),
np.sin(lat)])
v0 = rd2cartesian(np.radians(lon0), np.radians(lat0))
v1 = rd2cartesian(np.radians(lon1), np.radians(lat1))
v = rd2cartesian(np.radians(lon), np.radians(lat))
# construct coordinate frame with x -> ref point and z -> rotation
# axis
x = v0
z = np.cross(v1, v0) # rotate about this axis
z = z / np.sqrt((z**2).sum()) # normalize
y = np.cross(z, x)
y = y / np.sqrt((y**2).sum())
# construct a new coordinate frame (x along new direction)
x2 = v1
y2 = np.cross(z, x2)
y2 = y2 / np.sqrt((y2**2).sum())
    # project onto the initial frame, then re-express in the rotated one
if len(v.shape) == 1:
v = (v * x).sum() * x2 + (v * y).sum() * y2 + (v * z).sum() * z
else:
vx = np.dot(v.T, x)
vy = np.dot(v.T, y)
vz = np.dot(v.T, z)
v = vx * np.repeat(x2, v.shape[1]).reshape(v.shape)
v += vy * np.repeat(y2, v.shape[1]).reshape(v.shape)
v += vz * np.repeat(z, v.shape[1]).reshape(v.shape)
lat_new = np.degrees(np.arcsin(v[2]))
lon_new = np.degrees(np.arctan2(v[1], v[0]))
lon_new = lon_new % 360.0
return (lon_new, lat_new)
def spherical_distribution(N):
"""Equally distributed points on a unit sphere.
Parameters
----------
N : int
The approximate number of points.
Returns
-------
p : ndarray
Spherical coordinates of the points, `(lambda, beta)`, with the
shape `Nx2`.
Notes
-----
Based on https://www.cmu.edu/biolphys/deserno/pdf/sphere_equi.pdf
by <NAME>.
"""
pi = np.pi
a = 4 * pi / N
d = np.sqrt(a)
Mth = int(np.round(pi / d))
dth = pi / Mth
dphi = a / dth
p = []
for m in range(Mth):
th = pi * (m + 0.5) / Mth
Mphi = int(np.round(2 * pi * np.sin(th) / dphi))
for n in range(Mphi):
phi = 2 * pi * n / Mphi
p.append((phi, th - pi / 2))
return np.array(p)
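def _spherical_distribution_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Roughly 100 nearly uniformly distributed points on
    # the unit sphere, as (longitude, latitude) pairs in radians.
    p = spherical_distribution(100)
    return p.shape  # approximately (100, 2)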
def state2orbit(R, V):
"""Convert a small body's state vector into osculating orbital elements.
CURRENTLY INCOMPLETE! Only a, ec, q, Tp, P, f, E, and M are
computed, and even fewer are computed for near-parabolic orbits.
    Two-body osculating solution. For details, see Murray and Dermott,
Solar System Dynamics, Chapter 2.
Parameters
----------
R : array
The x, y, z heliocentric ecliptic coordinates. [km]
V : array
The vx, vy, vz heliocentric ecliptic speeds. [km/s]
Returns
-------
orbit : dict
A dictionary {a, ec, in, node, peri, Tp, P, f, E, M}, where:
a = semi-major axis [km]
ec = eccentricity
in = inclination [radians]
        node = longitude of the ascending node, Omega [radians]
peri = argument of pericenter [radians]
Tp = time of perihelion passage [days]
q = perihelion distance
P = orbital period [days]
f = true anomaly at date [radians]
E = eccentric anomaly at date [radians]
M = mean anomaly at date [radians]
"""
mu = 1.32712440018e11 # km3/s2
AU = u.au.to(u.kilometer)
    # some useful quantities
r = np.sqrt((R**2).sum()) # heliocentric distance [km]
v = np.sqrt((V**2).sum()) # velocity [km/s]
H = np.cross(R, V) # specific angular momentum vector [km2/s]
h = np.sqrt((H**2).sum()) # specific angular momentum [km2/s]
s = np.dot(R, V)
drdt = np.sign(s) * np.sqrt(v**2 - h**2 / r**2)
a = 1.0 / (2.0 / r - v**2 / mu) # [km]
ec = np.sqrt(1.0 - h**2 / mu / a) # eccentricity
q = a * (1.0 - ec) / AU # perihelion distance [AU]
if ec < 0.98:
sinf = h / mu / ec * drdt
cosf = (h**2 / mu / r - 1.0) / ec
f = np.arctan2(sinf, cosf) # true anomaly [radians]
elif ec < 1.1:
# punt!
return dict(a=a, ec=ec, q=q, Tp=None, P=None, f=None, E=None,
H=None, M=None)
else:
raise ValueError("eccentricity is too high")
# eccentric anomaly [radians]
if ec < 1.0:
E = 2.0 * np.arctan2(np.sqrt(1.0 - ec) * np.sin(f / 2.0),
np.sqrt(1.0 + ec) * np.cos(f / 2.0))
M = E - ec * np.sin(E) # mean anomaly [radians]
else:
# hyperbolic eccentric anomaly [radians]
        E = np.sign(f) * np.arccosh((ec + cosf) / (1.0 + ec * cosf))
M = -E + ec * np.sinh(E) # mean anomaly [radians]
# date of perihelion [Julian date]
if a < 0:
n = np.sqrt(mu / -a**3) / 86400.0 # mean motion
Tp = -M * np.sqrt(-a**3 / mu) / 86400.0
P = None
else:
Tp = -M * np.sqrt(a**3 / mu) / 86400.0
P = 2.0 * np.pi * np.sqrt(a**3 / mu) / 86400.0 # orbital period [days]
return dict(a=a, ec=ec, q=q, Tp=Tp, P=P, f=f, E=E, M=M)
def vector_rotate(r, n, th):
"""Rotate vector `r` an angle `th` CCW about `n`.
Parameters
----------
r : array (3)
The vector to rotate [x, y, z].
n : array (3)
The vector to rotate about.
th : float or array
The CCW angle to rotate by. [radians]
Returns
-------
rp : ndarray
The rotated vector [x, y, z].
Notes
-----
Described in Goldstein p165, 2nd ed. Note that Goldstein presents
the formula for clockwise rotation.
"""
nhat = n / np.sqrt((n**2).sum())
def rot(r, nhat, theta):
return (r * np.cos(-theta) +
nhat * (nhat * r).sum() * (1.0 - np.cos(-theta)) +
np.cross(r, nhat) * np.sin(-theta))
if np.size(th) == 1:
return rot(r, nhat, th)
else:
return np.array([rot(r, nhat, t) for t in th])
def xyz2lb(r):
"""Transform a vector to angles.
Parameters
----------
r : array
The vector, shape = (3,) or (n, 3).
Returns
-------
lam : float or array
Longitude. [degrees]
bet : float or array
Latitude. [degrees]
"""
r = np.array(r)
if r.ndim == 1:
lam = np.arctan2(r[1], r[0])
bet = np.arctan2(r[2], np.sqrt(r[0]**2 + r[1]**2))
else:
# assume it is an array of vectors
lam = np.arctan2(r[:, 1], r[:, 0])
bet = np.arctan2(r[:, 2], np.sqrt(r[:, 0]**2 + r[:, 1]**2))
return np.degrees(lam), np.degrees(bet)
def kuiper(x, y):
"""Compute Kuiper's statistic and probablity.
Parameters
----------
x, y : array
The two distributions to compare.
Returns
-------
V : float
Kuiper's statistic.
p : float
The probability that `V` > observed may occur for uncorrelated
data sets.
Notes
-----
    Based on p. 627 of Press et al. (1992, Numerical Recipes in C,
2nd Ed.), and scipy.stats.ks_2samp.
"""
data1, data2 = list(map(np.asarray, (x, y)))
n1 = data1.shape[0]
n2 = data2.shape[0]
data1 = np.sort(data1)
data2 = np.sort(data2)
data_all = np.sort(np.concatenate([data1, data2]))
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
V = np.ptp(cdf1 - cdf2)
Ne = n1 * n2 / (n1 + n2)
return V, kuiperprob(V, Ne)
def kuiperprob(V, Ne):
"""The probability of a false positive in Kuiper's test.
Parameters
----------
V : float
The Kuiper statistic.
Ne : int
Effective sample size (i.e., `n1 * n2 / (n1 + n2)`).
Returns
-------
p : float
The probability of a false positive.
Notes
-----
Based on prob_kuiper.pro from Astro IDL library.
"""
# Numerical Recipes algorithm:
lam = (np.sqrt(Ne) + 0.155 + 0.24 / np.sqrt(Ne)) * V
if lam <= 0.4:
# good to 7 sig. figs.
return 1.0
EPS1 = 0.001
EPS2 = 1e-8
p = 0.0
termbf = 0.0
a2 = -2 * lam**2
for j in range(1, 101):
a2j2 = a2 * j**2
term = 2 * (-2 * a2j2 - 1) * np.exp(a2j2)
p += term
if (abs(term) <= (EPS1 * termbf)) or (abs(term) <= (EPS2 * p)):
return p
termbf = abs(term)
return 1.0 # did not converge
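def _kuiper_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Compare a uniform sample with a narrow normal sample;
    # the false-positive probability should be very small.
    x = np.random.rand(200)
    y = np.random.randn(200) * 0.1 + 0.5
    V, p = kuiper(x, y)
    return V, p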
def mean2minmax(a):
"""The distance from the mean to the min and max of `a`.
    This function is suitable for computing asymmetric error bars for
matplotlib.errorbar (the result will need to be reshaped to a 2x1
array).
Parameters
----------
a : array
Returns
-------
result : ndarray
A two-element `ndarray`, the first element is `mean(a) -
min(a)`, the second is `max(a) - mean(a)`.
"""
return np.abs(minmax(a) - np.array(a).mean())
def meanclip(x, axis=None, lsig=3.0, hsig=3.0, maxiter=5, minfrac=0.001,
full_output=False, dtype=np.float64):
"""Average `x` after iteratively removing outlying points.
Clipping is performed about the median. NaNs are ignored.
Parameters
----------
x : array
axis : int, optional
Set to `None` to clip the entire array, or an integer to clip
over that axis.
lsig : float or tuple, optional
The lower-sigma-rejection limit. If `lsig` is a `tuple`, then
the contents will be placed into the keyword parameters (for
compatibility with functions like np.apply_along_axis()).
hsig : float, optional
The upper-sigma-rejection limit
maxiter : int, optional
The maximum number of clipping iterations.
minfrac : float, optional
Stop iterating if less than or equal to `minfrac` of the data
points are rejected.
full_output : bool, optional
If `True`, also return the standard deviation of the clipped
        data, their indices, and the number of iterations.
Returns
-------
mean : float
The mean of the clipped data.
sigma : float, optional
The standard deviation of the clipped data.
good : ndarray, optional
The indices of the good data.
iter : int, optional
The number of clipping iterations used.
.. Todo::
Look into using scipy.stats.tmean, tstd for meanclip.
"""
if axis is not None:
if axis < len(x.shape):
x2 = np.rollaxis(x, axis)
y = np.zeros(x2.shape[0])
ys = np.zeros(x2.shape[0])
yind = ()
yiter = np.zeros(x2.shape[0])
for i in range(x2.shape[0]):
mc = meanclip(x2[i], axis=None, lsig=lsig, hsig=hsig,
maxiter=maxiter, minfrac=minfrac,
full_output=True)
y[i], ys[i], yiter[i] = mc[0], mc[1], mc[3]
yind += (mc[2],)
if full_output:
return y.mean(dtype=dtype), ys, yind, yiter
else:
return y.mean(dtype=dtype)
else:
raise ValueError("There is no axis {0} in the input"
" array".format(axis))
if isinstance(lsig, tuple):
lsig = list(lsig)
if len(lsig) == 5:
full_output = lsig.pop()
if len(lsig) >= 4:
minfrac = lsig.pop()
if len(lsig) >= 3:
maxiter = lsig.pop()
if len(lsig) >= 2:
hsig = lsig.pop()
if len(lsig) >= 1:
lsig = lsig.pop()
good = np.flatnonzero(np.isfinite(x))
if good.size == 0:
# no good data
if full_output:
return np.nan, np.nan, (), 0
else:
return np.nan
for i in range(maxiter):
y = x.flatten()[good]
medval = np.median(y)
sig = y.std(dtype=dtype)
keep = (y > (medval - lsig * sig)) * (y < (medval + hsig * sig))
cutfrac = abs(good.size - keep.sum()) / good.size
if keep.sum() > 0:
good = good[keep]
else:
break # somehow all the data were clipped
if cutfrac <= minfrac:
break
y = x.flatten()[good]
if full_output:
return y.mean(dtype=dtype), y.std(dtype=dtype), good, i+1
else:
return y.mean(dtype=dtype)
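def _meanclip_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  The clipped mean of normal data with a few strong
    # outliers stays near zero.
    x = np.random.randn(1000)
    x[:5] = 100.0
    m, sig, good, niter = meanclip(x, full_output=True)
    return m, sig, good.size, niter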
def midstep(a):
"""Compute the midpoints of each step in `a`.
Parameters
----------
a : array
Returns
-------
b : ndarray
The midsteps of `a`, i.e., `b = (a[1:] + a[:-1]) / 2.0`.
"""
return (np.array(a)[1:] + np.array(a)[:-1]) / 2.0
def minmax(a):
"""Compute the minimum and the maximum of an array.
Parameters
----------
a : array
Returns
-------
result : ndarray
A two-element array, the first element is `min(a)`, the second
is `max(a)`.
"""
return np.array([np.min(a), np.max(a)])
def nanmedian(a, axis=None):
"""Median of `a`, ignoring NaNs.
Parameters
----------
a : array
Returns
-------
m : ndarray
The median, or `nan` if all of `a` is `nan`.
"""
if axis is not None:
return np.apply_along_axis(nanmedian, axis, a)
a = np.array(a)
i = ~np.isnan(a)
if np.any(i):
return np.median(a[i])
else:
return np.nan
def nanminmax(a):
"""Compute the minimum and the maximum of an array, ignoring NaNs.
Parameters
----------
a : array
Returns
-------
result : ndarray
A two-element array, the first element is `nanmin(a)`, the
second is `nanmax(a)`.
"""
return np.array([np.nanmin(a), np.nanmax(a)])
def randpl(x0, x1, k, n=1):
"""Pick random deviates from a power-law distribution.
    This returns deviates drawn from:
    .. math:: dn/dx \propto x^k
    For a distribution defined per logarithmic interval,
    .. math:: dn/d\log(x) \propto x^{alpha}
    set `k = alpha - 1`.
Parameters
----------
x0 : float
The minimum value to pick.
x1 : float
The maximum value to pick.
k : float
The logarithmic slope of the distribution.
n : int, optional
The number to pick.
Returns
-------
y : float or ndarray
The random number(s).
Notes
-----
Algorithm from Weisstein, <NAME>. "Random Number." From
MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/RandomNumber.html
"""
y = np.random.rand(n)
return ((x1**(k + 1) - x0**(k + 1)) * y + x0**(k + 1))**(1.0 / (k + 1))
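def _randpl_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Draw 1000 deviates from dn/dx propto x**-3 between
    # 1 and 100, e.g., for a simple grain size distribution.
    y = randpl(1.0, 100.0, -3, n=1000)
    return y.min(), y.max()  # both within [1, 100]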
def sigma(s):
"""The probablity a normal variate will be `<s` sigma from the mean.
Parameters
----------
s : float
The number of sigma from the mean.
Returns
-------
p : float
        The probability that a value falls within +/- `s` sigma of the mean.
"""
from scipy.special import erf
return 0.5 * (erf(s / np.sqrt(2.0)) - erf(-s / np.sqrt(2.0)))
def spearman(x, y, nmc=None, xerr=None, yerr=None):
"""Perform a Spearman "rho" test on two or more data sets.
Parameters
----------
x, y : array
The parameters being tested.
nmc : int
The number of Monte Carlo tests to perform.
xerr, yerr : array, optional
If Monte Carlo tests are requested, use these 1 sigma
uncertainties for each value of x and/or y, assumed to be
normally distributed. Set to None for no errors.
Returns
-------
r : float or ndarray
The Spearman correlation coefficient between x and y.
p : float or ndarray
The probability that a value greater than r may occur in
uncorrelated data sets. According to scipy.stats.spearmanr p
        may not be reliable for datasets smaller than 500 points.
Z : float or ndarray
The significance of r expressed in units of standard deviations
based on the expectation value and variance of the null
hypothesis that x and y are uncorrelated.
meanZ : float or ndarray, optional
The average Z measured in the Monte Carlo tests.
n : float or ndarray, optional
The number of Monte Carlo runs for which Z was greater than 3
sigma.
"""
from scipy import stats
def spearmanZ(x, y):
N = len(x)
rankx = stats.rankdata(x)
ranky = stats.rankdata(y)
# find the corrections for ties
ties = stats.mstats.count_tied_groups(x)
sx = sum((k**3 - k) * v for k, v in ties.items())
ties = stats.mstats.count_tied_groups(y)
sy = sum((k**3 - k) * v for k, v in ties.items())
D = sum((rankx - ranky)**2)
meanD = (N**3 - N) / 6.0 - (sx + sy) / 12.0
varD = (N - 1) * N**2 * (N + 1)**2 / 36.0
varD *= (1 - sx / (N**3 - N)) * (1 - sy / (N**3 - N))
return abs(D - meanD) / np.sqrt(varD)
N = len(x)
rp = stats.mstats.spearmanr(x, y, use_ties=True)
r = rp[0]
p = rp[1]
Z = spearmanZ(x, y)
if nmc is not None:
if xerr is None:
xerr = np.zeros(N)
if yerr is None:
yerr = np.zeros(y.shape)
mcZ = np.zeros(nmc)
for i in range(nmc):
dx = np.random.randn(N) * xerr
            dy = np.random.randn(N) * yerr
mcZ[i] = spearmanZ(x + dx, y + dy)
meanZ = mcZ.mean()
n = sum(mcZ > 3.0)
return r, p, Z, meanZ, n
return r, p, Z
def uclip(x, ufunc, full_output=False, **keywords):
"""Sigma clip data and apply the function ufunc.
Clipping is done by `meanclip`.
Parameters
----------
x : array
ufunc : function
A function to apply to the sigma clipped `x`.
**keywords
Any `meanclip` keyword.
Returns
-------
y :
The result.
ind : ndarray, optional
The array indices of the good data in `x.flatten()`.
iter : int, optional
The number of clipping iterations used.
"""
mc = meanclip(x, full_output=True, **keywords)
if full_output:
return ufunc(x.flatten()[mc[2]]), mc[2], mc[3]
else:
return ufunc(x.flatten()[mc[2]])
def bandpass(sw, sf, se=None, fw=None, ft=None, filter=None, filterdir=None,
k=3, s=None):
"""Filters a spectrum given a transimission function.
If the filter has a greater spectreal dispersion than the
spectrum, the spectrum is interpolated onto the filter's
wavelengths. Otherwise, the filter is interpoalted onto the
spectrum's wavelengths.
Either fw+ft or filter must be given.
Parameters
----------
sw : array
Spectrum wavelengths.
sf : array
Spectrum flux per unit wavelength.
se : array, optional
Weight the fluxes with these uncertainties.
fw : array, optional
Filter transmission profile wavelengths, same units as `sw`.
ft : array, optional
Filter transmission profile.
filter : string, optional
The name of a filter (see `calib.filter_trans`). The wavelength
units will be micrometers.
filterdir : string, optional
The directory containing the filter transmission files
(see `calib.filter_trans`).
k : int, optional
Order of the spline fit for interpolation. See
`scipy.interpolate.splrep`.
s : float, optional
Interpolation smoothing. See `scipy.interpolate.splrep`.
Returns
-------
wave, flux : ndarray
The effective wavelength and flux density of the filtered spectrum.
err : ndarray, optional
        The uncertainty on the filtered spectrum. Returned if `se` is
not `None`.
"""
from scipy import interpolate
from . import calib
# local copies
_sw = np.array(sw)
_sf = np.array(sf)
if se is None:
_se = np.ones_like(_sf)
else:
_se = np.array(se)
if (fw is not None) and (ft is not None):
_fw = np.array(fw)
_ft = np.array(ft)
elif filter is not None:
_fw, _ft = calib.filter_trans(filter)
_fw = _fw.to(u.um).value
else:
raise ValueError("Neither fw+ft nor filter was supplied.")
# We need a scale for the errorbars since 1/err^2 can be fairly large
errscale = _se.mean()
_se = _se / errscale
# determine if the spectrum or filter has the greater dispersion
if np.median(_fw / deriv(_fw)) > np.median(_sw / deriv(_sw)):
# interpolate the spectrum onto the filter wavelengths
# the spectrum may be incomplete
i = (_fw >= min(_sw)) * (_fw <= max(_sw))
_fw = _fw[i]
_ft = _ft[i]
_w = _fw
spl = interpolate.splrep(_sw, _sf, k=k, s=s)
_sf = interpolate.splev(_w, spl)
spl = interpolate.splrep(_sw, _se**2, k=k, s=s)
_se2 = interpolate.splev(_w, spl)
_ft = _ft
else:
# the spectrum or filter transmission may be incomplete
# interpolate the filter onto the spectrum wavelengths
i = (_sw >= min(_fw)) * (_sw <= max(_fw))
_sw = _sw[i]
_sf = _sf[i]
_se = _se[i]
_w = _sw
spl = interpolate.splrep(_fw, _ft, k=k, s=s)
_ft = interpolate.splev(_w, spl)
_sf = _sf
_se2 = _se**2
# weighted mean to get the effective wavelength
wrange = minmax(_w)
weights = _ft * _sf / _se2
wave = (davint(_w, _w * weights, *wrange) / davint(_w, weights, *wrange))
# weighted mean for the flux
weights = _ft / _se2
flux = davint(_w, _sf * weights, *wrange) / davint(_w, weights, *wrange)
err = davint(_w, weights, *wrange) / davint(_w, 1.0 / _se2, *wrange)
err = np.sqrt(err) * errscale
if se is None:
return wave, flux
else:
return wave, flux, err
def constant_spectral_resolution(start, stop, R):
"""Spectral wavelength generator for constant spectral resolution.
Parameters
----------
start, stop : array-like
Start and stop wavelengths. The stop wavelength will always be
in the spectrum.
R : float
The desired spectral resolution.
Returns
-------
wave : array
"""
d = 1 + 1 / R
n = int(np.ceil(np.log(stop / start) / np.log(d)))
return start * d**np.arange(n)
def deresolve(func, wave, flux, err=None):
"""De-resolve a spectrum using the supplied instrument profile.
Parameters
----------
func : function or string
The instrument profile/weighting function. The function only
takes one parameter: delta-wavelength (distance from the center
of the filter) in the same units as `wave`. Some shortcut
strings are allowed (case insensitive):
"gaussian(sigma)" - specifiy sigma in the same units as `wave`
"uniform(fwhm)" - specifiy fwhm in the same units as `wave`
wave : ndarray
The wavelengths of the spectrum.
flux : ndarray
The spectral flux.
err : ndarray, optional
The uncertainties on `flux`. If provided, the fluxes will be
weighted by `1/err**2` before deresolving.
    Returns
-------
f : ndarray
The de-resolved fluxes.
"""
if type(func) is str:
if 'gaussian' in func.lower():
            sigma = float(re.findall(r'gaussian\(([^)]+)\)', func.lower())[0])
def func(dw):
return gaussian(dw, 0, sigma)
elif 'uniform' in func.lower():
            hwhm = (float(re.findall(r'uniform\(([^)]+)\)', func.lower())[0])
                    / 2.0)
def func(dw):
f = np.zeros_like(dw)
i = (dw > -hwhm) * (dw <= hwhm)
if any(i):
f[i] = 1.0
return f
else:
raise ValueError("Function '{}' not recognized.".format(func))
if err is not None:
weights = err**-2
sumWeights = 1.0 / np.sqrt(deresolve(func, wave, weights))
else:
weights = 1.0
sumWeights = 1.0
wflux = flux * weights
fluxout = np.zeros_like(wflux)
for i in range(len(wave)):
dw = wave - wave[i]
f = func(dw)
f /= f.sum()
fluxout[i] = np.sum(f * wflux) / sumWeights
return fluxout
def phase_integral(phasef, range=[0, 180]):
"""The phase integral of a phase function.
Parameters
----------
phasef : function
The phase function, takes one parameter, `phase`, in units of
degrees.
range : array, optional
The integration limits. [degrees]
Returns
-------
pint : float
"""
from scipy.integrate import quad
range = np.radians(range)
pint = 2.0 * quad(lambda x: phasef(np.degrees(x)) * np.sin(x),
min(range), max(range))[0]
return pint
def planck(wave, T, unit='W/(m2 Hz sr)', deriv=None):
"""The Planck function.
Parameters
----------
wave : array or Quantity
The wavelength(s) to evaluate the Planck function. [micron]
T : float, array, or Quantity
The temperature(s) of the Planck function. [Kelvin]
unit : astropy Unit
The output units. Do not include K for derivatives. If `None`,
returns a float in units of W/m2/Hz/sr (may be faster than using
astropy units).
deriv : string
Set to 'T' to return the first derivative with respect to
temperature in units of `unit` per K.
Returns
-------
B : float or Quantity
The Planck function or its derivative.
Raises
------
AssertionError when `deriv` isn't an allowed value.
"""
assert deriv in [None, 'T', 't']
# prevent over/underflow warnings
oldseterr = np.seterr(all='ignore')
# wave in m
if isinstance(wave, u.Quantity):
wave = wave.si.value
else:
wave = wave * 1e-6
#from astropy import constants as const
#c1 = 2.0 * const.si.h * const.si.c / u.s / u.Hz
#c2 = const.si.h * const.si.c / const.si.k_B
#a = np.exp(c2 / wave.si / T.to(u.K))
#B = c1 / ((wave.si)**3 * (a - 1.0)) / u.sr
c1 = 3.9728913665386057e-25 # J m
c2 = 0.0143877695998 # K m
a = np.exp(c2 / wave / u.Quantity(T, 'K').value)
B = c1 / (wave**3 * (a - 1.0))
if unit is not None:
B = B * u.Unit('W/(m2 Hz sr)')
B = B.to(unit, equivalencies=spectral_density_sb(wave * u.m))
if deriv in ['T', 't']:
        B = B * c2 / u.Quantity(T, 'K').value**2 / wave * a / (a - 1.0) / u.K
# restore seterr
np.seterr(**oldseterr)
return B
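def _planck_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Surface brightness of a 300 K blackbody near its
    # ~10 micron peak, converted to Jy/sr, and its derivative with
    # temperature.
    B = planck(10.0, 300.0, unit='Jy/sr')
    dBdT = planck(10.0, 300.0, unit='Jy/sr', deriv='T')
    return B, dBdT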
def _redden(wave, S, wave0=0.55):
"""Redden a spectrum with the slope S.
Parameters
----------
wave : array
An array of wavelengths.
S : float or array
Redden the spectrum by the fraction `S` per unit wavelength.
`S` should be defined for each wavelength `wave`, or be a single
value for all wavelengths.
wave0 : float, optional
The wavelength to hold constant.
Returns
-------
spec : ndarray
The scale factors to produce the reddened spectrum.
Examples
--------
Comet dust slopes are typically described as % per 0.1 um
>>> import numpy as np
>>> from mskpy.util import redden
>>> wave = np.array([0.4, 0.45, 0.5, 0.55, 0.65, 1.55])
>>> S = 12. * 0.01 / 0.1 # 12% / (0.1 um)
>>> print(redden(wave, S))
[ 0.83527021 0.88692044 0.94176453 1. 1.12749685 3.32011692]
"""
from scipy.integrate import quad
from scipy.interpolate import interp1d
if not np.iterable(wave):
wave = np.array(wave).reshape(1)
if not np.iterable(S):
S = np.ones_like(wave) * S
elif len(S) == 1:
S = np.ones_like(wave) * S[0]
slope = interp1d(np.r_[0, wave, np.inf], np.r_[S[0], S, S[-1]],
kind='linear')
spec = np.zeros_like(wave)
for i in range(len(wave)):
# integrate S*dwave from wave0 to wave[i]
intS = quad(slope, wave0, wave[i], epsabs=1e-3, epsrel=1e-3)[0]
spec[i] = np.exp(intS)
return spec
def polcurve(th, p, a, b, th0):
"""The comet polarization versus phase angle curve.
Levasseur-Regourd et al. 1996:
.. math:: P(th) = p * sin(th)^a * cos(th / 2)^b * sin(th - th0)
Parameters
----------
th : float or array
The phase angle. [degrees]
p, a, b : float
The parameters of the function.
th0 : float
The negative to positive branch turnover angle. [degrees]
Returns
-------
P : float or ndarray
The polarization at phase angle `th`.
"""
thr = np.radians(th)
return (p * np.sin(thr)**a * np.cos(thr / 2.)**b
* np.sin(thr - np.radians(th0)))
def savitzky_golay(x, kernel=11, order=4):
"""Smooth with the Savitzky-Golay filter.
Parameters
----------
x : array
kernel : int, optional
        A positive odd integer giving the kernel size. `kernel >= order + 2`.
order : int, optional
Order of the polynomal.
Returns
-------
smoothed : ndarray
The smoothed `x`.
Notes
-----
From the SciPy Cookbook,
http://www.scipy.org/Cookbook/SavitzkyGolay, 01 Dec 2009
"""
if (kernel % 2) != 1 or kernel < 1:
raise ValueError(
"kernel size must be a positive odd number, was:{}".format(kernel))
if kernel < order + 2:
raise ValueError(
"kernel is to small for the polynomals\nshould be > order + 2")
half_window = (kernel - 1) // 2
    b = np.array([[k**i for i in range(order + 1)]
                  for k in range(-half_window, half_window + 1)])
    # row [0] of the pseudo-inverse gives the smoothing coefficients; rows [1]
    # or [2] would give the first or second derivative instead
    m = np.linalg.pinv(b)[0]
window_size = len(m)
half_window = (window_size - 1) // 2
# precompute the offset values for better performance
    offsets = list(zip(range(-half_window, half_window + 1), m))
# temporary data, extended with a mirror image to the left and right
# left extension: f(x0-x) = f(x0)-(f(x)-f(x0)) = 2f(x0)-f(x)
# right extension: f(xl+x) = f(xl)+(f(xl)-f(xl-x)) = 2f(xl)-f(xl-x)
leftpad = np.zeros(half_window) + 2 * x[0]
rightpad = np.zeros(half_window) + 2 * x[-1]
leftchunk = x[1:(1 + half_window)]
leftpad = leftpad-leftchunk[::-1]
rightchunk = x[len(x) - half_window - 1:len(x) - 1]
rightpad = rightpad - rightchunk[::-1]
data = np.concatenate((leftpad, x))
data = np.concatenate((data, rightpad))
smooth_data = list()
for i in range(half_window, len(data) - half_window):
value = 0.0
for offset, weight in offsets:
value += weight * data[i + offset]
smooth_data.append(value)
return np.array(smooth_data)
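def _savitzky_golay_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Smooth a noisy sine wave with an 11-point,
    # 4th-order Savitzky-Golay filter.
    x = np.linspace(0, 2 * np.pi, 200)
    noisy = np.sin(x) + np.random.randn(200) * 0.1
    return savitzky_golay(noisy, kernel=11, order=4)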
def cal2doy(cal, scale='utc'):
"""Calendar date to day of year.
Parameters
----------
cal : string or array
Calendar date. See `cal2iso` for details.
scale : string, optional
See `astropy.time.Time`.
Returns
-------
doy : astropy Time
Day of year.
"""
t = cal2time(cal, scale=scale)
if len(t) > 1:
return [int(x.yday.split(':')[1]) for x in t]
else:
return int(t.yday.split(':')[1])
def cal2iso(cal):
"""Calendar date to ISO format.
Parameters
----------
cal : string or array
Calendar date. Format: YYYY-MM-DD HH:MM:SS.SSS. May be
shortened, for example, to YYYY or YYYY-MM. DD == 0 is not
allowed and is forced to 1. MM may be a three character
abbreviation. Fractional values are allowed for days and
smaller units.
Returns
-------
iso : string or list
`cal`, ISO formatted.
"""
if isinstance(cal, (list, tuple, np.ndarray)):
return [cal2iso(x) for x in cal]
# mapping function to remove nondigits from the date string
def a2space(c):
return c if (c.isdigit() or c == ".") else " "
# if the month is an abbreviation, replace it with a number
cal = cal.lower()
cal = cal.replace('jan', '01')
cal = cal.replace('feb', '02')
cal = cal.replace('mar', '03')
cal = cal.replace('apr', '04')
cal = cal.replace('may', '05')
cal = cal.replace('jun', '06')
cal = cal.replace('jul', '07')
cal = cal.replace('aug', '08')
cal = cal.replace('sep', '09')
cal = cal.replace('oct', '10')
cal = cal.replace('nov', '11')
cal = cal.replace('dec', '12')
d = (''.join(map(a2space, cal))).split(" ")
d = d[:6] # truncate at seconds
d = [float(t) for t in d] + [0] * (6 - len(d))
if d[1] == 0.0:
d = d[:1] + [1.0] + d[2:]
if d[2] == 0.0:
d = d[:2] + [1.0] + d[3:]
dt = datetime.timedelta(days=d[2] - 1.0, hours=d[3], minutes=d[4],
seconds=d[5])
d = datetime.datetime(int(d[0]), int(d[1]), 1) + dt
return d.isoformat()
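def _cal2iso_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  A few of the accepted calendar formats and their
    # ISO equivalents.
    return (cal2iso('2015-Mar-11 14:30'),  # '2015-03-11T14:30:00'
            cal2iso('2015'),               # '2015-01-01T00:00:00'
            cal2iso('2015-06-20.5'))       # '2015-06-20T12:00:00'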
def cal2time(cal, scale='utc'):
"""Calendar date to astropy `Time`.
Parameters
----------
cal : string or array
Calendar date. See `cal2iso` for details.
scale : string, optional
See `astropy.time.Time`.
Returns
-------
    t : astropy Time
        The converted time(s).
"""
return Time(cal2iso(cal), format='isot', scale=scale)
def date_len(date):
"""Length of the date, or 0 if it is a scalar.
Useful for routines that use `date2time`.
Parameters
----------
date : string, float, astropy Time, datetime, array, None
Some time-like thingy, or `None`.
Returns
-------
n : int
The length of the array, or 0 if it is a scalar.
"""
if isinstance(date, (list, tuple, np.ndarray)):
return len(date)
elif isinstance(date, Time):
if date.isscalar:
return 0
else:
return len(date)
elif date is None:
return 0
elif np.isscalar(date):
return 0
else:
return len(date)
@singledispatch
def date2time(date, scale='utc'):
"""Lazy date to astropy `Time`.
Parameters
----------
date : string, float, astropy Time, datetime, or array
Some time-like thingy, or `None` to return the current date (UTC).
scale : string, optional
See `astropy.time.Time`.
Returns
-------
date : astropy Time
"""
if (date is not None):
raise ValueError("Bad date: {} ({})".format(date, type(date)))
return Time(datetime.datetime.utcnow(), scale=scale,
format='datetime')
@date2time.register(Time)
@date2time.register(datetime.datetime)
def _(date, scale='utc'):
return Time(date, scale=scale)
@date2time.register(int)
@date2time.register(float)
def _(date, scale='utc'):
return jd2time(date, scale=scale)
@date2time.register(str)
def _(date, scale='utc'):
return cal2time(date, scale=scale)
@date2time.register(list)
@date2time.register(tuple)
@date2time.register(np.ndarray)
def _(date, scale='utc'):
date = [date2time(d, scale=scale) for d in date]
return Time(date)
def dh2hms(dh, format="{:02d}:{:02d}:{:06.3f}"):
"""Decimal hours as HH:MM:SS.SSS, or similar.
Will work for degrees, too.
Parameters
----------
dh : float
format : string, optional
Use this format, e.g., for [+/-]HH:MM, use "{:+02d}:{:02d}".
Returns
-------
hms : string
"""
sign = -1 if dh < 0 else 1
dh = abs(dh)
hh = int(dh)
mm = int((dh - hh) * 60.0)
ss = ((dh - hh) * 60.0 - mm) * 60.0
if ss >= 60:
ss -= 60
mm += 1
if mm >= 60:
mm -= 60
hh += 1
return format.format(sign * hh, mm, ss)
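def _dh2hms_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original module.  Decimal hours and signed decimal degrees to
    # sexagesimal strings.
    ra = dh2hms(12.5125)                             # '12:30:45.000'
    dec = dh2hms(-23.5, "{:+03d}:{:02d}:{:05.2f}")   # '-23:30:00.00'
    return ra, dec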
def doy2md(doy, year):
"""Day of year in MM-DD format.
Parameters
----------
doy : int or array
Day(s) of year.
year : int
The year in question.
Returns
-------
md : string or list
MM-DD for each `doy`.
"""
jd0 = s2jd('{0}-12-31'.format(year - 1))
    if isinstance(doy, (tuple, list, np.ndarray)):
md = []
for i in range(len(doy)):
md.append(jd2dt(jd0 + doy[i]).strftime('%m-%d'))
else:
md = jd2dt(jd0 + doy).strftime('%m-%d')
return md
def drange(start, stop, num=50):
"""Array of dates, linearly spaced.
Parameters
----------
start : string, float, astropy Time, datetime, or array
The start date, in any form suitable for `date2time`.
stop :
The stop date, in any form suitable for `date2time`.
num : int, optional
The number of samples to generate.
Returns
-------
dates : astropy Time
"""
endpoints = date2time((start, stop))
interval = np.diff(endpoints)[0].jd
return endpoints[0] +
|
np.linspace(0, interval, num)
|
numpy.linspace
|
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.sparse as sp
import tensorflow as tf
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from pygsp import graphs
from sklearn.cluster import spectral_clustering
from sklearn.datasets import make_blobs
from sklearn.metrics.cluster import v_measure_score, homogeneity_score, completeness_score
from sklearn.neighbors import kneighbors_graph
from spektral.layers import MinCutPool, DiffPool
from spektral.layers.convolutional import GraphConvSkip
from spektral.utils import init_logging
from spektral.utils.convolution import normalized_adjacency
from tqdm import tqdm
from utils import citation
from utils.misc import sp_matrix_to_sp_tensor_value, product_dict
np.random.seed(0) # for reproducibility
PLOTS_ON = True
ITER = 10000
plt.set_cmap('nipy_spectral')
VERBOSE = False
# Parameters
P = OrderedDict([
('apply_GNN', True),
('ACTIV', 'elu'),
('es_patience', ITER)
])
log_dir = init_logging() # Create log directory and file
# Tunables
tunables = OrderedDict([
('dataset', ['cora']), # 'cora', 'citeseer', 'pubmed', 'cloud', or 'synth'
('method', ['mincut_pool']), # 'mincut_pool', 'diff_pool'
('H_', [None]),
('n_channels', [16]),
('learning_rate', [5e-4])
])
N_RUNS = 1
df_out = None
for T in product_dict(tunables):
# Update params with current config
P.update(T)
print(T)
############################################################################
# LOAD DATASET
############################################################################
if P['dataset'] == 'synth':
X, y = make_blobs(n_samples=100, centers=5, n_features=4, random_state=None) # 6
X = X.astype(np.float32)
A = kneighbors_graph(X, n_neighbors=25, mode='distance').todense()
A = np.asarray(A)
A = np.maximum(A, A.T)
A = sp.csr_matrix(A, dtype=np.float32)
n_clust = y.max() + 1
elif P['dataset'] == 'cloud':
G = graphs.Grid2d(N1=15, N2=10) # Community(N=150, seed=0) #SwissRoll(N=400, seed=0) #Ring(N=100) #TwoMoons() #Cube(nb_pts=500) #Bunny()
X = G.coords.astype(np.float32)
A = G.W
y = np.ones(X.shape[0]) # X[:,0] + X[:,1]
n_clust = 5
else:
A, X, _, _, _, _, _, _, y_ohe = citation.load_data(P['dataset'])
y = np.argmax(y_ohe, axis=-1)
X = X.todense()
n_clust = y.max() + 1
# Sort IDs
if P['dataset'] != 'cloud':
ids = np.argsort(y)
y = y[ids]
X = X[ids, :]
A = A[ids, :][:, ids]
A = sp.csr_matrix(A.todense())
n_feat = X.shape[-1]
homo_score_list = []
complete_score_list = []
v_score_list = []
for run in range(N_RUNS):
############################################################################
# MODEL
############################################################################
X_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, n_feat), name='X_in'))
A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)), name='A_in', sparse=True)
S_in = Input(tensor=tf.placeholder(tf.int32, shape=(None,), name='segment_ids_in'))
if P['apply_GNN'] and P['method'] != 'diff_pool':
A_norm = normalized_adjacency(A)
X_1 = GraphConvSkip(P['n_channels'],
kernel_initializer='he_normal',
activation=P['ACTIV'])([X_in, A_in])
else:
A_norm = A
X_1 = X_in
if P['method'] == 'mincut_pool':
pool1, adj1, seg1, C = MinCutPool(k=n_clust,
h=P['H_'],
activation=P['ACTIV'])([X_1, A_in, S_in])
elif P['method'] == 'diff_pool':
pool1, adj1, seg1, C = DiffPool(k=n_clust,
channels=P['n_channels'],
activation=P['ACTIV'])([X_1, A_in, S_in])
else:
            raise ValueError("Unknown method: {}".format(P['method']))
model = Model([X_in, A_in, S_in], [pool1, seg1, C])
model.compile('adam', None)
############################################################################
# TRAINING
############################################################################
# Setup
sess = K.get_session()
loss = model.total_loss
opt = tf.train.AdamOptimizer(learning_rate=P['learning_rate'])
train_step = opt.minimize(loss)
# Initialize all variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Fit layer
tr_feed_dict = {X_in: X,
A_in: sp_matrix_to_sp_tensor_value(A_norm),
S_in: y}
layer_out = []
nmi_out = []
best_loss = np.inf
patience = P['es_patience']
tol = 1e-5
for _ in tqdm(range(ITER)):
outs = sess.run([train_step, model.losses[0], model.losses[1], C], feed_dict=tr_feed_dict)
layer_out.append((outs[1], outs[2], outs[1] + outs[2]))
c = np.argmax(outs[3], axis=-1)
v_score = v_measure_score(y, c)
nmi_out.append(v_score)
if outs[1] + outs[2] + tol < best_loss:
best_loss = outs[1] + outs[2]
patience = P['es_patience']
if VERBOSE:
tqdm.write('New best loss {}'.format(best_loss))
else:
patience -= 1
if VERBOSE:
tqdm.write('Patience {}'.format(patience))
if patience == 0:
break
layer_out = np.array(layer_out)
############################################################################
# RESULTS
############################################################################
C_ = sess.run([C], feed_dict=tr_feed_dict)[0]
c = np.argmax(C_, axis=-1)
hs = homogeneity_score(y, c)
cs = completeness_score(y, c)
nmis = v_measure_score(y, c)
homo_score_list.append(hs)
complete_score_list.append(cs)
v_score_list.append(nmis)
print('MinCutPool - HOMO: {}, CS: {}, NMI: {}'.format(hs, cs, nmis))
np.savez(log_dir + 'loss+nmi_{}_{}_{}.npz'.format(P['dataset'], P['method'], run),
loss=layer_out, nmi=nmi_out)
K.clear_session()
P['homo_score_mean'] = np.mean(homo_score_list)
P['homo_score_std'] = np.std(homo_score_list)
P['complete_score_mean'] = np.mean(complete_score_list)
P['complete_score_std'] =
|
np.std(complete_score_list)
|
numpy.std
|
import numpy as np
import sys
from tqdm import tqdm,trange
from partition import *
from iris_io import *
from utils import *
from datetime import date, timedelta
def prep_train(nl, lend, nt, nb, neb, nlen, dic, istest, usecpp, icache, bcache, ccache):
X = np.zeros((nl, nt, 1)).astype(int)
NSS, TYPE, PL, PH = [], [], [], []
for i in tqdm(range(nl), desc='Preparing examples: '):
if istest == 2: # test
id = int(i / lend)
nqss = [int(i % lend)]
if (id >= len(dic['DVcol'])):
print("Err_TooManyTests")
sys.exit()
elif istest == 0: # pre-train
id = int(i % len(dic['DVcol']))
nqss = [nn for nn in range(lend)]
elif istest == 1: # validate during pre-train
id = int(i % len(dic['DVcol']))
nqss = [int(i / len(dic['DVcol'])) % lend]
#dvcol 0
dvcol = dic['DVcol'][id][0] if len(dic['DVcol'][id]) > 0 else []
if (len(dic['DimC'][id]) > 0):
ds = np.array(dic['DimC'][id])
nss = rebucket(np.array(dic['Cnts'][id]), ds, nt, nb, neb, 'Point')
elif (len(dic['DimR'][id]) > 0):
ds = np.array(dic['DimR'][id])
nss = rebucket(np.array(dic['Cnts'][id]), ds, nt, nb, neb, 'Range', [np.where(ds == dvcol)[0]])
else:
print('Err_Type: ' + dic['Table'][id])
sys.exit()
nsssort, dimsort = map(list, zip(*sorted(zip(nss, ds), reverse=True)))
NSS += [nsssort]
CNTs = [dic['Cnts'][id][d] for d in dimsort]
KEYs = [dic['Keys'][id][d] for d in dimsort]
TYPES = [dic['Types'][id][d] for d in dimsort]
ckey = dic['Table'][id] + ';' + str(nsssort) + ';' + str(dimsort)
KEYs, CNTs = bkt_shrink(ccache, ckey, KEYs, CNTs, TYPES, nsssort, usecpp, [dimsort.index(dvc) for dvc in dvcol])
for d in range(len(CNTs)):
for k in range(1, len(CNTs[d])):
CNTs[d][k] += CNTs[d][k - 1]
KEYs = [parse_keys(KEYs[id], TYPES[id]) for id in range(len(KEYs))]
pl, ph = [], []
for nqs in nqss:
pred_low = dic['Pred_low'][id][nqs] if len(dic['Pred_low'][id]) > nqs else []
pred_high = dic['Pred_high'][id][nqs] if len(dic['Pred_high'][id]) > nqs else []
if len(pred_low) > 0:
pred_low_r, pred_high_r = [], []
for d in range(len(nss)):
pred_low_r += [pred_low[int(
|
np.where(ds == dimsort[d])
|
numpy.where
|
from LaplacianEigenmap import *
import numpy as np
from PIL import Image
from mds import *
from isomap import *
import Classifier
from LLE import *
import sys
def mds_func(data):
data_reduced = mds(data, 20)
np.savetxt('mds_data_reduced.txt', data_reduced, '%.7e', '\t')
    sys.stdout.write('Dimensionality reduction finished; reduced data saved to mds_data_reduced.txt\n')
return data_reduced
def isomap_func(data):
data_reduced = isomap(data, 20, 15)
|
np.savetxt('isomap_data_reduced.txt', data_reduced, '%.7e', '\t')
|
numpy.savetxt
|
import numpy as np
import pandas as pd
import warnings
from scipy.stats import expon, uniform
from scipy.optimize import root_scalar
try:
from scipy.integrate import solve_ivp
except ImportError:
print("Warning, solve_ivp could not be imported. Use f_is_stepwise_constant = True")
def solve_ivp(*args, **kwargs):
raise NotImplementedError
def interpolate_F_inverse(tau, t_grid, F_grid):
if F_grid[-1] < tau:
return np.inf
## Left bound
# Largest i such that F_i <= tau
i = np.max(np.nonzero(F_grid <= tau))
# Smallest j such that F_j == F_i
if tau == F_grid[i]:
j = np.min(np.nonzero(F_grid == tau))
return t_grid[j]
## Right bound
k = i + 1
return np.interp(tau, [F_grid[i], F_grid[k]], [t_grid[i], t_grid[k]])
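def _interpolate_F_inverse_example():
    # Illustrative usage sketch added for documentation; not part of the
    # original source.  Invert a tabulated CDF by linear interpolation;
    # quantiles beyond the table return np.inf.
    t_grid = np.array([0.0, 1.0, 2.0, 3.0])
    F_grid = np.array([0.0, 0.25, 0.75, 1.0])
    return (interpolate_F_inverse(0.5, t_grid, F_grid),   # 1.5
            interpolate_F_inverse(1.1, t_grid, F_grid))   # inf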
def F_sister_bias(t, N, beta, tG):
output = 0
output += (t // tG) * (1 - beta) * tG
remainder = np.mod(t, tG)
if remainder < beta * tG:
output += remainder * (N - 1) * (1 - beta) / N
else:
output += (N - 1) / N * (1 - beta) * beta * tG
output += (remainder - beta * tG) * \
(((N - 1) * (1 - beta) + 1) / N)
return output
def inverse_F_sister_bias(tau, N, beta, tG):
output = 0
output += (tau // ((1 - beta) * tG)) * tG
remainder = np.mod(tau, (1 - beta) * tG)
if remainder < (N - 1) / N * (1 - beta) * beta * tG:
output += remainder / ((N - 1) / N * (1 - beta))
else:
output += beta * tG
output += (remainder - (N - 1) / N * (1 - beta) * beta * tG) / \
(((N - 1) * (1 - beta) + 1) / N)
return output
inverse_F_sister_bias = np.vectorize(inverse_F_sister_bias)
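def _sister_bias_roundtrip_example():
    # Illustrative sanity-check sketch added for documentation; not part of
    # the original source.  F_sister_bias and inverse_F_sister_bias should be
    # inverses of each other; the parameters below are arbitrary test values.
    N, beta, tG = 4, 0.4, 100.0
    t = 137.0
    tau = F_sister_bias(t, N, beta, tG)
    return np.isclose(inverse_F_sister_bias(tau, N, beta, tG), t)  # True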
def average_f_g1_proportion(avg_tG1_fun, gamma_fun, tG, beta, Tdeath, c):
## Compute gamma (cell cycle model parameters)
gamma = gamma_fun(beta, tG)
# Define functions
def eta_fun(f, tG, beta, Tdeath, c):
if c * beta * tG * f == 0:
output = np.inf
else:
output = Tdeath / (c * beta * tG * f)
return output
def g_fun(f, gamma, tG, beta, Tdeath, c):
output = f - (1 - beta) * tG / \
(avg_tG1_fun(eta_fun(f, tG, beta, Tdeath, c), gamma)
+ (1 - beta) * tG )
return output
# Solution always lies between 0 and 1
bracket = [0, 1]
# Initialise output
output = []
sol = root_scalar(g_fun,
args=(gamma, tG, beta, Tdeath, c),
bracket=bracket,
x0 = 1 - beta)
if sol.converged:
return sol.root
else:
return np.nan
average_f_g1_proportion = np.vectorize(average_f_g1_proportion)
def exponential_ccm(random_state=None, clone=None, tG1_param=50):
return expon.rvs(scale=tG1_param, random_state=random_state)
def exponential_ccm_heterotypic(random_state=None, clone=None, tG1_param_clone_0=50, tG1_param_clone_1=50):
assert clone == 0 or clone == 1
if clone == 0:
return expon.rvs(scale=tG1_param_clone_0, random_state=random_state)
elif clone == 1:
return expon.rvs(scale=tG1_param_clone_1, random_state=random_state)
else:
raise Exception('This should not be reached. Something has gone horribly wrong.')
def exponential_cdf(t, tG1_param=50):
return 1 - np.exp(- t / tG1_param)
def uniform_ccm(random_state=None, clone=None, tG1_param=50, r=20):
assert 0.5 * r / tG1_param <= 1
return uniform.rvs(loc=tG1_param - 0.5 * r, scale=r, random_state=random_state)
def uniform_ccm_heterotypic(random_state=None, clone=None, tG1_param_clone_0=50,
r_clone_0=20, tG1_param_clone_1=50, r_clone_1=20):
assert 0.5 * r_clone_0 / tG1_param_clone_0 <= 1
assert 0.5 * r_clone_1 / tG1_param_clone_1 <= 1
assert clone == 0 or clone == 1
if clone == 0:
return uniform.rvs(loc=tG1_param_clone_0 - 0.5 * r_clone_0,
scale=r_clone_0, random_state=random_state)
elif clone == 1:
return uniform.rvs(loc=tG1_param_clone_1 - 0.5 * r_clone_1,
scale=r_clone_1, random_state=random_state)
else:
raise Exception('This should not be reached. Something has gone horribly wrong.')
def uniform_cdf(t, tG1_param=50, r=20):
assert 0.5 * r / tG1_param <= 1
if t < tG1_param - 0.5 * r:
return 0
if tG1_param + 0.5 * r < t:
return 1
return (t - (tG1_param - 0.5 * r)) / r
def base_rate_death_signal(t, tau, tbirth, tG1, clone, isinG1, base_rate=1):
return base_rate * np.ones(tau.shape)
def base_rate_death_signal_heterotypic(t, tau, tbirth, tG1, clone, isinG1,
base_rate_clone_0=1, base_rate_clone_1=1):
assert np.all(np.logical_or(clone == 0, clone == 1))
return base_rate_clone_0 * (clone == 0) + base_rate_clone_1 * (clone == 1)
def normalised_g2_death_signal(t, tau, tbirth, tG1, clone, isinG1, coef=1):
# All cells in G1
if np.all(isinG1):
return np.zeros(tau.shape)
# All cells in G2
if np.all(np.logical_not(isinG1)):
return coef * np.ones(tau.shape)
# Neither of these scenarios
return coef * np.sum(np.logical_not(isinG1)) / (tau.size - 1) * np.ones(tau.shape)
def normalised_g2_death_signal_heterotypic(t, tau, tbirth, tG1, clone, isinG1,
coef_clone_0=1, coef_clone_1=1):
assert np.all(np.logical_or(clone == 0, clone == 1))
coef = coef_clone_0 * (clone == 0) + coef_clone_1 * (clone == 1)
# All cells in G1
if np.all(isinG1):
return np.zeros(tau.shape)
# All cells in G2
if np.all(np.logical_not(isinG1)):
return coef * np.ones(tau.shape)
# Neither of these scenarios
return coef * np.sum(np.logical_not(isinG1)) / (tau.size - 1) * np.ones(tau.shape)
def g2_death_signal(t, tau, tbirth, tG1, clone, isinG1, coef=1):
return coef * np.sum(np.logical_not(isinG1)) * np.ones(tau.shape)
def g2_death_signal_heterotypic(t, tau, tbirth, tG1, clone, isinG1,
coef_clone_0=1, coef_clone_1=1):
assert np.all(np.logical_or(clone == 0, clone == 1))
coef = coef_clone_0 * (clone == 0) + coef_clone_1 * (clone == 1)
return coef * np.sum(np.logical_not(isinG1)) * np.ones(tau.shape)
class WellMixedSimulator(object):
def __init__(self, f=base_rate_death_signal, ccm=exponential_ccm,
Tdeath=100, tG2=50, tstart=0, tend=500,
f_args=(),
ccm_args=(),
max_cell_count=np.inf,
min_cell_count=0,
f_is_stepwise_constant=True,
min_cell_count_for_clone={},
max_cell_count_for_clone={},
apoptosis_at_checkpoint=False,
switch_apoptosis_time=None,
):
# Some assertions
assert callable(f)
assert callable(ccm)
if not callable(Tdeath):
assert Tdeath >= 0
Tdeath = lambda clone, Tdeath=Tdeath: Tdeath * np.ones(clone.shape)
if not callable(tG2):
assert tG2 >= 0
tG2 = lambda clone, tG2=tG2: tG2 * np.ones(clone.shape)
assert tend >= tstart
self.f = f
self.ccm = ccm
self.Tdeath = Tdeath
self.tG2 = tG2
self.tstart = tstart
self.tend = tend
self.f_args = f_args
self.ccm_args = ccm_args
self.min_cell_count = min_cell_count
self.max_cell_count = max_cell_count
self.f_is_stepwise_constant = f_is_stepwise_constant
self.max_cell_count_for_clone = max_cell_count_for_clone
self.min_cell_count_for_clone = min_cell_count_for_clone
self.apoptosis_at_checkpoint = apoptosis_at_checkpoint
self.switch_apoptosis_time = switch_apoptosis_time
self.switched = False
# Save parameters in dict for output
self.param = {
'f' : f,
'ccm' : ccm,
'Tdeath' : Tdeath,
'tG2' : tG2,
'tstart' : tstart,
'tend' : tend,
'f_args' : f_args,
'ccm_args' : ccm_args,
'min_cell_count' : min_cell_count,
'max_cell_count' : max_cell_count,
'f_is_stepwise_constant' : f_is_stepwise_constant,
'min_cell_count_for_clone' : min_cell_count_for_clone,
'max_cell_count_for_clone' : max_cell_count_for_clone,
'apoptosis_at_checkpoint' : apoptosis_at_checkpoint,
'switch_apoptosis_time' : switch_apoptosis_time,
}
# Define division, transition, and death events
if not self.f_is_stepwise_constant:
if self.apoptosis_at_checkpoint:
raise NotImplementedError
if self.switch_apoptosis_time:
raise NotImplementedError
division_event = lambda t, tau, tbirth, tG1, clone, isinG1, *args: \
np.amax(t - tbirth - tG1 - self.tG2(clone))
division_event.terminal = True
division_event.direction = 1
def transition_event(t, tau, tbirth, tG1, clone, isinG1, *args):
if np.any(isinG1):
return np.amax(t - tbirth[isinG1] - tG1[isinG1])
else:
return -1
transition_event.terminal = True
transition_event.direction = 1
def death_event(t, tau, tbirth, tG1, clone, isinG1, *args):
if np.any(isinG1):
return np.amax(tau[isinG1] - self.Tdeath(clone)[isinG1])
else:
return -1
death_event.terminal = True
death_event.direction = 1
self.events = [ division_event, transition_event, death_event ]
def run(self,
tau_0=np.zeros(4),
tbirth_0=np.random.rand(4) * -100,
tG1_0=expon.rvs(scale=50, size=4) + 50,
clone_0=np.arange(4),
seed=None,
):
"""
Runs Monte Carlo Death Clock simulation with given initial conditions
and returns dictionary with following items:
(t1, t2, ..., tN are the time points of the N division/transition/death events.)
t_events: [ tstart, t1, t2, ..., tN, tend ]
t_grid: [ np.array(time points for ODE solution) per time interval ]
tau: [ num-cells-by-num-timepoints np.array(tau) per time interval ]
cell_indices: [ np.array(indices of cells) per time interval ]
isinG1: [ np.array(True if in G1, else False) per time interval ]
division: [ np.array(indices of cells undergoing division) per event ]
transition: [ np.array(indices of cells undergoing transition) per event ]
death: [ np.array(indices of cells undergoing death) per event ]
tbirth: np.array(birth times per cell)
tG1: np.array(G1 durations per cell)
clone: np.array(clones per cell)
status: 0 means end of simulation time reached
1 means zero cell count reached
2 means max cell count reached
3 means min cell count reached
4 means clone-specific max cell count reached (see status_info for clone)
5 means clone-specific min cell count reached (see status_info for clone)
status_info: status = 0: None
status = 1: None
status = 2: None
status = 3: None
status = 4: clone whose max cell count was reached
status = 5: clone whose min cell count was reached
param: dictionary containing parameters
f: death clock signal function
ccm: cell cycle model function
Tdeath: death threshold function
tG2: G2 duration function
tstart: start time
tend: end time
f_args: additional args to f
ccm_args: additional args to ccm
min_cell_count: self-explanatory
max_cell_count: self-explanatory
f_is_stepwise_constant: whether f is stepwise constant between events
min_cell_count_for_clone: dict containing clone-specific minimum cell count
max_cell_count_for_clone: dict containing clone-specific maximum cell count
init_cond: dictionary containing initial conditions
tau_0: tau
tbirth_0: birth times
tG1_0: G1 durations
clone_0: clones
seed: seed for random number generator
"""
self.tau_0 = np.array(tau_0, dtype=float)
self.tbirth_0 = np.array(tbirth_0, dtype=float)
self.tG1_0 = np.array(tG1_0, dtype=float)
self.clone_0 = np.array(clone_0, dtype=int)
self.check_initial_conditions()
# Create random state if seed is not None
if seed is not None:
self.random_state = np.random.RandomState(seed)
else:
self.random_state = None
# Save initial conditions in dict for output
self.init_cond = {
'tau_0' : np.array(tau_0, dtype=float),
'tbirth_0' : np.array(tbirth_0, dtype=float),
'tG1_0' : np.array(tG1_0, dtype=float),
'clone_0' : np.array(clone_0, dtype=int),
'seed' : seed,
}
# Index cells
self.cell_indices_now = np.arange(len(self.tbirth_0))
self.last_cell_index = self.cell_indices_now[-1]
# Initialise simulation time
self.t_now = self.tstart
# Initialise state variables
self.tau_now = np.array(self.tau_0)
self.tbirth_now = np.array(self.tbirth_0)
self.tG1_now = np.array(self.tG1_0)
self.clone_now = np.array(self.clone_0)
# Helper state variable
self.isinG1_now = self.t_now - self.tbirth_now < self.tG1_now
# Initialise output data
self.t_events_data = [self.t_now]
self.t_grid_data = []
self.tau_data = []
self.division_data = []
self.transition_data = []
self.death_data = []
self.cell_indices_data = []
self.isinG1_data = []
self.tbirth_data = np.array(self.tbirth_now)
self.tG1_data = np.array(self.tG1_now)
self.clone_data = np.array(self.clone_now)
# Initialise status and status_info
self.status = None
self.status_info = None
# Simulation loop
while True:
t, tau, event_occurred = self.solve_until_next_event()
# Switch apoptosis mode if required
if self.switch_apoptosis_time:
if t[-1] >= self.switch_apoptosis_time and not self.switched:
self.apoptosis_at_checkpoint = not self.apoptosis_at_checkpoint
self.switched = True
# Save data
self.t_events_data.append(t[-1])
self.t_grid_data.append(t)
self.tau_data.append(tau)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
# If division, transition or death event occurred
if event_occurred:
# Update tau and t
self.tau_now = np.array(self.tau_data[-1][:,-1])
self.t_now = self.t_events_data[-1]
# If we are at or past the end of the simulation time, break the
# loop and do NOT record the event. In other words, discrete events
# happening at exactly tend will not be recorded.
if self.t_now >= self.tend:
break
self.do_cell_transitions_divisions_deaths()
# If there are no cells remaining, tack on last data item and
# break
if len(self.tau_now) == 0:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([]))
self.cell_indices_data.append(np.array([]))
self.isinG1_data.append(np.array([]))
self.status = 1
break
# If the maximum number of cells is hit, tack on last data item
# and break
if len(self.tau_now) >= self.max_cell_count:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([self.tau_now, self.tau_now]).T)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
self.status = 2
break
# If the minimum number of cells is hit, tack on last data item
# and break
if len(self.tau_now) <= self.min_cell_count:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([self.tau_now, self.tau_now]).T)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
self.status = 3
break
# If the maximum number of cells for a clone is hit, tack on
# last data item and break
for clone, max_cell_count in self.max_cell_count_for_clone.items():
if np.sum(self.clone_now == clone) >= max_cell_count:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([self.tau_now, self.tau_now]).T)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
self.status = 4
self.status_info = clone
break
# Break out of while loop
if self.status == 4:
break
# If the minimum number of cells for a clone is hit, tack on
# last data item and break
for clone, min_cell_count in self.min_cell_count_for_clone.items():
if np.sum(self.clone_now == clone) <= min_cell_count:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([self.tau_now, self.tau_now]).T)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
self.status = 5
self.status_info = clone
break
# Break out of while loop
if self.status == 5:
break
# Else simulation has terminated
else:
self.status = 0
break
return {
't_events' : self.t_events_data,
't_grid' : self.t_grid_data,
'tau' : self.tau_data,
'cell_indices' : self.cell_indices_data,
'isinG1' : self.isinG1_data,
'division' : self.division_data,
'transition' : self.transition_data,
'death' : self.death_data,
'tbirth' : self.tbirth_data,
'tG1' : self.tG1_data,
'clone' : self.clone_data,
'status' : self.status,
'status_info' : self.status_info,
'param' : self.param,
'init_cond' : self.init_cond,
}
def sample_g1_duration(self, clone):
return self.ccm(self.random_state, clone, *self.ccm_args)
def check_initial_conditions(self):
if not len(self.tau_0) == len(self.tbirth_0) == len(self.tG1_0) == len(self.clone_0):
raise ValueError("tau_0, tbirth_0, tG1_0, and clone_0 must have the same length")
if not len(self.tau_0) < self.max_cell_count:
raise ValueError("The initial cell count ({}) must be smaller than "
"the maximum cell count ({})".format(len(self.tau_0), self.max_cell_count))
if not len(self.tau_0) > 0:
raise ValueError("The initial cell count ({}) must be larger than "
"0".format(len(self.tau_0)))
if not len(self.tau_0) > self.min_cell_count:
raise ValueError("The initial cell count ({}) must be larger than "
"the minimum cell count ({})".format(len(self.tau_0), self.min_cell_count))
for clone, min_cell_count in self.min_cell_count_for_clone.items():
if not np.sum(self.clone_0 == clone) > min_cell_count:
raise ValueError("The initial cell count of clone {0} ({1}) must "
"be larger than the minimum cell count for clone {0} ({2})".format(
clone, np.sum(self.clone_0 == clone), min_cell_count))
for clone, max_cell_count in self.max_cell_count_for_clone.items():
if not np.sum(self.clone_0 == clone) < max_cell_count:
raise ValueError("The initial cell count of clone {0} ({1}) must "
"be smaller than the maximum cell count for clone {0} ({2})".format(
clone, np.sum(self.clone_0 == clone), max_cell_count))
if not np.all(self.tbirth_0 <= self.tstart):
raise ValueError("Initial birth times must be at or before start of simulation")
if not self.apoptosis_at_checkpoint:
if not np.all(np.logical_or(self.tau_0 < self.Tdeath(self.clone_0),
np.logical_and(self.tau_0 >= self.Tdeath(self.clone_0), self.tstart
- self.tbirth_0 >= self.tG1_0))):
raise ValueError("Death invariant is violated in initial conditions")
if not np.all(self.tstart - self.tbirth_0 < self.tG1_0 + self.tG2(self.clone_0)):
raise ValueError("Birth invariant is violated in initial conditions")
def do_cell_divisions(self):
division_indices = np.nonzero(np.isclose(self.t_now - self.tbirth_now, self.tG1_now + self.tG2(self.clone_now)))
self.division_data.append(self.cell_indices_now[division_indices])
for division_index in division_indices[0]:
# Reset death clock of daughter cells
self.tau_now[division_index] = 0
self.tau_now = np.append(self.tau_now, 0)
# Set time of birth on daughter cells
self.tbirth_now[division_index] = self.t_now
self.tbirth_now = np.append(self.tbirth_now, self.t_now)
# Draw random G1 duration for daughter cells
self.tG1_now[division_index] = self.sample_g1_duration(self.clone_now[division_index])
self.tG1_now = np.append(self.tG1_now, self.sample_g1_duration(self.clone_now[division_index]))
# Set clone of new daughter cell
self.clone_now = np.append(self.clone_now, self.clone_now[division_index])
# Both cells start in G1
self.isinG1_now[division_index] = True
self.isinG1_now = np.append(self.isinG1_now, True)
# Generate new indices for cells
self.cell_indices_now[division_index] = self.last_cell_index + 1
self.cell_indices_now = np.append(self.cell_indices_now, self.last_cell_index + 2)
# Save static data
self.tbirth_data = np.append(self.tbirth_data, [self.t_now, self.t_now])
self.tG1_data = np.append(self.tG1_data,
[ self.tG1_now[division_index], self.tG1_now[-1] ])
self.clone_data = np.append(self.clone_data,
[ self.clone_now[-1], self.clone_now[-1] ])
# Update last cell index
self.last_cell_index += 2
def do_cell_transitions(self):
if self.apoptosis_at_checkpoint:
transition_indices = np.nonzero(
np.logical_and(np.isclose(self.t_now - self.tbirth_now, self.tG1_now),
self.tau_now < self.Tdeath(self.clone_now)))
else:
transition_indices = np.nonzero(np.isclose(self.t_now - self.tbirth_now, self.tG1_now))
self.transition_data.append(self.cell_indices_now[transition_indices])
self.isinG1_now[transition_indices] = False
def do_cell_deaths(self):
if self.apoptosis_at_checkpoint:
death_indices = np.nonzero(
np.logical_and(np.isclose(self.t_now - self.tbirth_now, self.tG1_now),
self.tau_now >= self.Tdeath(self.clone_now)))
else:
death_indices = np.nonzero(
np.logical_and(
np.isclose(self.tau_now, self.Tdeath(self.clone_now)),
self.isinG1_now))
self.death_data.append(self.cell_indices_now[death_indices])
# Traverse from last index to first, else indexing is incorrect
for death_index in death_indices[0][::-1]:
# Remove dead cell
self.tau_now = np.delete(self.tau_now, death_index)
self.tbirth_now = np.delete(self.tbirth_now, death_index)
self.tG1_now = np.delete(self.tG1_now, death_index)
self.clone_now = np.delete(self.clone_now, death_index)
self.isinG1_now = np.delete(self.isinG1_now, death_index)
self.cell_indices_now = np.delete(self.cell_indices_now, death_index)
def do_cell_transitions_divisions_deaths(self):
self.do_cell_transitions()
self.do_cell_divisions()
self.do_cell_deaths()
def solve_until_next_event_for_constant_f(self):
# Compute constant f
f = self.f(self.t_now, self.tau_now, self.tbirth_now, self.tG1_now,
self.clone_now, self.isinG1_now, *self.f_args)
# Find time until next event
time_until_next_division = np.min(self.tbirth_now + self.tG1_now + self.tG2(self.clone_now) - self.t_now)
if np.any(self.isinG1_now):
tbirth_filtered = self.tbirth_now[self.isinG1_now]
tG1_filtered = self.tG1_now[self.isinG1_now]
tau_filtered = self.tau_now[self.isinG1_now]
f_filtered = f[self.isinG1_now]
Tdeath_filtered = self.Tdeath(self.clone_now)[self.isinG1_now]
time_until_next_transition = np.min(tbirth_filtered + tG1_filtered - self.t_now)
with np.errstate(divide='ignore'):
time_until_next_death = np.min((Tdeath_filtered - tau_filtered) / f_filtered)
else:
time_until_next_transition = np.inf
time_until_next_death = np.inf
# If apoptosis at checkpoint, then death events are at the same time as
# transition events
if self.apoptosis_at_checkpoint:
time_until_next_death = np.inf
time_until_next_event = \
np.min([time_until_next_division, time_until_next_transition, time_until_next_death])
assert time_until_next_event >= 0
# Event occurred if the time of the next event is before termination
event_occurred = self.t_now + time_until_next_event < self.tend
# Get timestep
if event_occurred:
timestep = time_until_next_event
else:
timestep = self.tend - self.t_now
# Compute and return results
output_t = np.array([self.t_now, self.t_now + timestep])
output_tau = np.array([self.tau_now, self.tau_now + f * timestep]).T
return output_t, output_tau, event_occurred
def solve_until_next_event_for_nonconstant_f(self):
# Solve ODE
output = solve_ivp(self.f, [self.t_now, self.tend], self.tau_now,
args=(self.tbirth_now, self.tG1_now, self.clone_now, self.isinG1_now) + self.f_args,
events=self.events)
if not output.success:
raise Exception('An error occurred in scipy.integrate.solve_ivp: "{}"'.format(
output.message))
if output.status == 1:
# Sanity check: only one event should have happened
assert np.sum([len(events) for events in output.y_events]) == 1
return output.t, output.y, output.status == 1
def solve_until_next_event(self):
if self.f_is_stepwise_constant:
return self.solve_until_next_event_for_constant_f()
else:
return self.solve_until_next_event_for_nonconstant_f()
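# Illustrative sketch: one way the simulator above might be driven. All parameter
# values and initial conditions below are assumptions chosen only to satisfy the
# invariants checked in check_initial_conditions; numpy (np) and scipy.stats' expon
# are assumed to be imported at module level, as they are used elsewhere in this file.
def _demo_well_mixed_simulation(seed=0):
    rng = np.random.RandomState(seed)
    n_cells = 4
    simulator = WellMixedSimulator(f=normalised_g2_death_signal,
                                   ccm=exponential_ccm,
                                   Tdeath=100, tG2=50,
                                   tstart=0, tend=200,
                                   f_args=(1.0,),    # coef for the death clock signal
                                   ccm_args=(50,))   # tG1_param for the cell cycle model
    data = simulator.run(tau_0=np.zeros(n_cells),
                         tbirth_0=-10 * rng.rand(n_cells),
                         tG1_0=expon.rvs(scale=50, size=n_cells, random_state=rng) + 20,
                         clone_0=np.zeros(n_cells, dtype=int),
                         seed=seed)
    # data['status'] == 0 indicates the end of simulation time was reached; the
    # returned dictionary can also be wrapped in WellMixedSimulationData (defined
    # below) for convenient summary statistics.
    return data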
class WellMixedSimulationData(object):
def __init__(self, data):
# Store raw data
self.data = data
# Initialise all data members to None for lazy initialisation
# global data
self.status = None
self.status_info = None
self.unique_clones = None
self.num_divisions = None
self.num_transitions = None
self.num_deaths = None
self.total_cell_count = None # sum of initial cells and cells born during simulation
self.num_divisions_for_clone = {}
self.num_transitions_for_clone = {}
self.num_deaths_for_clone = {}
self.f = None
# timeseries data (per time interval)
self.t_events = None
self.cell_count = None
self.clone_cell_count = None
self.G1_cell_count = None
self.G2_cell_count = None
self.timeseries_df = None
self.G1_cell_count_for_clone = {}
self.G2_cell_count_for_clone = {}
self.cell_count_for_clone = {}
# fine timeseries data (per ODE time point)
self.t_grid = None
self.tau = {}
# cellwise data
self.tbirth = None # Time of birth
self.tG1 = None # G1 duration
self.clone = None # clone
self.died = None # whether cell died during simulation
self.tdeath = None # time of death, inf if not died
self.divided = None # whether cell divided during simulation
self.tdivision = None # time of division, inf if not divided
self.transitioned = None # whether cell transitioned to G2
self.ttransition = None # time of transition to G2, inf if not transitioned
self.t_last_alive = None # time that cell was last alive
# (until it divided, died or simulation terminated)
self.max_age = None # t_last_alive - tbirth
self.time_in_G1 = None # time spent in G1 (from birth until
# transition/death/termination)
self.time_in_G2 = None # time spent in G2 (from transition until
# termination/divions, 0 if not transitioned)
self.last_tau = None # Last death clock value
self.average_f = None # Average death clock signal: last death
# clock value divided by time spent in G1
self.effective_time_in_G1 = None # Effective time spent in G1 (from birth until
# transition/death/termination) until first
# cell that did neither die nor transition
self.cellwise_df = None
def __str__(self):
if self.get_status() == 0:
status_str = 'End of simulation reached'
elif self.get_status() == 1:
status_str = 'Extinction reached'
elif self.get_status() == 2:
status_str = 'Maximum cell count reached'
elif self.get_status() == 3:
    status_str = 'Minimum cell count reached'
elif self.get_status() == 4:
    status_str = 'Clone maximum cell count reached (clone {})'.format(self.get_status_info())
elif self.get_status() == 5:
    status_str = 'Clone minimum cell count reached (clone {})'.format(self.get_status_info())
else:
    raise Exception('Unknown status: {}'.format(self.get_status()))
unique_clones = self.get_unique_clones()
num_divisions = self.get_num_divisions()
num_transitions = self.get_num_transitions()
num_deaths = self.get_num_deaths()
total_cell_count = self.get_total_cell_count()
timeseries_df = self.get_timeseries_df()
timeseries_df_all = timeseries_df.describe(include='all')
cellwise_df = self.get_cellwise_df()
with warnings.catch_warnings():
# cellwise_df may contain 'inf' values (e.g. for tdeath). This
# creates warnings when computing summary statistics, which we
# suppress with this construction.
warnings.simplefilter('ignore')
cellwise_df_all = cellwise_df.describe(include='all')
return """
Global data
-----------
status:\t{}
unique_clones:\t\t{}
num_divisions:\t\t{}
num_transitions:\t{}
num_deaths:\t\t{}
total_cell_count:\t{}
Timeseries data
---------------
{}
{}
Cellwise data
-------------
{}
{}
""".format(status_str,
unique_clones,
num_divisions,
num_transitions,
num_deaths,
total_cell_count,
timeseries_df,
timeseries_df_all,
cellwise_df,
cellwise_df_all
)
def get_unique_clones(self):
if self.unique_clones is None:
self.unique_clones = np.unique(self.data['clone'])
return self.unique_clones
def get_num_divisions(self):
if self.num_divisions is None:
self.num_divisions = np.sum(self.get_divided())
return self.num_divisions
def get_num_divisions_for_clone(self, clone):
if not clone in self.num_divisions_for_clone:
self.num_divisions_for_clone[clone] = np.sum(
np.logical_and(self.get_clone() == clone, self.get_divided()))
return self.num_divisions_for_clone[clone]
def get_num_transitions(self):
# Note that this is not the sum(self.get_transitioned()), because that
# function also counts cells that are in G2 at the start of the
# simulation as transitioned
if self.num_transitions is None:
self.num_transitions = sum(array.size for array in self.data['transition'])
return self.num_transitions
def get_num_transitions_for_clone(self, clone):
if not clone in self.num_transitions_for_clone:
self.num_transitions_for_clone[clone] = sum(
np.sum(self.get_clone()[array] == clone) for array in self.data['transition'])
return self.num_transitions_for_clone[clone]
def get_num_deaths(self):
if self.num_deaths is None:
self.num_deaths = np.sum(self.get_died())
return self.num_deaths
def get_num_deaths_for_clone(self, clone):
if not clone in self.num_deaths_for_clone:
self.num_deaths_for_clone[clone] = np.sum(
np.logical_and(self.get_clone() == clone, self.get_died()))
return self.num_deaths_for_clone[clone]
def get_status(self):
if self.status is None:
self.status = self.data['status']
return self.status
def get_status_info(self):
if self.status_info is None:
self.status_info = self.data['status_info']
return self.status_info
def get_t_events(self):
if self.t_events is None:
self.t_events = np.array(self.data['t_events'])
return self.t_events
def get_cell_count(self):
if self.cell_count is None:
self.cell_count = np.array(
[len(cell_indices) for cell_indices in self.data['cell_indices']])
# Repeat last cell count at termination time
self.cell_count = np.append(self.cell_count, self.cell_count[-1])
return self.cell_count
def get_cell_count_for_clone(self, clone):
if not clone in self.cell_count_for_clone:
cell_count = []
for cell_indices in self.data['cell_indices']:
cell_count.append(np.sum(self.get_clone()[cell_indices] == clone))
# Repeat last cell count at termination time
cell_count.append(cell_count[-1])
# Save as numpy array
self.cell_count_for_clone[clone] = np.array(cell_count)
return self.cell_count_for_clone[clone]
def get_total_cell_count(self):
if self.total_cell_count is None:
self.total_cell_count = len(self.get_tbirth())
return self.total_cell_count
def get_clone_cell_count(self):
if self.clone_cell_count is None:
unique_clones = self.get_unique_clones()
self.clone_cell_count = np.zeros((len(self.get_t_events()), len(unique_clones)))
for i, cell_indices in enumerate(self.data['cell_indices']):
if len(cell_indices) == 0:
continue
clones = self.get_clone()[cell_indices]
unique_clones_now, counts = np.unique(clones, return_counts=True)
for clone, count in zip(unique_clones_now, counts):
idx = np.searchsorted(unique_clones, clone)
self.clone_cell_count[i, idx] = count
# Repeat last cell count at termination time
self.clone_cell_count[-1,:] = self.clone_cell_count[-2, :]
return self.clone_cell_count
def get_G1_cell_count(self):
if self.G1_cell_count is None:
self.G1_cell_count = np.array([np.sum(isinG1)
for isinG1 in self.data['isinG1']])
# Repeat last cell count at termination time
self.G1_cell_count = np.append(self.G1_cell_count, self.G1_cell_count[-1])
return self.G1_cell_count
def get_G1_cell_count_for_clone(self, clone):
if not clone in self.G1_cell_count_for_clone:
G1_cell_count = []
for cell_indices, isinG1 in zip(self.data['cell_indices'], self.data['isinG1']):
G1_cell_count.append(np.sum(np.logical_and(self.get_clone()[cell_indices] == clone,
isinG1)))
# Repeat last cell count at termination time
G1_cell_count.append(G1_cell_count[-1])
# Save as numpy array
self.G1_cell_count_for_clone[clone] = np.array(G1_cell_count)
return self.G1_cell_count_for_clone[clone]
def get_G2_cell_count(self):
if self.G2_cell_count is None:
self.G2_cell_count = np.array([np.sum(np.logical_not(isinG1))
for isinG1 in self.data['isinG1']])
# Repeat last cell count at termination time
self.G2_cell_count = np.append(self.G2_cell_count, self.G2_cell_count[-1])
return self.G2_cell_count
def get_G2_cell_count_for_clone(self, clone):
if not clone in self.G2_cell_count_for_clone:
G2_cell_count = []
for cell_indices, isinG1 in zip(self.data['cell_indices'], self.data['isinG1']):
G2_cell_count.append(np.sum(np.logical_and(self.get_clone()[cell_indices] == clone,
np.logical_not(isinG1))))
# Repeat last cell count at termination time
G2_cell_count.append(G2_cell_count[-1])
# Save as numpy array
self.G2_cell_count_for_clone[clone] = np.array(G2_cell_count)
return self.G2_cell_count_for_clone[clone]
def get_timeseries_df(self):
if self.timeseries_df is None:
timeseries_dict = {
't_events' : self.get_t_events(),
'cell_count' : self.get_cell_count(),
}
for clone, cell_count in \
zip(self.get_unique_clones(), self.get_clone_cell_count().T):
timeseries_dict['clone_{}'.format(clone)] = cell_count
self.timeseries_df = pd.DataFrame(timeseries_dict)
return self.timeseries_df
def get_t_grid(self):
if self.t_grid is None:
self.t_grid = np.concatenate(self.data['t_grid'])
return self.t_grid
def get_tau_for_cell_index(self, cell_index):
if not cell_index in self.tau:
cur_tau = np.array([], dtype=float)
last_G1_tau = 0
# Loop over time intervals
for t_grid, tau, cell_indices, isinG1 in \
zip(self.data['t_grid'], self.data['tau'],
self.data['cell_indices'], self.data['isinG1']):
# Cell is alive in this time interval
if cell_index in cell_indices:
matches = np.nonzero(cell_indices == cell_index)[0]
assert len(matches) == 1
local_idx = matches[0]
if isinG1[local_idx]:
cur_tau = np.append(cur_tau, tau[local_idx])
last_G1_tau = cur_tau[-1]
else:
cur_tau = np.append(cur_tau, t_grid * 0 + last_G1_tau)
else:
cur_tau = np.append(cur_tau, t_grid * 0)
self.tau[cell_index] = cur_tau
return self.tau[cell_index]
def get_last_tau(self):
if self.last_tau is None:
self.last_tau = np.zeros(self.get_total_cell_count(), dtype=float)
# Loop over time intervals
for t_grid, tau, cell_indices, isinG1 in \
zip(self.data['t_grid'], self.data['tau'],
self.data['cell_indices'], self.data['isinG1']):
cur_last_taus = tau[:, -1]
self.last_tau[cell_indices[isinG1]] = cur_last_taus[isinG1]
return self.last_tau
def get_average_f(self):
if self.average_f is None:
# If time_in_G1 is zero, then replace by inf so that average f
# computes to zero.
t = np.array(self.get_time_in_G1())
t[t == 0] = np.inf
self.average_f = self.get_last_tau() / t
return self.average_f
def get_tbirth(self):
if self.tbirth is None:
self.tbirth = self.data['tbirth']
return self.tbirth
def get_tG1(self):
if self.tG1 is None:
self.tG1 = self.data['tG1']
return self.tG1
def get_clone(self):
if self.clone is None:
self.clone = self.data['clone']
return self.clone
def get_died(self):
if self.died is None:
self.died = np.zeros(self.get_total_cell_count(), dtype=bool)
for death_indices in self.data['death']:
self.died[death_indices] = True
return self.died
def get_tdeath(self):
if self.tdeath is None:
self.tdeath = np.ones(self.get_total_cell_count(), dtype=float) * np.inf
for time, death_indices in zip(self.get_t_events()[1:-1], self.data['death']):
self.tdeath[death_indices] = time
return self.tdeath
def get_divided(self):
if self.divided is None:
self.divided = np.zeros(self.get_total_cell_count(), dtype=bool)
for division_indices in self.data['division']:
self.divided[division_indices] = True
return self.divided
def get_tdivision(self):
if self.tdivision is None:
self.tdivision = np.ones(self.get_total_cell_count(), dtype=float) * np.inf
for time, division_indices in zip(self.get_t_events()[1:-1], self.data['division']):
self.tdivision[division_indices] = time
return self.tdivision
def get_transitioned(self):
if self.transitioned is None:
self.transitioned = np.zeros(self.get_total_cell_count(), dtype=bool)
for transition_indices in self.data['transition']:
self.transitioned[transition_indices] = True
# Cells that were already in G2 at start of simulation should be
# considered to have transitioned
tstart = self.get_t_events()[0]
cell_indices_start = self.data['cell_indices'][0]
tG1_start = self.get_tG1()[cell_indices_start]
tbirth_start = self.get_tbirth()[cell_indices_start]
in_G2_at_start = tstart - tbirth_start >= tG1_start
self.transitioned[cell_indices_start[in_G2_at_start]] = True
return self.transitioned
def get_ttransition(self):
if self.ttransition is None:
self.ttransition = np.ones(self.get_total_cell_count(), dtype=float) * np.inf
for time, transition_indices in zip(self.get_t_events()[1:-1], self.data['transition']):
self.ttransition[transition_indices] = time
# Cells that were already in G2 at start of simulation should be
# considered to have spent tG1 in G1
tstart = self.get_t_events()[0]
cell_indices_start = self.data['cell_indices'][0]
tG1_start = self.get_tG1()[cell_indices_start]
tbirth_start = self.get_tbirth()[cell_indices_start]
in_G2_at_start = tstart - tbirth_start >= tG1_start
self.ttransition[cell_indices_start[in_G2_at_start]] = \
tbirth_start[in_G2_at_start] + tG1_start[in_G2_at_start]
return self.ttransition
def get_t_last_alive(self):
if self.t_last_alive is None:
# In case of normal termination, tend is last time point in
# t_events, else the second to last one
if self.get_status() == 0:
tend = self.get_t_events()[-1]
else:
tend = self.get_t_events()[-2]
self.t_last_alive = np.minimum(tend,
np.minimum(self.get_tdeath(), self.get_tdivision()))
return self.t_last_alive
def get_max_age(self):
if self.max_age is None:
self.max_age = self.get_t_last_alive() - self.get_tbirth()
return self.max_age
def get_time_in_G1(self):
if self.time_in_G1 is None:
self.time_in_G1 = np.minimum(self.get_ttransition(), self.get_t_last_alive()) \
- self.get_tbirth()
return self.time_in_G1
def get_time_in_G2(self):
if self.time_in_G2 is None:
self.time_in_G2 = np.zeros(self.get_total_cell_count(), dtype=float)
transitioned = self.get_transitioned()
t_last_alive = self.get_t_last_alive()
ttransition = self.get_ttransition()
self.time_in_G2[transitioned] = t_last_alive[transitioned] \
- ttransition[transitioned]
return self.time_in_G2
def get_effective_time_in_G1(self):
if self.effective_time_in_G1 is None:
died_or_transitioned = np.logical_or(self.get_died(), self.get_transitioned())
indices = np.nonzero(np.logical_not(died_or_transitioned))[0]
if len(indices) > 0:
self.effective_time_in_G1 = self.get_time_in_G1()[:indices[0]]
else:
self.effective_time_in_G1 = self.get_time_in_G1()
return self.effective_time_in_G1
def get_cellwise_df(self):
if self.cellwise_df is None:
cellwise_dict = {
'tbirth' : self.get_tbirth(),
'tG1' : self.get_tG1(),
'clone' : self.get_clone(),
'died' : self.get_died(),
'tdeath' : self.get_tdeath(),
'divided' : self.get_divided(),
'tdivision' : self.get_tdivision(),
'transitioned' : self.get_transitioned(),
'ttransition' : self.get_ttransition(),
't_last_alive' : self.get_t_last_alive(),
'max_age' : self.get_max_age(),
'time_in_G1' : self.get_time_in_G1(),
'time_in_G2' : self.get_time_in_G2(),
}
self.cellwise_df = pd.DataFrame(cellwise_dict)
return self.cellwise_df
def get_death_clock_signal(self, f, f_args=None):
if f_args is None:
f_args = self.data['param']['f_args']
cur_f = []
for t_grid, tau, cell_indices, isinG1 in \
zip(self.data['t_grid'], self.data['tau'],
self.data['cell_indices'], self.data['isinG1']):
tbirth = self.get_tbirth()[cell_indices]
tG1 = self.get_tG1()[cell_indices]
clone = self.get_clone()[cell_indices]
for t, cur_tau in zip(t_grid, tau.T):
cur_f.append(f(t, cur_tau, tbirth, tG1, clone, isinG1, *f_args))
return np.array(cur_f)
def get_integrated_death_clock_signal(self, f):
t_grid = self.get_t_grid()
cur_f = self.get_death_clock_signal(f)
assert len(t_grid) == len(cur_f)
# Apply trapezoid rule to integrate signal
F = np.concatenate([[0.0], 0.5 * (cur_f[1:] + cur_f[:-1]) * (t_grid[1:] - t_grid[:-1])])
"""
******
Layout
******
Node positioning algorithms for graph drawing.
"""
__author__ = """<NAME> (<EMAIL>)\n<NAME>(<EMAIL>)"""
# Copyright (C) 2004-2009 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
__all__ = ['circular_layout',
'random_layout',
'shell_layout',
'spring_layout',
'spectral_layout',
'fruchterman_reingold_layout']
import networkx as nx
def random_layout(G,dim=2):
"""Position nodes uniformly at random in the unit square.
For every node, a position is generated by choosing each of dim
coordinates uniformly at random on the interval [0.0, 1.0).
NumPy (http://scipy.org) is required for this function.
Parameters
----------
G : NetworkX graph
A position will be assigned to every node in G.
dim : int
Dimension of layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.lollipop_graph(4, 3)
>>> pos = nx.random_layout(G)
"""
try:
import numpy as np
except ImportError:
raise ImportError("random_layout() requires numpy: http://scipy.org/ ")
n=len(G)
pos=np.asarray(np.random.random((n,dim)),dtype=np.float32)
return dict(list(zip(G,pos)))
def circular_layout(G, dim=2, scale=1):
# dim=2 only
"""Position nodes on a circle.
Parameters
----------
G : NetworkX graph
dim : int
Dimension of layout, currently only dim=2 is supported
scale : float
Scale factor for positions
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.circular_layout(G)
Notes
------
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
try:
import numpy as np
except ImportError:
raise ImportError("circular_layout() requires numpy: http://scipy.org/ ")
if len(G)==0:
return {}
if len(G)==1:
return {G.nodes()[0]:(1,)*dim}
t=np.arange(0,2.0*np.pi,2.0*np.pi/len(G),dtype=np.float32)
pos=np.transpose(np.array([np.cos(t),np.sin(t)]))
pos=_rescale_layout(pos,scale=scale)
return dict(list(zip(G,pos)))
def shell_layout(G,nlist=None,dim=2,scale=1):
"""Position nodes in concentric circles.
Parameters
----------
G : NetworkX graph
nlist : list of lists
List of node lists for each shell.
dim : int
Dimension of layout, currently only dim=2 is supported
scale : float
Scale factor for positions
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> shells=[[0],[1,2,3]]
>>> pos=nx.shell_layout(G,shells)
Notes
------
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
try:
import numpy as np
except ImportError:
raise ImportError("shell_layout() requires numpy: http://scipy.org/ ")
if len(G)==0:
return {}
if len(G)==1:
return {G.nodes()[0]:(1,)*dim}
if nlist is None:
nlist=[G.nodes()] # draw the whole graph in one shell
if len(nlist[0])==1:
radius=0.0 # single node at center
else:
radius=1.0 # else start at r=1
npos={}
for nodes in nlist:
t=np.arange(0,2.0*np.pi,2.0*np.pi/len(nodes),dtype=np.float32)
pos=np.transpose(np.array([radius*np.cos(t),radius*np.sin(t)]))
npos.update(dict(list(zip(nodes,pos))))
radius+=1.0
# FIXME: rescale
return npos
def fruchterman_reingold_layout(G,dim=2,
pos=None,
fixed=None,
iterations=50,
weighted=True,scale=1):
"""Position nodes using Fruchterman-Reingold force-directed algorithm.
Parameters
----------
G : NetworkX graph
dim : int
Dimension of layout
pos : dict
Initial positions for nodes as a dictionary with node as keys
and values as a list or tuple.
fixed : list
Nodes to keep fixed at initial position.
iterations : int
Number of iterations of spring-force relaxation
weighted : boolean
If True, use edge weights in layout
scale : float
Scale factor for positions
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.spring_layout(G)
# The same using longer function name
>>> pos=nx.fruchterman_reingold_layout(G)
"""
try:
import numpy as np
except ImportError:
raise ImportError("fruchterman_reingold_layout() requires numpy: http://scipy.org/ ")
if fixed is not None:
nfixed=dict(list(zip(G,list(range(len(G))))))
fixed=np.asarray([nfixed[v] for v in fixed])
if pos is not None:
pos_arr=np.asarray(np.random.random((len(G),dim)))
for n,i in zip(G,list(range(len(G)))):
if n in pos:
pos_arr[i]=np.asarray(pos[n])
else:
pos_arr=None
if len(G)==0:
return {}
if len(G)==1:
return {G.nodes()[0]:(1,)*dim}
try:
# Use the sparse solver only for large graphs (>= 500 nodes);
# smaller graphs fall through to the dense solver in the except branch below.
if len(G) < 500:
raise ValueError
A=nx.to_scipy_sparse_matrix(G)
pos=_sparse_fruchterman_reingold(A,
pos=pos_arr,
fixed=fixed,
dim=dim,
iterations=iterations,
weighted=weighted)
except Exception:
A=nx.to_numpy_matrix(G)
pos=_fruchterman_reingold(A,
pos=pos_arr,
fixed=fixed,
dim=dim,
iterations=iterations,
weighted=weighted)
if fixed is None:
pos=_rescale_layout(pos,scale=scale)
return dict(list(zip(G,pos)))
spring_layout=fruchterman_reingold_layout
def _fruchterman_reingold(A,dim=2,
pos=None,
fixed=None,
iterations=50,
weighted=True):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
try:
import numpy as np
except ImportError:
raise ImportError("_fruchterman_reingold() requires numpy: http://scipy.org/ ")
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(
"fruchterman_reingold() takes an adjacency matrix as input")
A=np.asarray(A) # make sure we have an array instead of a matrix
if not weighted: # use 0/1 adjacency instead of weights
A=np.where(A==0,A,A/A)
if pos is None:
# random initial positions
pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos=pos.astype(A.dtype)
# optimal distance between nodes
k=np.sqrt(1.0/nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t=0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt=t/float(iterations+1)
delta = np.zeros((pos.shape[0],pos.shape[0],pos.shape[1]),dtype=A.dtype)
# the inscrutable (but fast) version
# this is still O(V^2)
# could use multilevel methods to speed this up significantly
for iteration in range(iterations):
# matrix of difference between points
for i in range(pos.shape[1]):
delta[:,:,i]= pos[:,i,None]-pos[:,i]
# distance between points
distance=np.sqrt((delta**2).sum(axis=-1))
# enforce minimum distance of 0.01
distance=np.where(distance<0.01,0.01,distance)
# displacement "force"
displacement=np.transpose(np.transpose(delta)*\
(k*k/distance**2-A*distance/k))\
.sum(axis=1)
# update positions
length=np.sqrt((displacement**2).sum(axis=1))
length=np.where(length<0.01,0.1,length)
delta_pos=np.transpose(np.transpose(displacement)*t/length)
if fixed is not None:
# don't change positions of fixed nodes
delta_pos[fixed]=0.0
pos+=delta_pos
# cool temperature
t-=dt
return pos
def _sparse_fruchterman_reingold(A,dim=2,
pos=None,
fixed=None,
iterations=50,
weighted=True):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
# Sparse version
try:
import numpy as np
except ImportError:
raise ImportError("_sparse_fruchterman_reingold() requires numpy: http://scipy.org/ ")
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(
"fruchterman_reingold() takes an adjacency matrix as input")
try:
from scipy.sparse import spdiags,coo_matrix
except ImportError:
raise ImportError("_sparse_fruchterman_reingold() scipy numpy: http://scipy.org/ ")
# make sure we have a LIst of Lists representation
try:
A=A.tolil()
except Exception:
A=(coo_matrix(A)).tolil()
if not weighted: # use 0/1 adjacency instead of weights
A=np.where(A==0,A,A/A)
if pos is None:
# random initial positions
pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos=pos.astype(A.dtype)
# no fixed nodes
if fixed is None:
fixed=[]
# optimal distance between nodes
k=np.sqrt(1.0/nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t=0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt=t/float(iterations+1)
displacement=np.zeros((dim,nnodes))
for iteration in range(iterations):
displacement*=0
# loop over rows
for i in range(A.shape[0]):
if i in fixed:
continue
# difference between this row's node position and all others
delta=(pos[i]-pos).T
# distance between points
distance=np.sqrt((delta**2).sum(axis=0))
# enforce minimum distance of 0.01
distance=np.where(distance<0.01,0.01,distance)
# the adjacency matrix row
Ai=np.asarray(A.getrowview(i).toarray())
# displacement "force"
displacement[:,i]+=\
(delta*(k*k/distance**2-Ai*distance/k)).sum(axis=1)
# update positions
length=np.sqrt((displacement**2).sum(axis=0))
length=np.where(length<0.01,0.1,length)
pos+=(displacement*t/length).T
# cool temperature
t-=dt
return pos
def spectral_layout(G,dim=2,weighted=True,scale=1):
"""Position nodes using the eigenvectors of the graph Laplacian.
Parameters
----------
G : NetworkX graph
dim : int
Dimension of layout
weighted : boolean
If True, use edge weights in layout
scale : float
Scale factor for positions
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.spectral_layout(G)
Notes
-----
Directed graphs will be considered as undirected graphs when
positioning the nodes.
For larger graphs (>500 nodes) this will use the SciPy sparse
eigenvalue solver (ARPACK).
"""
# handle some special cases that break the eigensolvers
try:
import numpy as np
except ImportError:
raise ImportError("spectral_layout() requires numpy: http://scipy.org/ ")
if len(G)<=2:
if len(G)==0:
pos=np.array([])
elif len(G)==1:
pos=np.array([[1,1]])
else:
pos=np.array([[0,0.5],[1,0.5]])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 18:05:21 2018
@author: simao
"""
import os
import pickle
import numpy as np
import random
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
# Ensure reproducibility:
os.environ['PYTHONHASHSEED'] = '0'
def restart_random():
np.random.seed(1337)
random.seed(12345)
class Loader():
def __init__(self, relpath='./dualmyo_dataset.pkl') :
self.ABSPATH = os.path.dirname(__file__) + os.path.sep + relpath
if not os.path.exists(self.ABSPATH):
raise FileNotFoundError
def load(self):
# Return a tuple with a list of samples and a list of targets
with open(self.ABSPATH,'rb') as file :
return pickle.load(file)
def load_syntetic_sequences(self):
restart_random()
def split(self, targets, train_ratio=0.6, val_ratio=0.2, test_ratio=0.2):
restart_random()
ind_all = np.arange(len(targets))
targets = np.array(targets)
# Split 1 : all -> train and rest
ind_train, ind_test = train_test_split(ind_all,
shuffle=True,
stratify=targets[ind_all],
test_size=val_ratio+test_ratio,
random_state=42)
# Split 2 : rest -> val and test
ind_val, ind_test = train_test_split(ind_test,
shuffle=True,
stratify=targets[ind_test],
test_size=(test_ratio/(val_ratio + test_ratio)),
random_state=42)
return ind_train, ind_val, ind_test
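# Illustrative sketch: the two-stage stratified split performed by Loader.split above,
# written out with a synthetic target vector so it can run without the dualmyo pickle
# file being present. The class counts and ratios are arbitrary assumptions.
def _demo_two_stage_split():
    targets = np.repeat(np.arange(4), 25)       # 4 balanced classes, 100 samples
    ind_all = np.arange(len(targets))
    # Stage 1: 60% train, 40% held out (stratified on the class labels)
    ind_train, ind_rest = train_test_split(ind_all, shuffle=True, stratify=targets,
                                           test_size=0.4, random_state=42)
    # Stage 2: split the held-out 40% evenly into validation and test sets
    ind_val, ind_test = train_test_split(ind_rest, shuffle=True,
                                         stratify=targets[ind_rest],
                                         test_size=0.5, random_state=42)
    return len(ind_train), len(ind_val), len(ind_test)  # (60, 20, 20)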
class SynteticSequences():
def __init__(self, data):
# Rest
ind_rest = data[1] == 0
data_rest = np.concatenate([sample for sample in data[0][ind_rest]], axis=0)
# data_rest = np.delete(data_rest,[0,1,10,11], axis=1)
self.rest_dist = normal_dist_par(data_rest)
self.samples = (data[0][~ind_rest], data[1][~ind_rest])
self.interval_dist = (3,0.5) # Mean / dist
self.transition_dist = (0.2, 0.05)
def load_sequences(self, n=8):
restart_random()
targets = self.samples[1]
samples = self.samples[0]
n_samples = samples.shape[0]
# Shuffle samples
ind_shuf = np.random.permutation( np.arange(targets.shape[0]) )
if n_samples % n:  # Remove samples we can't use (slicing with -0 would drop everything)
    ind_shuf = ind_shuf[:-(n_samples % n)]
idxs = ind_shuf.reshape((-1,n))
# Reshape samples into subsequences without dropping samples
# sequence_targets = np.zeros((n_samples // n, n + int((n_samples % n) > 0) ))
# for i in range(len(sequence_targets)):
# if i < n_samples % n :
# sequence_targets[i,:] = ind_shuf[i*(n+1):(i+1)*(n+1)]
# else:
# sequence_targets[i,:-1] = ind_shuf[i*n:(i+1)*n]
# Add indexes (-1) for intervals between gestures:
idxs2 = -1 * np.ones((idxs.shape[0], 2*n + 1))
idxs2[:,1::2] = idxs
idxs2 = idxs2.astype(int)
master_seq = []
master_tar = []
# Iterate over sequences:
for seq in idxs2:
curr_seq = []
curr_tar = []
# Generate/get samples with intervals:
for idx in seq:
interval_len = int(sample_normal_dist(self.interval_dist) * 200) # 200 fps, sampled len is in seconds
if idx == -1:
curr_seq.append( self.sample_interval(n_frames=interval_len) )
curr_tar.append(0)
else:
curr_seq.append( samples[idx].reshape((1,)+samples[idx].shape) )
curr_tar.append(targets[idx])
# Generate and add transitions
curr_seq_trans = [curr_seq[0]]
curr_tar_trans = [curr_tar[0]]
for i in range(1,len(curr_seq)):
frame1 = curr_seq[i-1][0,-1,:]
frame2 = curr_seq[i][0,0,:]
transition_len = int(sample_normal_dist(self.transition_dist) * 200)
curr_seq_trans.append(self.sample_transition(frame1, frame2, transition_len))
curr_seq_trans.append(curr_seq[i])
curr_tar_trans.append(-1)
curr_tar_trans.append(curr_tar[i])
master_seq.append(curr_seq_trans)
master_tar.append(curr_tar_trans)
# Post-process targets
T = []
for seq, tar in zip(master_seq, master_tar):
N = np.array([x.shape[1] for x in seq])
|
import os
import argparse
import numpy as np
import nibabel as nib
from scipy import signal as sci_signal
from nilearn import image, masking
from tqdm import tqdm
def get_numerator(signal_a, signal_b, lag):
"""
Calculates the numerator of the cross-correlation equation.
Parameters
----------
signal_a : array_like (1D)
Reference signal.
signal_b : array_like (1D)
Test signal. Must be the same length as signal_a.
lag : int
Lag by which signal_b will be shifted relative to signal_a.
Returns
-------
array_like (1D)
Element-wise product of matching time points in the lagged signals.
"""
if lag == 0:
numerator = np.multiply(signal_a, signal_b)
# If lag is positive, shift signal_b forwards relative to signal_a.
if lag > 0:
numerator = np.multiply(signal_a[lag:], signal_b[0:-lag])
# If lag is negative, shift signal_b backward relative to signal_a.
if lag < 0:
numerator = np.multiply(signal_b[-lag:], signal_a[0:lag])
return numerator
def get_denominator(signal_a, signal_b):
"""
Calculates the denominator of the cross-correlation equation.
Parameters
----------
signal_a : array_like (1D)
Reference signal.
signal_b : array_like (1D)
Test signal. Must be the same length as signal_a.
Returns
-------
float
Product of the standard deviations of the input signals.
"""
return np.std(signal_a) * np.std(signal_b)
def calc_xcorr(signal_a, signal_b, lag):
"""
Calculate the cross-correlation of two signals at a given lag.
Parameters
----------
signal_a : array_like (1D)
Reference signal.
signal_b : array_like (1D)
Test signal. Must be the same length as signal_a.
lag : int
Lag by which signal_b will be shifted relative to signal_a.
Returns
-------
float
Normalized cross-correlation.
"""
xcorr = np.true_divide(1., len(signal_a)-np.absolute(lag)) * np.sum(np.true_divide(get_numerator(signal_a, signal_b, lag),
get_denominator(signal_a, signal_b)))
return xcorr
def xcorr_range(signal_a, signal_b, lags):
"""
Calculate the cross-correlation of two signals over a range of lags.
Parameters
----------
signal_a : array_like (1D)
Reference signal.
signal_b : array_like (1D)
Test signal. Must be the same length as signal_a.
lags : array_like (1D)
Lags by which signal_b will be shifted relative to signal_a.
Returns
-------
array_like (1D)
Normalized cross-correlation at each lag.
"""
xcorr_vals = []
for lag in lags:
xcorr = calc_xcorr(signal_a, signal_b, lag)
xcorr_vals.append(xcorr)
return np.array(xcorr_vals)
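# Illustrative sketch: a check that xcorr_range above recovers a known shift. The
# sinusoidal test signal and lag range are arbitrary assumptions. Note the sign
# convention in get_numerator: a *delayed* test signal peaks at a *negative* lag.
def _demo_xcorr_recovers_shift(shift=5, n=500):
    t = np.arange(n)
    signal_a = np.sin(2 * np.pi * t / 50.0)
    signal_b = np.roll(signal_a, shift)   # delay signal_b by `shift` samples
    lags = np.arange(-10, 11)
    xcorr_vals = xcorr_range(signal_a, signal_b, lags)
    return lags[np.argmax(xcorr_vals)]    # expected: -shift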
# Adapted from https://gist.github.com/endolith/255291
def parabolic(sample_array, peak_index):
"""
Quadratic interpolation for estimating the true position of an
inter-sample local maximum when nearby samples are known.
Parameters
----------
sample_array : array_like (1D)
Array of samples.
peak_index : int
Index for the local maximum in sample_array for which to estimate the inter-sample maximum.
Returns
-------
tuple
The (x,y) coordinates of the vertex of a parabola through peak_index and its two neighbors.
"""
vertex_x = 1/2. * (sample_array[peak_index-1] - sample_array[peak_index+1]) / (sample_array[peak_index-1] - 2 * sample_array[peak_index] + sample_array[peak_index+1]) + peak_index
vertex_y = sample_array[peak_index] - 1/4. * (sample_array[peak_index-1] - sample_array[peak_index+1]) * (vertex_x - peak_index)
return (vertex_x, vertex_y)
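# Illustrative sketch: parabolic() recovers an off-grid maximum exactly when the
# samples really do lie on a parabola. The grid and peak position are assumptions.
def _demo_parabolic_interpolation():
    x = np.arange(10)
    true_peak = 4.3                         # deliberately between samples
    samples = 1.0 - (x - true_peak) ** 2
    peak_index = int(np.argmax(samples))    # nearest on-grid sample (index 4)
    return parabolic(samples, peak_index)   # expected: (~4.3, ~1.0)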
def gen_lag_map(epi_img, brain_mask_img, gm_mask_img, lags):
print("...masking data...")
epi_gm_masked_data = masking.apply_mask(epi_img, gm_mask_img)
gm_mean_signal = np.mean(epi_gm_masked_data, axis=1)
epi_brain_masked_data = masking.apply_mask(epi_img, brain_mask_img).T
lag_index_correction = np.sum(np.array(lags) > 0)
xcorr_array = []
print("...calculating lags...")
for voxel in tqdm(epi_brain_masked_data, unit='voxel'):
vox_signal = voxel
vox_xcorr = xcorr_range(gm_mean_signal, vox_signal, lags)
xcorr_maxima = sci_signal.argrelmax(np.array(vox_xcorr), order=1)[0]
if len(xcorr_maxima) == 0:
interp_max = np.argmax(vox_xcorr)
|
from __future__ import print_function, division, absolute_import
import os
import re
from collections import defaultdict
from operator import itemgetter
import logging
import sys
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from scipy.integrate import quad
from scipy.optimize import minimize_scalar, fmin
import matplotlib.pyplot as plt
import numpy as np
import emcee
import h5py
import pandas as pd
from george import kernels
import george
from kglib import fitters
from kglib.utils import StarData
from kglib.utils.HelperFunctions import mad, integral
from kglib.spectral_type import SpectralTypeRelations
def classify_filename(fname, type='bright'):
"""
Given a CCF filename, classify the star combination, temperature, metallicity, and vsini
:param fname:
:return:
"""
# First, remove any leading directories
fname = fname.split('/')[-1]
# Star combination
m1 = re.search(r'\.[0-9]+kps', fname)
stars = fname[:m1.start()]
star1 = stars.split('+')[0].replace('_', ' ')
star2 = stars.split('+')[1].split('_{}'.format(type))[0].replace('_', ' ')
# secondary star vsini
vsini = float(fname[m1.start() + 1:].split('kps')[0])
# Temperature
m2 = re.search(r'[0-9]+\.0K', fname)
temp = float(m2.group()[:-1])
# logg
m3 = re.search(r'K\+[0-9]\.[0-9]', fname)
logg = float(m3.group()[1:])
# metallicity
metal = float(fname.split(str(logg))[-1])
return star1, star2, vsini, temp, logg, metal
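# Illustrative sketch: an example of the filename convention that classify_filename
# above appears to expect. The filename below is hypothetical and constructed purely
# to match the regular expressions used in the parser.
def _demo_classify_filename():
    fname = 'HIP_1234+GJ_567_bright.10kps_5000.0K+4.5-0.5'
    # Expected parse: ('HIP 1234', 'GJ 567', 10.0, 5000.0, 4.5, -0.5)
    return classify_filename(fname, type='bright')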
def get_ccf_data(basedir, primary_name=None, secondary_name=None, vel_arr=np.arange(-900.0, 900.0, 0.1), type='bright'):
"""
Searches the given directory for CCF files, and classifies
by star, temperature, metallicity, and vsini
:param basedir: The directory to search for CCF files
:keyword primary_name: Optional keyword. If given, it will only get the requested primary star data
:keyword secondary_name: Same as primary_name, but only reads ccfs for the given secondary
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
primary = []
secondary = []
vsini_values = []
temperature = []
gravity = []
metallicity = []
ccf = []
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
if primary_name is not None and star1.lower() != primary_name.lower():
continue
if secondary_name is not None and star2.lower() != secondary_name.lower():
continue
vel, corr = np.loadtxt(fname, unpack=True)
fcn = spline(vel, corr)
ccf.append(fcn(vel_arr))
primary.append(star1)
secondary.append(star2)
vsini_values.append(vsini)
temperature.append(temp)
gravity.append(logg)
metallicity.append(metal)
# Make a pandas dataframe with all this data
df = pd.DataFrame(data={'Primary': primary, 'Secondary': secondary, 'Temperature': temperature,
'vsini': vsini_values, 'logg': gravity, '[Fe/H]': metallicity, 'CCF': ccf})
return df
def get_ccf_summary(hdf5_filename, vel_arr=np.arange(-900.0, 900.0, 0.1), excel_filename=None,
velocity='highest', addmode='simple', Tmin=3000, Tmax=7000, N_best=1, debug=False):
"""
Goes through the given HDF5 file, and finds the best set of parameters for each combination of primary/secondary star
:param hdf5_filename: The HDF5 file containing the CCF data
:keyword excel_filename: The filename of an MS excel file giving the velocity for each secondary star.
The data must be in the first sheet, and there must be columns labeled
'Star' and 'CCF RV'. Only used if velocity='excel'
:keyword velocity: The velocity to measure the CCF at. Options are:
- 'highest' (default): uses the maximum of the ccf
- value: A numeric type giving the velocity to use.
- 'excel': Search the filename excel_filename for the velocity of each secondary star
:keyword vel_arr: The velocities to interpolate each ccf at
:keyword addmode: The way the CCF orders were added while generating the ccfs
:keyword debug: If True, print progress to stdout. Otherwise, work silently (this can take a while)
:keyword Tmin, Tmax: The minimum and maximum temperatures to include in the output.
:keyword N_best: Passed to find_best_pars()
:return: pandas DataFrame summarizing the best parameters.
This is the type of dataframe to give to the other function here
"""
if velocity.lower() == 'excel':
table = pd.read_excel(excel_filename, 0)
summary_dfs = []
with h5py.File(hdf5_filename, 'r') as f:
primaries = f.keys()
for p in primaries:
secondaries = f[p].keys()
for s in secondaries:
if addmode not in f[p][s].keys():
continue
logging.info('Primary: {}\tSecondary: {}'.format(p, s))
if velocity.lower() == 'excel':
try:
vel_max = table.loc[table.Star.str.lower().str.contains(s.strip().lower())]['CCF RV'].item()
except ValueError:
logging.warning('No entry found for star "{}" in table {}'.format(s, excel_filename))
continue
else:
vel_max = velocity
datasets = f[p][s][addmode].keys()
vsini_values = []
temperature = []
gravity = []
metallicity = []
ccf = []
for i, d in enumerate(datasets):
if debug:
sys.stdout.write('\r\t\tDataset {}/{}'.format(i+1, len(datasets)))
sys.stdout.flush()
ds = f[p][s][addmode][d]
if Tmin <= ds.attrs['T'] <= Tmax:
if ds.value.shape[0] == 2:
vel, corr = ds.value
elif 'velocity' in ds.attrs:
vel, corr = ds.attrs['velocity'], ds.value
else:
raise KeyError('Cannot find velocity information for dataset {}'.format(ds.name))
fcn = spline(vel, corr)
vsini_values.append(ds.attrs['vsini'])
temperature.append(ds.attrs['T'])
gravity.append(ds.attrs['logg'])
metallicity.append(ds.attrs['[Fe/H]'])
ccf.append(fcn(vel_arr))
data = pd.DataFrame(data={'Primary': [p]*len(ccf), 'Secondary': [s]*len(ccf),
'Temperature': temperature, 'vsini': vsini_values,
'logg': gravity, '[Fe/H]': metallicity, 'CCF': ccf})
data.drop_duplicates(subset=('Temperature', 'vsini', 'logg', '[Fe/H]', 'Primary', 'Secondary'),
inplace=True)
summary_dfs.append(find_best_pars(data, velocity=vel_max, vel_arr=vel_arr, N=N_best))
del data
return pd.concat(summary_dfs, ignore_index=True)
def find_best_pars(df, velocity='highest', vel_arr=np.arange(-900.0, 900.0, 0.1), N=1):
"""
Find the 'best-fit' parameters for each combination of primary and secondary star
:param df: the dataframe to search in
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:keyword N: The number of parameters to return
:return: a dataframe with keys of primary, secondary, and the parameters
"""
# Make sure N is odd
if N % 2 == 0:
logging.warning('N must be an odd number. Changing N from {} --> {}'.format(N, N + 1))
N += 1
# Get the names of the primary and secondary stars
primary_names = pd.unique(df.Primary)
secondary_names = pd.unique(df.Secondary)
# Find the ccf value at the given velocity
def val_fcn(ccf, idx=None, search_indices=None):
if idx is None:
if search_indices is None:
idx = np.argmax(ccf)
else:
idx = np.argmax(ccf[search_indices])
idx = search_indices[idx]
rv = vel_arr[idx]
sigma = np.std(ccf[np.abs(vel_arr - rv) > 200])
return ccf[idx], ccf[idx] / sigma, rv
if velocity == 'highest':
vals = df['CCF'].map(val_fcn)
df['ccf_max'] = vals.map(lambda l: l[0])
df['significance'] = vals.map(lambda l: l[1])
df['rv'] = vals.map(lambda l: l[2])
else:
# idx = np.argmin(np.abs(vel_arr - velocity))
idx = np.where(np.abs(vel_arr - velocity) <= 5)[0]
vals = df['CCF'].map(lambda c: val_fcn(c, search_indices=idx))
df['ccf_max'] = vals.map(lambda l: l[0])
df['significance'] = vals.map(lambda l: l[1])
df['rv'] = vals.map(lambda l: l[2])
#print(df[['Secondary', 'rv']])
# Find the best parameter for each combination
d = defaultdict(list)
groups = df.groupby(('Primary', 'Secondary'))
for group in groups.groups.keys():
primary = group[0]
secondary = group[1]
g = groups.get_group(group)
best = g.loc[g.ccf_max == g.ccf_max.max()]
T = best['Temperature'].item()
vsini = best['vsini'].item()
logg = best['logg'].item()
metal = best['[Fe/H]'].item()
rv = best['rv'].item()
Tmin = T - (N - 1) * 50
Tmax = T + (N - 1) * 50
for Ti in range(Tmin, Tmax + 1, 100):
good = g.loc[
(g['Temperature'] == Ti) & (g['vsini'] == vsini) & (g['logg'] == logg) & (g['[Fe/H]'] == metal)]
if len(good) == 0:
logging.warn('No matches for T = {} with primary/secondary = {}/{}!'.format(Ti, primary, secondary))
d['Primary'].append(primary)
d['Secondary'].append(secondary)
d['Temperature'].append(Ti)
d['vsini'].append(vsini)
d['logg'].append(logg)
d['[Fe/H]'].append(metal)
d['rv'].append(rv)
d['CCF'].append(np.nan)
d['significance'].append(np.nan)
continue
# print len(good)
best = good.loc[good.ccf_max == good.ccf_max.max()]
#best = good
if len(best) != 1 or any(np.isnan(best['CCF'].item())):
print(best)
print(good)
print(good.ccf_max)
print(good.ccf_max.max())
continue
# Save the best parameters for this temperature
d['Primary'].append(primary)
d['Secondary'].append(secondary)
d['Temperature'].append(best['Temperature'].item())
d['vsini'].append(best['vsini'].item())
d['logg'].append(best['logg'].item())
d['[Fe/H]'].append(best['[Fe/H]'].item())
idx = np.argmin(np.abs(vel_arr - rv))
d['rv'].append(rv)
d['CCF'].append(best['CCF'].item()[idx])
# d['rv'].append(best['rv'].item())
#d['CCF'].append(best.ccf_max.item())
# Measure the detection significance
std = mad(best.CCF.item())
mean = np.median(best.CCF.item())
d['significance'].append((d['CCF'][-1] - mean) / std)
return pd.DataFrame(data=d)
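# Illustrative sketch (not part of the original pipeline): how val_fcn above turns a single
# CCF into a peak value, a radial velocity, and a detection significance. The Gaussian-peak
# CCF below is synthetic; numpy is assumed to be imported as np, as elsewhere in this module.
def _example_ccf_significance():
    vel = np.arange(-900.0, 900.0, 0.1)
    ccf = 0.3 * np.exp(-0.5 * ((vel - 25.0) / 10.0) ** 2) + np.random.normal(0.0, 0.01, vel.size)
    idx = np.argmax(ccf)                          # index of the CCF maximum
    rv = vel[idx]                                 # velocity of the peak
    sigma = np.std(ccf[np.abs(vel - rv) > 200])   # noise level, estimated far from the peak
    return ccf[idx], ccf[idx] / sigma, rv         # the same triple that val_fcn returns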
def get_detected_objects(df, tol=1.0, debug=False):
"""
Takes a summary dataframe with RV information. Finds the median rv for each star,
and removes objects that are more than 'tol' km/s from the median value
:param df: A summary dataframe, such as created by get_ccf_summary or find_best_pars
:param tol: The tolerance, in km/s, to accept an observation as detected
:return: a dataframe containing only detected companions
"""
secondary_names = pd.unique(df.Secondary)
secondary_to_rv = defaultdict(float)
for secondary in secondary_names:
rv = df.loc[df.Secondary == secondary]['rv'].median()
secondary_to_rv[secondary] = rv
if debug:
for secondary in sorted(secondary_to_rv.keys()):
print ('RV for {}: {:.2f} km/s'.format(secondary, secondary_to_rv[secondary]))
keys = df.Secondary.values
good = df.loc[abs(df.rv.values - np.array(itemgetter(*keys)(secondary_to_rv))) < tol]
return good
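# Minimal usage sketch for get_detected_objects (illustrative; the summary frame and its
# numbers are invented, and pandas is assumed to be imported as pd, as elsewhere here).
def _example_detection_filter():
    toy = pd.DataFrame({'Primary': ['HIP 1'] * 4, 'Secondary': ['star B'] * 4,
                        'rv': [10.1, 9.8, 10.3, 25.0]})
    # The 25 km/s measurement lies far from the median (~10.2 km/s) and is dropped.
    return get_detected_objects(toy, tol=1.0)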
def get_detected_objects_new(df, siglim=5, Terr_lim=3, Toffset=2000):
"""
Get a dataframe with only the detected objects.
:param df: A DataFrame such as one output by get_ccf_summary with N > 1
:param siglim: The minimum significance to count as detected
:param Terr_lim: The maximum number of standard deviations of (Measured - Actual) to allow for detected objects
:param Toffset: The absolute difference to allow between the true and measured temperature.
:return: A dataframe similar to df, but with fewer rows
"""
S = get_initial_uncertainty(df)
S['Tdiff'] = S.Tmeas - S.Tactual
mean, std = S.Tdiff.mean(), S.Tdiff.std()
detected = S.loc[(S.significance > siglim) & (S.Tdiff - mean < Terr_lim * std) & (abs(S.Tdiff) < Toffset)]
return pd.merge(detected[['Primary', 'Secondary']], df, on=['Primary', 'Secondary'], how='left')
def add_actual_temperature(df, method='excel', filename='SecondaryStar_Temperatures.xls'):
"""
Add the actual temperature to a given summary dataframe
:param df: The dataframe to which we will add the actual secondary star temperature
:keyword method: How to get the actual temperature. Options are:
- 'spt': Use main-sequence relationships to go from spectral type --> temperature
- 'excel': Use tabulated data, available in the file 'SecondaryStar_Temperatures.xls'
:keyword filename: The filename of the excel spreadsheet containing the literature temperatures.
Needs to have the right format! Ignored if method='spt'
:return: None. The dataframe is modified in place, gaining 'Tactual' and 'Tact_err' columns with the secondary star temperature and its uncertainty.
"""
# First, get a list of the secondary stars in the data
secondary_names = pd.unique(df.Secondary)
secondary_to_temperature = defaultdict(float)
secondary_to_error = defaultdict(float)
if method.lower() == 'spt':
MS = SpectralTypeRelations.MainSequence()
for secondary in secondary_names:
star_data = StarData.GetData(secondary)
spt = star_data.spectype[0] + re.search('[0-9]\.*[0-9]*', star_data.spectype).group()
T_sec = MS.Interpolate(MS.Temperature, spt)
secondary_to_temperature[secondary] = T_sec
elif method.lower() == 'excel':
table = pd.read_excel(filename, 0)
for secondary in secondary_names:
T_sec = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())]['Literature_Temp'].item()
T_error = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())][
'Literature_error'].item()
secondary_to_temperature[secondary] = T_sec
secondary_to_error[secondary] = T_error
df['Tactual'] = df['Secondary'].map(lambda s: secondary_to_temperature[s])
df['Tact_err'] = df['Secondary'].map(lambda s: secondary_to_error[s])
return
def make_gaussian_process_samples(df):
"""
Make a gaussian process fitting the Tactual-Tmeasured relationship
:param df: pandas DataFrame with columns 'Temperature' (with the measured temperature)
and 'Tactual' (for the actual temperature)
:return: emcee sampler instance
"""
Tmeasured, Tactual, error, lit_err = get_values(df)
for i, e in enumerate(error):
if e < 1:
e = fit_sigma(df, i)
error[i] = np.sqrt(e**2 + lit_err[i]**2)
for Tm, Ta, e in zip(Tmeasured, Tactual, error):
print(Tm, Ta, e)
plt.figure(1)
limits = [3000, 7000]
plt.errorbar(Tmeasured, Tactual, yerr=error, fmt='.k', capsize=0)
plt.plot(limits, limits, 'r--')
plt.xlabel('Measured Temperature')
plt.ylabel('Actual Temperature')
plt.xlim(limits)
plt.ylim(limits)
# Define some functions to use in the GP fit
def model(pars, T):
#polypars = pars[2:]
#return np.poly1d(polypars)(T)
return T
def lnlike(pars, Tact, Tmeas, Terr):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeas, Terr)
return gp.lnlikelihood(Tact - model(pars, Tmeas))
def lnprior(pars):
lna, lntau = pars[:2]
polypars = pars[2:]
if -20 < lna < 20 and 12 < lntau < 20:
return 0.0
return -np.inf
def lnprob(pars, x, y, yerr):
lp = lnprior(pars)
return lp + lnlike(pars, x, y, yerr) if np.isfinite(lp) else -np.inf
# Set up the emcee fitter
initial = np.array([0, 14])#, 1.0, 0.0])
ndim = len(initial)
nwalkers = 100
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(Tactual, Tmeasured, error))
print('Running first burn-in')
p1, lnp, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print("Running second burn-in...")
p_best = p1[np.argmax(lnp)]
p2 = [p_best + 1e-8 * np.random.randn(ndim) for i in range(nwalkers)]
p3, _, _ = sampler.run_mcmc(p2, 250)
sampler.reset()
print("Running production...")
sampler.run_mcmc(p3, 1000)
# We now need to increase the spread of the posterior distribution so that it encompasses the right number of data points
# This is because the way we have been treating error bars here is kind of funky...
# First, generate a posterior distribution of Tactual for every possible Tmeasured
print('Generating posterior samples at all temperatures...')
N = 10000 # This is 1/10th of the total number of samples!
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
Tvalues = np.arange(3000, 6900, 100)
gp_posterior = []
for pars in par_vals:
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
gp_posterior.append(s)
# Get the median and spread in the pdf
gp_posterior = np.array(gp_posterior)
medians = np.median(gp_posterior, axis=0)
sigma_pdf = np.std(gp_posterior, axis=0)
# Correct the data and get the residual spread
df['Corrected_Temperature'] = df['Temperature'].map(lambda T: medians[np.argmin(abs(T - Tvalues))])
sigma_spread = np.std(df.Tactual - df.Corrected_Temperature)
# Increase the spread in the pdf to reflect the residual spread
ratio = np.maximum(np.ones(sigma_pdf.size), sigma_spread / sigma_pdf)
gp_corrected = (gp_posterior - medians) * ratio + medians
# Make confidence intervals
l, m, h = np.percentile(gp_corrected, [16.0, 50.0, 84.0], axis=0)
conf = pd.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
'Lower Bound': l, 'Upper bound': h})
conf.to_csv('Confidence_Intervals.csv', index=False)
# Finally, plot a bunch of the fits
print("Plotting...")
N = 300
Tvalues = np.arange(3000, 7000, 20)
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
plot_posterior = []
for i, pars in enumerate(par_vals):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
plot_posterior.append(s)
plot_posterior = np.array(plot_posterior)
medians = np.median(plot_posterior, axis=0)
sigma_pdf = np.std(plot_posterior, axis=0)
# Increase the spread in the pdf to reflect the residual spread
ratio = np.maximum(np.ones(sigma_pdf.size), sigma_spread / sigma_pdf)
plot_posterior = (plot_posterior - medians) * ratio + medians
plt.plot(Tvalues, plot_posterior.T, 'b-', alpha=0.05)
plt.draw()
plt.savefig('Temperature_Correspondence.pdf')
return sampler, gp_corrected
def check_posterior(df, posterior, Tvalues=np.arange(3000, 6900, 100)):
"""
Checks the posterior samples: Are 95% of the measurements within 2-sigma of the prediction?
:param df: The summary dataframe
:param posterior: The MCMC predicted values
:param Tvalues: The measured temperatures the posterior was made with
:return: boolean, as well as some warning messages if applicable
"""
# First, make 2-sigma confidence intervals
l, m, h = np.percentile(posterior, [5.0, 50.0, 95.0], axis=0)
Ntot = [] # The total number of observations with the given measured temperature
Nacc = [] # The number that have actual temperatures within the confidence interval
g = df.groupby('Temperature')
for i, T in enumerate(Tvalues):
if T in g.groups.keys():
Ta = g.get_group(T)['Tactual']
low, high = l[i], h[i]
Ntot.append(len(Ta))
Nacc.append(len(Ta.loc[(Ta >= low) & (Ta <= high)]))
p = float(Nacc[-1]) / float(Ntot[-1])
if p < 0.95:
logging.warn(
'Only {}/{} of the samples ({:.2f}%) were accepted for T = {} K'.format(Nacc[-1], Ntot[-1], p * 100,
T))
print(low, high)
print(sorted(Ta))
else:
Ntot.append(0)
Nacc.append(0)
p = float(sum(Nacc)) / float(sum(Ntot))
if p < 0.95:
logging.warn('Only {:.2f}% of the total samples were accepted!'.format(p * 100))
return False
return True
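# Self-contained sketch of the coverage check above: build a confidence band from posterior
# samples with np.percentile and count how many 'actual' values fall inside it. All numbers
# are synthetic; note that the [5, 95] percentiles span a 90% band.
def _example_coverage_check():
    posterior = np.random.normal(5000.0, 100.0, size=(1000, 1))   # samples for one Tmeasured
    low, med, high = np.percentile(posterior, [5.0, 50.0, 95.0], axis=0)
    actual = np.random.normal(5000.0, 100.0, size=50)             # pretend 'true' temperatures
    return np.mean((actual >= low[0]) & (actual <= high[0]))      # expect roughly 0.9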
def get_Tmeas(d, include_actual=True):
d = d.dropna(subset=['CCF'])
corr = d.CCF.values
corr += 1.0 - corr.max()
T = d.Temperature.values
w = corr / corr.sum()
Tmeas = np.average(T, weights=w)
var_T =
|
np.average((T - Tmeas) ** 2, weights=w)
|
numpy.average
|
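# Illustrative, self-contained sketch of what get_Tmeas (above) computes: a CCF-weighted mean
# temperature and its weighted variance via numpy.average. The grid values below are made up.
import numpy as np

T = np.array([5200.0, 5300.0, 5400.0, 5500.0])     # model grid temperatures
corr = np.array([0.85, 0.95, 1.00, 0.90])          # CCF peak height at each temperature
corr = corr + 1.0 - corr.max()                     # shift so the best model peaks at 1
w = corr / corr.sum()                              # normalized weights
Tmeas = np.average(T, weights=w)                   # weighted mean temperature
var_T = np.average((T - Tmeas) ** 2, weights=w)    # weighted variance
print(Tmeas, np.sqrt(var_T))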
from astropy.io import fits, ascii
from astropy.table import Table
import numpy as np
from astropy.stats import sigma_clipped_stats
import photutils.detection as sf
import microlens.jlu.align_flystar as af
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import os
from datetime import date
def euclidean(x1, y1, x2, y2):
return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def removeEdges(imgArr, finalTable, cutoff = 100, x = "xcentroid", y = "ycentroid", scale = 0.009942):
"""Ignores stars close to the edge, i.e., within 100 * scale pixels by default."""
cutoff = (cutoff * scale) / 0.009942
finalTable.remove_rows([i for i in range(len(finalTable[x])) if finalTable[x][i] < cutoff])
finalTable.remove_rows([i for i in range(len(finalTable[y])) if finalTable[y][i] < cutoff])
finalTable.remove_rows([i for i in range(len(finalTable[x])) if finalTable[x][i] > len(imgArr[0]) - cutoff])
finalTable.remove_rows([i for i in range(len(finalTable[y])) if finalTable[y][i] > len(imgArr) - cutoff])
return finalTable
def psfRadius(starPeak, imagePeak, scale = 0.009942):
"""
Arbitrary function that ties the predicted radius of the star to the peak brightness in the image.
Returns a radius of 80 * (log-brightness ratio)**3, clamped to the range [10, 60] NIRC2 pixels
and rescaled according to the given plate scale (arcsec/pixel).
"""
return ((max(min(80 * ((np.log10(starPeak) / np.log10(imagePeak)) ** 3), 60), 10) / 0.009942) * scale)
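# Worked example (illustrative): for starPeak=5000 and imagePeak=10000, the log-brightness
# ratio is log10(5000)/log10(10000) ~ 0.925; cubed and scaled by 80 this gives ~63, which is
# clamped to the 60-pixel ceiling. With the default NIRC2 scale the plate-scale factors
# cancel, so psfRadius(5000, 10000) ~ 60 pixels.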
def isBrightSpeckleStar(starPeak, imagePeak):
""" Can this star have halo speckles? If it's half the peak brightness, it is assumed yes. """
return (starPeak > (0.5 * imagePeak))
def isNearBrightStar(nearStarPeak, mainPeak):
""" Is the main star near another bright star? """
return (0.2 * mainPeak < nearStarPeak)
def isInPSFRadius(starCoo, mainCoo, psfr):
""" Is star within the psf radius from earlier? """
return (euclidean(starCoo[0], starCoo[1], mainCoo[0], mainCoo[1]) < psfr)
def isInSecondaryRadius(starCoo, mainCoo, psfr):
""" Is star within the secondary radius, defined as 1.35x the psf radius? (arbitrarily) """
return (euclidean(starCoo[0], starCoo[1], mainCoo[0], mainCoo[1]) < psfr * 1.35)
def getTableIndex(mainIndex, starIndex):
""" Helper function to get star being considered in removeCloseStars. """
return mainIndex + starIndex + 1
def removeCloseStars(imgArr, newTable, pixelDist = 10, x = "xcentroid", y = "ycentroid", verbose = False, scale = 0.009942):
"""
Walks the table in order of decreasing brightness, checks whether each star is a valid PSF star, and tracks secondaries near it.
Removes all invalid stars from the table, i.e., those that are likely halo speckles or too close for valid PSF generation.
Edge stars are already removed from the table by the removeEdges() method.
Terminology:
main - Star that is being considered for psf-ness.
star - Star that is being checked for nearness to main
psf radius - Radius (arbitrary function tied to brightness) that is checked for halo speckles/close bright stars
secondary radius - Radius in which, if star found, mark as secondary
Pseudocode:
Sort the table by brightest
imagePeak = brightest star peak value in image
invalidStarIndices = indices of stars to be removed from table
for each star in table, that is not marked invalid yet:
set this star to the main star
find the psf radius of this star
for all star-lookalikes less bright than main star:
if star-lookalike is within psf radius (arbitrary function tied to brightness):
if main star can have halo speckles (arbitrary brightness limit tied to imagePeak):
if lookalike is too bright to be a halo speckle (arbitrary brightness limit tied to mainPeak):
add main star to invalidIndices (another bright star is too near it!)
add bright star to invalidIndices (too close to main star)
else lookalike is probably halo speckle:
add lookalike to invalidIndices (too close to main star, not actually a star)
keep the main star!!
else main star cannot have halo speckles:
if there is a BRIGHT star-lookalike nearby:
its definitely a star and not halo speckle AND star is too close!
add main star to invalidIndices (another bright star is too near it!)
add bright star to invalidIndices (too close to main star)
else if star-lookalike is within secondary radius (arbitrary function tied to brightness):
set the "psf?" value of this star in table to 0 (secondary)
from the table, remove all the invalid stars!
return table
"""
newTable.sort("peak", reverse = True)
imagePeak = newTable["peak"][0]
mainIndex = 0
invalidStarIndices = []
while mainIndex < len(newTable):
if mainIndex not in invalidStarIndices:
mainCoo = (newTable[x][mainIndex], newTable[y][mainIndex], newTable["peak"][mainIndex])
psfr = psfRadius(mainCoo[2], imagePeak)
if verbose: print("Found PSF Star: " + str(np.round(mainCoo, 2)))
for starIndex, starCoo in enumerate(zip(newTable[x][mainIndex + 1:], newTable[y][mainIndex + 1:], newTable["peak"][mainIndex + 1:])):
if isInPSFRadius(starCoo, mainCoo, psfr):
if isBrightSpeckleStar(mainCoo[2], imagePeak):
if isNearBrightStar(starCoo[2], mainCoo[2]):
if verbose: print("Ignoring PSF Star Due To Bright Nearby Star: " + str(np.round(starCoo, 2)))
invalidStarIndices.append(mainIndex)
if verbose: print("Ignoring Halo Speckle/Bright Nearby Star: " + str(np.round(starCoo, 2)))
invalidStarIndices.append(getTableIndex(mainIndex, starIndex))
else:
if isNearBrightStar(starCoo[2], mainCoo[2]):
if verbose: print("Ignoring PSF Star Due To Bright Nearby Star: " + str(np.round(starCoo, 2)))
invalidStarIndices.append(mainIndex)
if verbose: print("Ignoring Nearby Star: " + str(np.round(starCoo, 2)))
invalidStarIndices.append(getTableIndex(mainIndex, starIndex))
elif isInSecondaryRadius(starCoo, mainCoo, psfr):
if verbose: print("Adding Possible Secondary Star: " + str(np.round(starCoo, 2)))
newTable["psf"][getTableIndex(mainIndex, starIndex)] = 0
newTable["mx"][getTableIndex(mainIndex, starIndex)] = mainCoo[0]
newTable["my"][getTableIndex(mainIndex, starIndex)] = mainCoo[1]
if verbose: print("*****")
mainIndex += 1
newTable.remove_rows(invalidStarIndices)
return newTable
def getStats(imgArr):
""" Returns mean median and mode of pixel values in image array. """
mean, median, std = sigma_clipped_stats(imgArr, sigma=3.0)
return mean, median, std
def getNStarsHelper(imgArr, expStars = 10, counter = 0, starsFound = 0, fwhm = 4.5, scale = 0.009942):
"""
Attempts to find N stars. If enough stars aren't found, returns what it can and reports the problem.
In that case, starfinding may have to be done manually.
"""
std = getStats(imgArr)[2]
med = getStats(imgArr)[1]
thr = 0
finderClass = sf.DAOStarFinder(fwhm = fwhm,
threshold = max(thr, std * 5),
sharphi = 0.4,
roundlo = -0.2,
exclude_border = True)
newTable = finderClass.find_stars(imgArr - med)
newTable["psf"] = 1
newTable["mx"] = 0.0
newTable["my"] = 0.0
if len(newTable["peak"]) < expStars:
print("Pre-Filtering PSF Stars Found: " + str(len(newTable["peak"])))
print("More Stars Required. Widening Search Parameters.")
nowStarsFound = len(newTable["peak"])
newTable = getNStarsHelper(imgArr, expStars, counter + 1, nowStarsFound, fwhm)
newTable = removeEdges(imgArr, newTable, scale = scale)
newTable = removeCloseStars(imgArr, newTable, verbose = False, scale = scale)
if ((len(newTable["peak"]) < expStars) and (counter < 20)):
print("PSF Stars Found: " + str(len(newTable["peak"])))
print("More Stars Required. Widening Search Parameters.")
nowStarsFound = len(newTable["peak"])
if (nowStarsFound < starsFound) or (std * 5 > thr):
print("Failed to find sufficient stars. Do starfinding manually or reduce PSF stars to be found.".upper())
return newTable
newTable = getNStarsHelper(imgArr, expStars, counter + 1, nowStarsFound, fwhm)
if counter >= 20:
print("PSF Stars Found: " + str(len(newTable["peak"])))
print("Failed to find sufficient stars. Do starfinding manually or reduce PSF stars to be found.".upper())
return newTable
return newTable
def getNStars(imgArr, numStars = 10, fwhm = 4.5, scale = 0.009942):
"""
Reformats table of chosen psf stars and adds the relevant secondaries to it.
"""
newTable = getNStarsHelper(imgArr, numStars, fwhm = fwhm, scale = scale)
print("PSF Stars Found: " + str(len(newTable["peak"])) + "/" + str(numStars))
print("Returning Best: " + str(min(len(newTable["peak"]), numStars)))
newTable.sort("peak", reverse = True)
secondaryTable = newTable[:]
nonSecIdx = []
for psfIdx in range(len(secondaryTable)):
if (secondaryTable["psf"][psfIdx] != 0):
nonSecIdx.append(psfIdx)
secondaryTable.remove_rows(nonSecIdx)
removed = []
for i in range(len(newTable)):
if newTable[i]["psf"] == 0:
removed.append(i)
newTable.remove_rows(removed)
newTable.remove_rows(range(numStars, len(newTable)))
goodSecIdx = []
for i in range(len(secondaryTable)):
for j in range(len(newTable)):
if (secondaryTable["mx"][i] == newTable["xcentroid"][j]) and (secondaryTable["my"][i] == newTable["ycentroid"][j]):
goodSecIdx.append(i)
break
for i in goodSecIdx:
newTable.add_row(secondaryTable[i])
return newTable
def generate_list(imgPath, numPSFStars = 7, fwhm = 4.5, scale = 0.009942, targName = ""):
"""
Plots the table generated by PSFListGen, and prints the list of stars in a format that can be used.
@params
imgPath - path to .fits image, Eg. /g/lu/data/microlens/18jun22/combo/mag18jun22_ob040361_kp.fits
numPSFStars - number of stars to be found
scale - defaults to nirc2 scale. Enter in arcsec/pixel.
targName - name of target (can be anything, not important)
fwhm - full-width half maximum variable for finicky images
"""
imgArr = fits.getdata(imgPath)
newTable = getNStars(imgArr, numStars = numPSFStars, fwhm = fwhm, scale = scale)
finalTable = newTable["xcentroid", "ycentroid", "peak", "psf", "mx", "my"]
finalTable.rename_column("xcentroid", "x")
finalTable.rename_column("ycentroid", "y")
finalTable.rename_column("psf", "m")
finalTable["x"] = np.round(finalTable["x"], 2)
finalTable["y"] = np.round(finalTable["y"], 2)
finalTable["peak"] = np.round(finalTable["peak"], 2)
finalTable["mx"] = np.round(finalTable["mx"], 2)
finalTable["my"] = np.round(finalTable["my"], 2)
finalTable["name"] = ["loremipsum" for i in finalTable["x"]]
finalTable["name"][0] = targName
modded_plot_starlist_on_image_pixel(finalTable, imgPath, targName,
flip = False, label = False, magCut = 400000, verbose = False)
print("*************************************************")
print("PSF List Details: ")
finalTable.remove_column("name")
finalTable.pprint()
print("*************************************************")
print("PSFListGen Identified the Following List of Stars:")
print("[")
for i in zip(finalTable["x"], finalTable["y"], finalTable["m"]):
print(" " + str(list(i)) + ",")
print("]")
print("*************************************************")
def plot_starlist_on_image_arcsec(starList, imagePath, refCoo, scale = (9.942/1000), magCut = 23, label = True, verbose=True, flip = False):
"""
Plot a NIRC2 image and overlay a starlist. Input in relative arcsec coordinates.
@params
starList - any astropy Table with a "name" (including target),
"m",
"x" (relative arcsec coordinates), and
"y" (relative arcsec coordinates) column.
imagePath - path to image from root.
refCoo - reference pixel coordinates of a star in image file.
scale - plate scale. Defaults to NIRC2 plate scale.
magCut - highest magnitude of star allowed to be plotted.
label - if True, adds labels. Defaults to true.
verbose - if True, prints the astropy table inputted. Defaults to true.
flip - if True, flips image in x (helps in case image is backwards).
Ensure that the image passed in is at a Position Angle of 0.
The image is NOT Flipped on plotting by default.
In order to flip image, set flip = True.
"""
# Initializing the Image and starList columns
img = fits.getdata(imagePath)
try:
xCoordList = starList['x']
yCoordList = starList['y']
mList = starList['m']
except KeyError:
raise KeyError("Starlist must have columns named 'x' and 'y', in pixel coordinates, and magnitude 'm'.")
if label:
try:
nameList = starList['name']
except KeyError:
raise KeyError("Starlist must have a column named 'name' if label = True. Else, set label = False.")
# Get image dimensions
x_axis = np.arange(img.shape[0], dtype=float) #[0, 1, ...., 1166]
y_axis =
|
np.arange(img.shape[1], dtype=float)
|
numpy.arange
|
#_____import packages_____
from galpy.potential import KuzminKutuzovStaeckelPotential
from galpy.potential import evaluatePotentials, evaluateRforces, evaluateDensities
from galpy.actionAngle import estimateDeltaStaeckel
from galpy.actionAngle import actionAngleStaeckel
from galpy.orbit import Orbit
from galpy.util import bovy_coords
import numpy
import scipy
import math
#Test whether circular velocity calculation works
def test_vcirc():
#test the circular velocity of the KuzminKutuzovStaeckelPotential
#using parameters from Batsleer & Dejonghe 1994, fig. 1-3
#and their formula eq. (10)
#_____model parameters______
#surface ratios of disk and halo:
ac_Ds = [50. ,50. ,50. ,50. ,50. ,50. ,40. ,40. ,40. ]
ac_Hs = [1.005,1.005,1.005,1.01,1.01,1.01 ,1.02,1.02,1.02 ]
#disk contribution to total mass:
ks = [0.05 ,0.06 ,0.07 ,0.07,0.1 ,0.125,0.1 ,0.12,0.125]
#focal distance:
Delta = 1.
for ii in range(9):
ac_D = ac_Ds[ii]
ac_H = ac_Hs[ii]
k = ks[ii]
#_____setup potential_____
#first try, not normalized:
V_D = KuzminKutuzovStaeckelPotential(amp= k ,ac=ac_D,Delta=Delta,normalize=False)
V_H = KuzminKutuzovStaeckelPotential(amp=(1.-k),ac=ac_H,Delta=Delta,normalize=False)
pot = [V_D,V_H]
#normalization according to Batsleer & Dejonghe 1994:
V00 = evaluatePotentials(pot,0.,0.)
#second try, normalized:
V_D = KuzminKutuzovStaeckelPotential(amp= k / (-V00),ac=ac_D,Delta=Delta,normalize=False)
V_H = KuzminKutuzovStaeckelPotential(amp=(1.-k) / (-V00),ac=ac_H,Delta=Delta,normalize=False)
pot = [V_D,V_H]
#_____calculate rotation curve_____
Rs = numpy.linspace(0.,20.,100)
z = 0.
vcirc_calc = numpy.sqrt(-Rs * evaluateRforces(pot,Rs,z))
#_____vcirc by Batsleer & Dejonghe eq. (10) (with proper Jacobian)_____
def vc2w(R):
g_D = Delta**2 / (1.-ac_D**2)
a_D = g_D - Delta**2
g_H = Delta**2 / (1.-ac_H**2)
a_H = g_H - Delta**2
l = R**2 - a_D
q = a_H - a_D
termD = numpy.sqrt(l )*(numpy.sqrt(l ) + numpy.sqrt(-g_D ))**2
termH = numpy.sqrt(l-q)*(numpy.sqrt(l-q) + numpy.sqrt(-g_D-q))**2
return R**2 * (k / termD + (1.-k) / termH)
vcirc_formula = numpy.sqrt(vc2w(Rs)/(-V00))
assert numpy.all(numpy.fabs(vcirc_calc - vcirc_formula) < 10**-8.), \
'Calculated circular velocity for KuzminKutuzovStaeckelPotential '+ \
'does not agree with eq. (10) (corrected by proper Jacobian) '+ \
'by Batsleer & Dejonghe (1994)'
return None
#-----------------------------------------------------------------------------
#test whether the density calculation works
def test_density():
#test the density calculation of the KuzminKutuzovStaeckelPotential
#using parameters from Batsleer & Dejonghe 1994, tab. 2
#_____parameters_____
#table 2 in Batsleer & Dejonghe
ac_D = [25. ,25. ,25. ,25. ,25. ,25. ,40. ,40. ,40. ,40. ,40. ,50. ,50. ,50. ,50. ,50. ,50. ,75. ,75. ,75. ,75. ,75. ,75. ,100. ,100. ,100.,100. ,100.,100. ,150. ,150. ,150. ,150. ,150.,150.]
ac_H = [1.005,1.005,1.01 ,1.01,1.02 ,1.02,1.005,1.005,1.01 ,1.01,1.02,1.005,1.005,1.01,1.01 ,1.02,1.02 ,1.005,1.005,1.01,1.01 ,1.02,1.02 ,1.005,1.005,1.01,1.01 ,1.02,1.02 ,1.005,1.005,1.01 ,1.01 ,1.02,1.02]
k = [0.05 ,0.08 ,0.075,0.11,0.105,0.11,0.05 ,0.08 ,0.075,0.11,0.11,0.05 ,0.07 ,0.07,0.125,0.1 ,0.125,0.05 ,0.065 ,0.07,0.125,0.10,0.125,0.05,0.065,0.07,0.125,0.10,0.125,0.05 ,0.065,0.075,0.125,0.11,0.125]
Delta = [0.99 ,1.01 ,0.96 ,0.99,0.86 ,0.88,1.00 ,1.01 ,0.96 ,0.99,0.89,1.05 ,1.06 ,1.00,1.05 ,0.91,0.97 ,0.98 ,0.99 ,0.94,0.98 ,0.85,0.91 ,1.06 ,1.07 ,1.01,1.06 ,0.94,0.97 ,1.06 ,1.07 ,0.98 ,1.06 ,0.94,0.97]
Mmin = [7.49 ,6.17 ,4.08 ,3.70,2.34 ,2.36,7.57 ,6.16 ,4.08 ,2.64,2.38,8.05 ,6.94 ,4.37,3.70 ,2.48,2.50 ,7.37 ,6.66 ,4.05,3.46 ,2.33,2.36 ,8.14 ,7.27 ,4.42,3.72 ,2.56,2.50 ,8.14 ,7.26 ,4.17 ,3.72 ,2.51,2.50]
Mmax = [7.18 ,6.12 ,3.99 ,3.69,2.37 ,2.40,7.27 ,6.11 ,3.99 ,2.66,2.42,7.76 ,6.85 ,4.26,3.72 ,2.51,2.54 ,7.07 ,6.51 ,3.95,3.48 ,2.36,2.40 ,7.85 ,7.15 ,4.30,3.75 ,2.58,2.54 ,7.85 ,7.07 ,4.08 ,3.75 ,2.53,2.53]
rhomin = [0.04 ,0.05 ,0.04 ,0.04,0.03 ,0.03,0.06 ,0.06 ,0.05 ,0.04,0.04,0.07 ,0.08 ,0.06,0.07 ,0.04,0.05 ,0.08 ,0.09 ,0.07,0.09 ,0.05,0.06 ,0.12 ,0.13 ,0.09,0.13 ,0.07,0.09 ,0.16 ,0.19 ,0.12 ,0.18 ,0.10,0.12]
rhomax = [0.03 ,0.03 ,0.02 ,0.03,0.02 ,0.02,0.04 ,0.04 ,0.03 ,0.03,0.02,0.05 ,0.05 ,0.04,0.05 ,0.03,0.03 ,0.05 ,0.06 ,0.04,0.06 ,0.03,0.04 ,0.07 ,0.08 ,0.06,0.08 ,0.04,0.05 ,0.09 ,0.10 ,0.07 ,0.10 ,0.06,0.07]
Sigmin = [58 ,52 ,52 ,49 ,39 ,40 ,58 ,55 ,51 ,44 ,40 ,59 ,54 ,53 ,49 ,41 ,42 ,58 ,55 ,51 ,48 ,39 ,40 ,59 ,55 ,53 ,49 ,42 ,42 ,59 ,55 ,52 ,49 ,42 ,42]
Sigmax = [45 ,41 ,38 ,37 ,28 ,28 ,45 ,32 ,37 ,32 ,30 ,46 ,43 ,40 ,37 ,30 ,31 ,45 ,43 ,38 ,36 ,28 ,29 ,46 ,43 ,40 ,38 ,31 ,31 ,46 ,44 ,39 ,38 ,30 ,31]
for ii in range(len(ac_D)):
if ac_D[ii] == 40.:
continue
#because I believe that there are typos in tab. 2 by Batsleer & Dejonghe...
for jj in range(2):
#_____parameters depending on solar position____
if jj == 0:
Rsun = 7.5
zsun = 0.004
GM = Mmin[ii] #units: G = 1, M in 10^11 solar masses
rho = rhomin[ii]
Sig = Sigmin[ii]
elif jj == 1:
Rsun = 8.5
zsun = 0.02
GM = Mmax[ii] #units: G = 1, M in 10^11 solar masses
rho = rhomax[ii]
Sig = Sigmax[ii]
outstr = 'ac_D='+str(ac_D[ii])+', ac_H='+str(ac_H[ii])+', k='+str(k[ii])+', Delta='+str(Delta[ii])+\
', Mtot='+str(GM)+'*10^11Msun, Rsun='+str(Rsun)+'kpc, rho(Rsun,zsun)='+str(rho)+'Msun/pc^3, Sig(Rsun,z<1.1kpc)='+str(Sig)+'Msun/pc^2'
#_____setup potential_____
amp_D = GM * k[ii]
V_D = KuzminKutuzovStaeckelPotential(amp=amp_D,ac=ac_D[ii],Delta=Delta[ii],normalize=False)
amp_H = GM * (1.-k[ii])
V_H = KuzminKutuzovStaeckelPotential(amp=amp_H,ac=ac_H[ii],Delta=Delta[ii],normalize=False)
pot = [V_D,V_H]
#_____local density_____
rho_calc = evaluateDensities(pot,Rsun,zsun) * 100. #units: [solar mass / pc^3]
rho_calc = round(rho_calc,2)
#an error of 0.01 corresponds to the significant digit
#given in the table, to which the density was rounded,
#to be wrong by one.
assert numpy.fabs(rho_calc - rho) <= 0.01+10.**-8, \
'Calculated density %f for KuzminKutuzovStaeckelPotential ' % rho_calc + \
'with model parameters:\n'+outstr+'\n'+ \
'does not agree with value from tab. 2 '+ \
'by Batsleer & Dejonghe (1994)'
#_____surface density_____
Sig_calc, err = scipy.integrate.quad(lambda z: (evaluateDensities(pot,Rsun,z/1000.) * 100.), #units: [solar mass / pc^3]
0., 1100.) #units: pc
Sig_calc = round(2. * Sig_calc)
#an error of 1 corresponds to the significant digit
#given in the table, to which the surface density was rounded,
#to be wrong by one.
assert numpy.fabs(Sig_calc - Sig) <= 1., \
'Calculated surface density %f for KuzminKutuzovStaeckelPotential ' % Sig_calc + \
'with model parameters:\n'+outstr+'\n'+ \
'does not agree with value from tab. 2 '+ \
'by Batsleer & Dejonghe (1994)'
return None
#-----------------------------------------------------------------------------
#test whether the orbit integrations in C and Python are the same
def test_orbitIntegrationC():
#_____initialize some KKSPot_____
Delta = 1.0
pot = KuzminKutuzovStaeckelPotential(ac=20.,Delta=Delta,normalize=True)
#_____initialize an orbit (twice)_____
vxvv = [1.,0.1,1.1,0.,0.1]
o_P= Orbit(vxvv=vxvv)
o_C= Orbit(vxvv=vxvv)
#_____integrate the orbit with python and C_____
ts= numpy.linspace(0,100,101)
o_P.integrate(ts,pot,method='leapfrog') #python
o_C.integrate(ts,pot,method='leapfrog_c')#C
for ii in range(5):
exp3= -1.7
if ii == 0: Python, CC, string, exp1, exp2 = o_P.R(ts) , o_C.R(ts) , 'R' , -5., -10.
elif ii == 1: Python, CC, string, exp1, exp2 = o_P.z(ts) , o_C.z(ts) , 'z' , -3.25, -4.
elif ii == 2: Python, CC, string, exp1, exp2 = o_P.vR(ts), o_C.vR(ts), 'vR', -3., -10.
elif ii == 3: Python, CC, string, exp1, exp2, exp3 = o_P.vz(ts), o_C.vz(ts), 'vz', -3., -4., -1.3
elif ii == 4: Python, CC, string, exp1, exp2 = o_P.vT(ts), o_C.vT(ts), 'vT', -5., -10.
rel_diff = numpy.fabs((Python-CC)/CC) < 10.**exp1
abs_diff = (numpy.fabs(Python-CC) < 10.**exp2) * (numpy.fabs(Python) < 10.**exp3)
assert numpy.all(rel_diff+abs_diff), \
'Orbit integration for '+string+' coordinate different in ' + \
'C and Python implementation.'
return None
#-----------------------------------------------------------------------------
#test whether this is really a Staeckel potential and the Delta is constant
def test_estimateDelta():
#_____initialize some KKSPot_____
Delta = 1.0
pot = KuzminKutuzovStaeckelPotential(ac=20.,Delta=Delta,normalize=True)
#_____initialize an orbit (twice)_____
vxvv = [1.,0.1,1.1,0.01,0.1]
o= Orbit(vxvv=vxvv)
#_____integrate the orbit with C_____
ts= numpy.linspace(0,101,100)
o.integrate(ts,pot,method='leapfrog_c')
#____estimate Focal length Delta_____
#for each time step individually:
deltas_estimate = numpy.zeros(len(ts))
for ii in range(len(ts)):
deltas_estimate[ii] = estimateDeltaStaeckel(pot,o.R(ts[ii]),o.z(ts[ii]))
assert numpy.all(numpy.fabs(deltas_estimate - Delta) < 10.**-8), \
'Focal length Delta estimated along the orbit is not constant.'
#for all time steps together:
delta_estimate = estimateDeltaStaeckel(pot,o.R(ts),o.z(ts))
assert numpy.fabs(delta_estimate - Delta) < 10.**-8, \
'Focal length Delta estimated from the orbit is not the same as the input focal length.'
return None
#-----------------------------------------------------------------------------
#test whether this is really a Staeckel potential and the Actions are conserved along the orbit
def test_actionConservation():
#_____initialize some KKSPot_____
Delta = 1.0
pot = KuzminKutuzovStaeckelPotential(ac=20.,Delta=Delta,normalize=True)
#_____initialize an orbit (twice)_____
vxvv = [1.,0.1,1.1,0.01,0.1]
o= Orbit(vxvv=vxvv)
#_____integrate the orbit with C_____
ts= numpy.linspace(0,101,100)
o.integrate(ts,pot,method='leapfrog_c')
#_____Setup ActionAngle object and calculate actions (Staeckel approximation)_____
aAS = actionAngleStaeckel(pot=pot,delta=Delta,c=True)
jrs,lzs,jzs = aAS(o.R(ts),o.vR(ts),o.vT(ts),o.z(ts),o.vz(ts))
assert numpy.all(numpy.fabs(jrs - jrs[0]) < 10.**-8.), \
'Radial action is not conserved along orbit.'
assert numpy.all(numpy.fabs(lzs - lzs[0]) < 10.**-8.), \
'Angular momentum is not conserved along orbit.'
assert numpy.all(numpy.fabs(jzs - jzs[0]) < 10.**-8.), \
'Vertical action is not conserved along orbit.'
return None
#-----------------------------------------------------------------------------
#test coordinate transformation
def test_lambdanu_to_Rz():
#coordinate system:
a = 3.
g = 4.
Delta = numpy.sqrt(g-a)
ac = numpy.sqrt(a/g)
#_____test float input (z=0)_____
#coordinate transformation:
l, n = 2., -4.
R,z= bovy_coords.lambdanu_to_Rz(l,n,ac=ac,Delta=Delta)
#true values:
R_true = numpy.sqrt((l+a)*(n+a)/(a-g))
z_true = numpy.sqrt((l+g)*(n+g)/(g-a))
#test:
assert numpy.fabs(R-R_true) < 10.**-10., 'lambdanu_to_Rz conversion did not work as expected (R)'
assert numpy.fabs(z-z_true) < 10.**-10., 'lambdanu_to_Rz conversion did not work as expected (z)'
#_____Also test for arrays_____
#coordinate transformation:
l = numpy.array([2. ,10.,20. ,0.])
n = numpy.array([-4.,-3.,-3.5,-3.5])
R,z= bovy_coords.lambdanu_to_Rz(l,n,ac=ac,Delta=Delta)
#true values:
R_true = numpy.sqrt((l+a)*(n+a)/(a-g))
z_true = numpy.sqrt((l+g)*(n+g)/(g-a))
#test:
rel_diff =
|
numpy.fabs((R-R_true)/R_true)
|
numpy.fabs
|
import numpy as np
class GradientDescent(object):
'''
Abstract class
'''
def initialize(self, x0):
self.x = x0
def update(self, dx):
pass
class VanillaGradient(GradientDescent):
def __init__(self, learning_rate, ascent=False):
self.learning_rate = learning_rate
self.ascent = ascent
def update(self, dx):
if self.ascent:
self.x += self.learning_rate * dx
else:
self.x -= self.learning_rate * dx
return self.x
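# Minimal usage sketch (illustrative, not part of the original module): minimizing
# f(x) = x**2 with the VanillaGradient rule above; the gradient passed in is 2*x.
def _example_vanilla_descent(steps=100, lr=0.1, x0=5.0):
    opt = VanillaGradient(lr)
    opt.initialize(x0)
    x = x0
    for _ in range(steps):
        x = opt.update(2.0 * x)   # pass the current gradient; update returns the new iterate
    return x                      # approaches 0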
class Adam(GradientDescent):
'''
Kingma, Diederik P., and Jimmy Ba.
"Adam: A method for stochastic optimization."
arXiv preprint arXiv:1412.6980 (2014).
'''
def __init__(self, learning_rate, beta1=0.9, beta2=0.999, eps=1e-8, use_correction=False, ascent=False, clip=None, normalize_sum=False):
self.learning_rate = learning_rate
self.beta1 = beta1
self.beta2 = beta2
self.eps = eps
self.use_correction = use_correction
self.ascent = ascent
self.clip = clip
self.normalize_sum = normalize_sum
def initialize(self, x0):
self.x = x0
self.m = 0
self.v = 0
self.t = 0
def update(self, dx):
self.t += 1
self.m = self.beta1 * self.m + (1 - self.beta1) * dx
self.v = self.beta2 * self.v + (1 - self.beta2) * (dx ** 2)
if self.use_correction:
m = self.m / (1 - self.beta1 ** self.t)
v = self.v / (1 - self.beta2 ** self.t)
else:
m = self.m
v = self.v
if self.ascent:
self.x += self.learning_rate * m / (np.sqrt(v) + self.eps)
else:
self.x -= self.learning_rate * m / (np.sqrt(v) + self.eps)
if self.clip is not None:
self.x =
|
np.clip(self.x, self.clip[0], self.clip[1])
|
numpy.clip
|
import os
import sys
import pickle as pickle
import numpy as np
import tensorflow as tf
import src.fashion_minst.utils.mnist_reader as mnist_reader
def _read_data(data_path):
"""Reads CIFAR-10 format data. Always returns NHWC format.
Returns:
images: np tensor of size [N, H, W, C]
labels: np tensor of size [N]
"""
images, labels = [], []
X_test, y_test = mnist_reader.load_mnist(data_path, kind='train')
#X_test, y_test = mnist_reader.load_mnist('data/fashion', kind='t10k')
#data = pickle.load(finp,encoding='bytes')
batch_images = X_test.astype(np.float32) / 255.0
batch_labels = np.array(y_test, dtype=np.int32)
images.append(batch_images)
labels.append(batch_labels)
images = np.concatenate(images, axis=0)
labels = np.concatenate(labels, axis=0)
images = np.reshape(images, [-1, 1, 28, 28])
images = np.transpose(images, [0, 2, 3, 1])
return images, labels
def _read_data_test(data_path):
"""Reads CIFAR-10 format data. Always returns NHWC format.
Returns:
images: np tensor of size [N, H, W, C]
labels: np tensor of size [N]
"""
images, labels = [], []
X_test, y_test = mnist_reader.load_mnist(data_path, kind='t10k')
#X_test, y_test = mnist_reader.load_mnist('data/fashion', kind='t10k')
#data = pickle.load(finp,encoding='bytes')
print(X_test)
batch_images = X_test.astype(np.float32) / 255.0
batch_labels = np.array(y_test, dtype=np.int32)
images.append(batch_images)
labels.append(batch_labels)
images =
|
np.concatenate(images, axis=0)
|
numpy.concatenate
|
import contextlib
import sys
import warnings
import itertools
import operator
import platform
from distutils.version import LooseVersion as _LooseVersion
import pytest
from hypothesis import given, settings, Verbosity
from hypothesis.strategies import sampled_from
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
floating_types = np.floating.__subclasses__()
complex_floating_types = np.complexfloating.__subclasses__()
# This compares scalarmath against ufuncs.
class TestTypes:
def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
a_array = np.array([3], dtype=atype)
for l, btype in enumerate(types):
b_scalar = btype(1)
b_array = np.array([1], dtype=btype)
c_scalar = a_scalar + b_scalar
c_array = a_array + b_array
# It was comparing the type numbers, but the new ufunc
# function-finding mechanism finds the lowest function
# to which both inputs can be cast - which produces 'l'
# when you do 'q' + 'b'. The old function finding mechanism
# skipped ahead based on the first argument, but that
# does not produce properly symmetric results...
assert_equal(c_scalar.dtype, c_array.dtype,
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
assert_equal(a, b)
def test_leak(self):
# test leak of scalar objects
# a leak would show up in valgrind as still-reachable of ~2.6MB
for i in range(200000):
np.add(1, 1)
class TestBaseMath:
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
if dt != np.int32:
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
class TestPower:
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a ** 4
assert_(b == 81, "error with %r: got %r" % (t, b))
def test_large_types(self):
for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
a = t(51)
b = a ** 4
msg = "error with %r: got %r" % (t, b)
if np.issubdtype(t, np.integer):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
# has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
exp = [np.array(-1, dt)[()] for dt in 'bhilq']
# 1 ** -1 possible special case
base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, 1.)
# -1 ** -1 possible special case
base = [np.array(-1, dt)[()] for dt in 'bhilq']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, -1.)
# 2 ** -1 perhaps generic
base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, .5)
def test_mixed_types(self):
typelist = [np.int8, np.int16, np.float16,
np.float32, np.float64, np.int8,
np.int16, np.int32, np.int64]
for t1 in typelist:
for t2 in typelist:
a = t1(3)
b = t2(2)
result = a**b
msg = ("error with %r and %r:"
"got %r, expected %r") % (t1, t2, result, 9)
if np.issubdtype(np.dtype(result), np.integer):
assert_(result == 9, msg)
else:
assert_almost_equal(result, 9, err_msg=msg)
def test_modular_power(self):
# modular power is not implemented, so ensure it errors
a = 5
b = 4
c = 10
expected = pow(a, b, c) # noqa: F841
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
def floordiv_and_mod(x, y):
return (x // y, x % y)
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
class TestModulus:
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)[()]
b = np.array(sg2*19, dtype=dt2)[()]
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
# convert exact integer results from Python to float so that
# signed zero can be used, it is checked.
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floordiv_and_mod, divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
# use list comprehension so a_ and b_ are scalars
div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
def test_float_modulus_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)[()]
b = np.array(sg2*6e-8, dtype=dt2)[()]
div, rem = op(a, b)
# Equal assertion should hold when fmod is used
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf =
|
np.array(np.inf, dtype=dt)
|
numpy.array
|
"""
Example of gradcam with guided backprop on VGG16 with demo image of cat.
https://morioh.com/p/64064daff26c
"""
import cv2
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
IMAGE_PATH = '/home/jlamstein/PycharmProjects/GEDI-ORDER/examples/cat.3.jpg'
LAYER_NAME = 'block5_conv3'
CAT_CLASS_INDEX = 281
img = tf.keras.preprocessing.image.load_img(IMAGE_PATH, target_size=(224, 224))
plt.figure()
plt.imshow(img)
img = tf.keras.preprocessing.image.img_to_array(img)
# Load initial model
model = tf.keras.applications.vgg16.VGG16(weights='imagenet', include_top=True)
# Create a graph that outputs target convolution and output
grad_model = tf.keras.models.Model(model.inputs, [model.get_layer(LAYER_NAME).output, model.output])
print(model.get_layer(LAYER_NAME).output)
# Get the score for target class
with tf.GradientTape() as tape:
conv_outputs, predictions = grad_model(np.array([img]))
loss = predictions[:, CAT_CLASS_INDEX]
# Extract filters and gradients
output = conv_outputs[0]
grads = tape.gradient(loss, conv_outputs)[0]
# Apply guided backpropagation
gate_f = tf.cast(output > 0, 'float32')
gate_r = tf.cast(grads > 0, 'float32')
guided_grads = gate_f * gate_r * grads
# Average gradients spatially
weights = tf.reduce_mean(guided_grads, axis=(0, 1))
# Build a weighted map of the filters according to the importance of the gradients
cam =
|
np.ones(output.shape[0:2], dtype=np.float32)
|
numpy.ones
|
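# A common way to finish the Grad-CAM heatmap (illustrative sketch, continuing the truncated
# script above with its cam, output, weights and img variables; this is not necessarily the
# original script's continuation): weight each filter map by its averaged guided gradient,
# rescale to the input size, and overlay it on the image.
output_np = output.numpy()
weights_np = weights.numpy()
for index, w in enumerate(weights_np):
    cam += w * output_np[:, :, index]
cam = cv2.resize(cam, (224, 224))
cam = np.maximum(cam, 0)                                  # keep positive evidence only
heatmap = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)
overlay = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
blended = cv2.addWeighted(cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR), 0.5,
                          overlay, 0.5, 0)
plt.figure()
plt.imshow(cv2.cvtColor(blended, cv2.COLOR_BGR2RGB))
plt.show()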
from PIL import Image
import numpy as np
import os
import torch
import torch.nn as nn
import torch.utils.data as utils
import torch.nn.functional as F
import higher
import pickle
from torch.utils.data import Dataset, DataLoader, Subset
from torchvision import datasets, transforms
from torch.autograd import grad
from tqdm import tqdm
from simclr_models import *
from simclr_datasets import *
from nt_xent import NTXentLoss
from torch.backends import cudnn
cudnn.deterministic = True
cudnn.benchmark = False
import argparse
parser = argparse.ArgumentParser(description='Eval SIMCLR ECG')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--runseed', type=int, default=0)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--ex', type=int, default=500, help='num data points')
parser.add_argument('--finetune_lr', type=float, default=1e-3)
parser.add_argument('--epochs', default=200, type=int)
parser.add_argument('--studentarch', type=str, default='resnet18')
parser.add_argument('--dataset', type=str, default='ecg')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--savefol', type=str, default='simclr-ecg-eval')
parser.add_argument('--transfer_eval', action='store_true')
parser.add_argument('--checkpoint', type=str)
args = parser.parse_args()
torch.manual_seed(args.runseed)
torch.multiprocessing.set_sharing_strategy('file_system')
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if args.transfer_eval:
args.savefol += f'-transfereval-{args.ex}ex'
else:
args.savefol += f'-lineval-{args.ex}ex'
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def model_saver(epoch, student, head, teacher, pt_opt, pt_sched, ft_opt, hyp_opt, path):
torch.save({
'student_sd': student.state_dict(),
'teacher_sd': teacher.state_dict() if teacher is not None else None,
'head_sd': head.state_dict(),
'ft_opt_state_dict': ft_opt.state_dict(),
}, path + f'/checkpoint_epoch{epoch}.pt')
def get_save_path():
modfol = f"""seed{args.seed}-runseed{args.runseed}-student{args.studentarch}-ftlr{args.finetune_lr}-epochs{args.epochs}-ckpt{args.checkpoint}"""
pth = os.path.join(args.savefol, modfol)
os.makedirs(pth, exist_ok=True)
return pth
def get_loss(student,head,teacher, x, y):
head_op = head(student.logits(x))
l_obj = nn.BCEWithLogitsLoss()
clf_loss = l_obj(head_op, y)
y_loss_stud = clf_loss
acc_stud = 0 #torch.mean(torch.sigmoid(head_op) > 0.5 * y).item()
return y_loss_stud, acc_stud
# Utility function to update lossdict
def update_lossdict(lossdict, update, action='append'):
for k in update.keys():
if action == 'append':
if k in lossdict:
lossdict[k].append(update[k])
else:
lossdict[k] = [update[k]]
elif action == 'sum':
if k in lossdict:
lossdict[k] += update[k]
else:
lossdict[k] = update[k]
else:
raise NotImplementedError
return lossdict
from sklearn.metrics import roc_auc_score
# Evaluate student on complete train/test set.
def eval_student(student, head, dl):
student.eval()
net_loss = 0
correct = 0
y_pred = []
y_true = []
l_obj = nn.BCEWithLogitsLoss(reduction='sum')
# clf_loss = l_obj(head_op, y)
with torch.no_grad():
for data, target in dl:
y_true.append(target.detach().cpu().numpy())
data, target = data.to(device), target.to(device)
output = head(student.logits(data))
net_loss += l_obj(output, target).item() # sum up batch loss
y_pred.append(output.detach().cpu().numpy())
# pred = torch.sigmoid(output) > 0.5
# correct += torch.sum(pred == target).item()
y_pred = np.concatenate(y_pred, axis=0)
y_true =
|
np.concatenate(y_true, axis=0)
|
numpy.concatenate
|
from classes.cell_trace_config import cell_trace_config_filter, get_event_start_rows
import logging
import matplotlib.pyplot as plt
from matplotlib import ticker
from functions import merge_dataframe_list
# Plotting - single sample for whole time
def layout_plot(plot, tick_spacing=100, fov=(0, 2400, 0, 1.2), legend=False):
# Set fine x-axis scale
plot.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
# Set x and y limits and legend (default = False)
plot.axis(fov)
plot.legend().set_visible(legend)
def aligned_layout_plot(
plot, tick_spacing=0.5, fov=(-20, 50, -0.05, 1.9), legend=False
):
# Set fine x-axis scale
plot.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
# Set x and y limits and legend (default = False)
plot.axis(fov)
plot.legend().set_visible(legend)
# plots single event
def plot_single_event(filtered_data, sample_id, cell_trace_config, event_df):
# TODO: Handle plotting multiple celltypes/filterpatterns with legend
cell_subset_df = filtered_data.get(sample_id)
# Single sample analysis
# For single sample over the whole experimental time
# Calculate min, max, avg, stddev, sem from cell_subset_df (defined earlier)
cell_subset_df.index = cell_subset_df.time
del cell_subset_df["time"] # delete time_column
if len(cell_subset_df.columns) == 0:
logging.warning("Sample {} has no data to plot".format(sample_id))
return None
cell_subset_df.index.name = None # delete index name
event_avg_df = cell_subset_df.mean(axis=1)
event_min_df = cell_subset_df.min(axis=1)
event_max_df = cell_subset_df.max(axis=1)
# Standard deviation (distribution)
event_std_df = cell_subset_df.std(axis=1)
# standard error of mean
event_sem_df = cell_subset_df.sem(axis=1)
# print(ctc.sample_id, cell_avg_df)
fig = plt.figure()
fig.suptitle(sample_id)
fig.set_facecolor("white")
# Plot all cells from cell_subset_df over entire time (specified in Cell_Trace_Config). <For a single sample>
sub1 = fig.add_subplot(211) # 211
cell_subset_df.plot(ax=sub1)
cell_trace_config.add_event_time_points_to_plot(event_df, sub1)
layout_plot(sub1)
# Avg, min, max, std-dev for multiple cells in single sample over whole time.
# All cells are averaged. For cell and filterpattern see below.
sub2 = fig.add_subplot(212) # 212
event_avg_df.plot(
ax=sub2, color="g", label=cell_trace_config.cell_type, linewidth=1
)
cell_trace_config.add_event_time_points_to_plot(event_df, sub2)
# cell_min_df.plot(ax=sub2, color = 'r', linewidth=1, alpha = 0.5)
# cell_max_df.plot(ax=sub2, color = 'r', linewidth=1, alpha = 0.5)
event_avg_df.plot.line(yerr=event_std_df, ax=sub2, color="r", alpha=0.1)
# cell_avg_df.plot.line(yerr=cell_sem_df, ax=sub2, color = 'c', alpha = 0.1)
layout_plot(sub2)
plt.show()
def plot_all_events(sample_data, cell_trace_configs):
cell_trace_config_id_map = {ctc.sample_id: ctc for ctc in cell_trace_configs}
filtered_data = cell_trace_config_filter(sample_data, cell_trace_configs)
event_start_dfs = get_event_start_rows(sample_data, cell_trace_configs)
for sample_id in filtered_data:
plot_single_event(
filtered_data,
sample_id,
cell_trace_config_id_map[sample_id],
event_start_dfs[sample_id],
)
def plot_windowed_events(df):
if len(df) == 0:
logging.warning("No data to plot!")
return
# For single or multiple sample, aligned for certain event (no transitions taken into account).
# Interpolated data used!
# Averaged all events and all cells pro timepoint
# Average for specific cell type filter-pattern (see below)
all_cell_avg_df = df.mean(axis=1)
all_cell_min_df = df.min(axis=1)
all_cell_max_df = df.max(axis=1)
all_cell_std_df = df.std(axis=1)
all_cell_sem_df = df.sem(axis=1)
logging.info("Ignores filterpattern in statistic calculations")
fig = plt.figure()
# Plot all cells from all_df, aligned at zero for event_start, specified in Cell_Trace_Config.
sub1 = fig.add_subplot(211)
df.plot(ax=sub1, marker="*", label="A00C")
aligned_layout_plot(sub1)
sub2 = fig.add_subplot(212)
all_cell_avg_df.plot(
ax=sub2, color="k", label="A00C"
) # use interpolated df to calculate average...
# all_cell_min_df.plot(ax=sub2, color = 'r', linewidth=1, alpha = 0.5)
# all_cell_max_df.plot(ax=sub2, color = 'r', linewidth=1, alpha = 0.5)
all_cell_avg_df.plot.line(yerr=all_cell_std_df, ax=sub2, color="r", alpha=0.1)
# all_cell_avg_df.plot.line(yerr=all_cell_sem_df, ax=sub2, color = 'c', alpha = 0.1)
aligned_layout_plot(sub2)
plt.show()
from logging_functions import log_overlapping_transition
import numpy as np
def plot_transition_gaps_hist(found_transitions):
# get event diffs
gap_Ptrans = []
for sample in found_transitions:
for found_transition in sample:
gap_Ptrans.append(
(found_transition["second_event_start"])
- (found_transition["first_event_end"])
)
log_overlapping_transition(found_transition, seconds_overlap=0)
# print(gap_Ptrans)
avg_duration = np.mean(gap_Ptrans)
max_duration = np.max(gap_Ptrans)
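# Hedged sketch of the `found_transitions` structure this function expects,
# inferred from the loop above: one list per sample, each entry a dict holding
# the end time of the first event and the start time of the second. Times are
# illustrative; the call is left commented because log_overlapping_transition
# needs the project's logging_functions module.
_demo_found_transitions = [
    [{"first_event_end": 12.0, "second_event_start": 15.5}],
    [{"first_event_end": 40.0, "second_event_start": 41.0},
     {"first_event_end": 55.0, "second_event_start": 60.0}],
]
# plot_transition_gaps_hist(_demo_found_transitions)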
import numpy as np
import glob
import os
import urllib.request as req
from keras.models import Model
from keras.layers import Dense, Dropout
from keras.applications.mobilenet import MobileNet
from keras.applications.mobilenet import preprocess_input
from keras.applications.inception_resnet_v2 import InceptionResNetV2
# from keras.applications.inception_resnet_v2 import preprocess_input
from utils.nasnet import NASNetMobile
from utils.nasnet import preprocess_input as preprocess_input2
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
# import IPython
# clear_output = IPython.core.display.clear_output
from utils.score_utils import mean_score, std_score
weigths_path = "./weights/"
def inference_batchwise(model,batch):
x = preprocess_input2(batch)
scores = model.predict(x, batch_size=len(batch), verbose=0)
return scores
def evaluate(model,imgs,batch_size=32):
target_size = (224, 224)
with tf.device('/CPU:0'):
total_imgs = len(imgs)
score_list = []
batches = int(np.ceil(total_imgs / batch_size))
"""
Author: <NAME>
Last modified: 10/31/2020
Description:
This module consists of simulations of the spread of multiple
contagions on a single network under the threshold model.
"""
#--------------------------- Imports ------------------------------#
import numpy as np
import mms.utility as mu
from scipy import sparse
#----------------------- Function Definitions ----------------------#
def isolate_threshold_count(A, B, T, k, r = 0):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are not interrelated.
Parameters
----------
A: scipy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: scipy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
k: int
The number of system iterations
r: float, optional
The recovery probability. In each iteration, each vertex has a probability
r of changing its state to 0 for each contagion.
Returns
-------
B: numpy array
The final configuration
"""
# Make all 1s along the diagonal of A (since we are considering the closed neighborhood)
#np.fill_diagonal(A, 1)
# The recovery probability
recovery = False
if r != 0:
recovery = True
# The main loop
for i in range(k):
# matrix operation
B_last = B
B = A @ B - T #B = np.matmul(A, B_last) - T
# update states
B[B >= 0] = 1
B[B < 0] = 0
# If a recovery probability is set
if recovery:
B[np.random.rand(*B.shape) < r] = 0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
print("Max number of iteratios reached")
return B
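# Usage sketch (not part of the original module): two independent contagions on
# a 3-vertex path graph whose diagonal is already set to 1 to encode closed
# neighborhoods. All values below are illustrative.
def _demo_isolate_threshold_count():
    A = sparse.csr_matrix(np.array([[1, 1, 0],
                                    [1, 1, 1],
                                    [0, 1, 1]]))
    B = sparse.csr_matrix(np.array([[1, 0],
                                    [0, 0],
                                    [0, 1]]))
    T = np.array([[1, 1],
                  [2, 2],
                  [1, 1]])
    return isolate_threshold_count(A, B, T, k=5)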
###########################################################################################
def correlate_threshold_weight(A, B, T, W, k, r = 0):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are interrelated as described by the third model.
Parameters
----------
A: numpy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: numpy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
W: numpy array, float [0, 1]
The weight matrix where $W_{ij}$ is the weight of contagion j w.r.t
contagion i
k: int
The number of system iterations
r: float, optional
The recovery probability. In each iteration, each vertex has a probability
r of changing its state to 0 for each contagion.
Returns
-------
B: numpy array
The final configuration
"""
# Make all 1s along the diagonal of A (since we are considering the closed neighborhood)
#A.setdiag(1)
# The recovery probability
recovery = False
if r != 0:
recovery = True
# Take the transpose of the weight matrix
W = np.transpose(W)
# The main loop
for i in range(k):
# matrix operation
B_last = B
#B = np.linalg.multi_dot([A, B_last, W]) - T
B = A @ B_last @ W - T
# update states
B[B >= 0] = 1
B[B < 0] = 0
# If a recovery probability is set
if recovery:
B[np.random.rand(*B.shape) < r] = 0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
#h = hpy()
#print(h.heap())
print("Max number of iteratios reached")
return B
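# Usage sketch (illustrative values): same toy graph as above, but the two
# contagions are now coupled through W, whose off-diagonal 0.5 lets each
# contagion count half of the other's exposure.
def _demo_correlate_threshold_weight():
    A = sparse.csr_matrix(np.array([[1, 1, 0],
                                    [1, 1, 1],
                                    [0, 1, 1]]))
    B = np.array([[1, 0],
                  [0, 0],
                  [0, 1]])
    T = np.array([[1, 1],
                  [2, 2],
                  [1, 1]])
    W = np.array([[1.0, 0.5],
                  [0.5, 1.0]])
    return correlate_threshold_weight(A, B, T, W, k=5)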
def correlate_threshold_density(A, B, T, d, k):
"""
Description
-----------
This function simulates the spread of multiple contagions on a single network
where each contagion has 2 states (0 or 1).
Contagions are interrelated as described by the second model.
Parameters
----------
A: numpy array, int {0, 1}
The adjacency matrix of G.
A is sparse
B: numpy array, int {0, 1}
The initial configuration matrix where $B_{vj}$ is the state value of
vertex v for contagion j.
B is sparse
T: numpy array, int
The threshold matrix where $T_{vj}$ is the threshold of vertex v for
contagion j.
d: numpy array, int
The density vector
k: int
The number of system iterations
Returns
-------
B: numpy array
The final configuration
"""
# Compute the reciprocal
d_bar = np.transpose( np.reciprocal(d.astype(float)) ) # Make sure that d is a column vector
# The number of contagions
c = np.shape(T)[1]
# c x 1 column of ones (one entry per contagion)
one = np.ones((c, 1), dtype = 'float')
# The main loop
for i in range(k):
B_last = B
# Compute M
M = B @ one @ d_bar #M = np.linalg.multi_dot([B, one, d_bar])
M[M >= 1.0] = 1.0
M[M < 1.0] = 0.0
#B = np.matmul(A, M) - T
B = A @ M - T
# update states
B[B >= 0.0] = 1.0
B[B < 0.0] = 0.0
# if fixed point
if np.array_equal(B, B_last):
print("A fixed point is reached at iteration {}".format(i))
return B
print("Max number of iteratios reached")
return B
def covid_mask(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inverse of the diagonal degree matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_i = 1 if person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector throughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# determine if the overall fraction of infection exceeds the threshold
l_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors who wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Has to flatten q to pass it to the binomial function
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Compute R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
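# Usage sketch (not part of the original module): build a tiny 4-person
# two-layer network and run covid_mask on it. All names and parameter values
# below are illustrative; D_inverse is the inverse of the diagonal degree
# matrix of the social layer, as the docstring requires.
def _demo_covid_inputs():
    n = 4
    A_1 = sparse.csr_matrix(np.array([[0, 1, 1, 0],
                                      [1, 0, 1, 1],
                                      [1, 1, 0, 1],
                                      [0, 1, 1, 0]], dtype=float))
    A_2 = A_1.copy()                              # disease layer = social layer here
    degrees = np.asarray(A_1.sum(axis=1)).flatten()
    D_inverse = sparse.diags(1.0 / degrees)
    a_1 = np.zeros((n, 1))                        # nobody is prosocial
    t_2 = np.full((n, 1), 0.5)                    # peer-pressure threshold
    t_3 = np.full((n, 1), 0.2)                    # fear threshold
    b_1 = np.zeros((n, 1))                        # no masks initially
    b_2 = np.array([[1.0], [0.0], [0.0], [0.0]])  # one infected seed
    return A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2
def _demo_covid_mask():
    args = _demo_covid_inputs()
    return covid_mask(*args, p=0.3, alpha=0.3, beta=0.5, r=0.1, k=50)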
def covid_mask_sym_fear(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k, sym_ratio):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inversed diagonal matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_1 = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector thoughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# determine if the overall faction of infection exceed the threshold
l_3 = -(t_3 - sym_ratio * a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors with wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Has to flatten q to pass it to the binomial funciton
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Computer R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
def covid_mask_peak_diff(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inversed diagonal matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_1 = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac_1 = 0.0
# The second largest fraction of infection reached throughout the time
max_frac_2 = 0.0
# The time where the largest infection occurs
peak_time_1 = 0
# The time where the second largest infection occurs
peak_time_2 = 0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector thoughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac_1:
max_frac_2 = max_frac_1
peak_time_2 = peak_time_1
max_frac_1 = a_3
peak_time_1 = i
# determine if the overall faction of infection exceed the threshold
l_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors with wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Has to flatten q to pass it to the binomial funciton
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Computer R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
return peak_time_1, peak_time_2  # times of the two largest infection peaks
return peak_time_1, peak_time_2
def covid_mask_control(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inversed diagonal matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_1 = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector thoughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# determine if the overall faction of infection exceed the threshold
l_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors with wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# l_2 = (F @ b_1_last) >= t_2 WORTH TRYING!
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
#b_1 = np.logical_or(np.logical_or(a_1, l_2), l_3) WORTH TRYING
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Has to flatten q to pass it to the binomial funciton
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Computer R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The control happens here
if float(np.count_nonzero(b_2) / n) < float(np.count_nonzero(b_2_last) / n):
a_1 = b_1 # Mask wearing people continue to wear masks
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
# return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
return infection_vector, mask_vector
average_mask = float(total_mask / days)
#return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
return infection_vector, mask_vector
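# Hedged sketch: unlike covid_mask, covid_mask_control returns the per-day
# fraction infected and fraction wearing masks, so its output can be plotted
# directly. It reuses the illustrative _demo_covid_inputs helper sketched
# after covid_mask above; all values remain illustrative.
def _demo_plot_covid_mask_control():
    import matplotlib.pyplot as plt
    args = _demo_covid_inputs()
    infection_vector, mask_vector = covid_mask_control(*args, p=0.3, alpha=0.3, beta=0.5, r=0.1, k=50)
    plt.plot(infection_vector, label="fraction infected")
    plt.plot(mask_vector, label="fraction wearing masks")
    plt.xlabel("day")
    plt.legend()
    plt.show()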
def covid_mask_prob_social(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k, g_peer, g_fear):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inversed diagonal matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_1 = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Growth rate of the logistic function
g_2 = g_peer
g_3 = g_fear
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector thoughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# Fear
# 1 / (1 + e^{-g (a_3 - t_3)})
p_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3
p_3 = -g_3 * p_3
p_3 = one + np.exp(p_3)
p_3 = np.reciprocal(p_3)
p_3 = p_3.flatten()
l_3 = np.reshape(np.random.binomial(1, p_3), (-1 ,1))
# Peer pressure
# 1 / (1 + e^{-g (c - t)})
p_2 = F @ b_1_last - t_2
p_2 = -g_2 * p_2
p_2 = one + np.exp(p_2)
p_2 = np.reciprocal(p_2)
p_2 = p_2.flatten()
l_2 = np.reshape(np.random.binomial(1, p_2), (-1 ,1))
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Has to flatten q to pass it to the binomial funciton
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Computer R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
#return infection_vector, mask_vector
def covid_mask_habit(A_1, A_2, D_inverse, a_1, t_2, t_3, b_1, b_2, p, alpha, beta, r, k, habit_p):
"""
Description
-----------
This function simulates the spread of two contagions on two different
networks, where contagions are correlated as described in the project
report.
Parameters
----------
A_1: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the social layer
A_2: n x n scipy sparse matrix, int {0, 1}
The adjacency matrix of the disease layer
D_inverse: n x n scipy sparse matrix, float [0, 1]
The inversed diagonal matrix of the social layer.
a_1: n x 1 scipy sparse matrix, int {0, 1}
(a_1)_i = 1 if the person i is prosocial, and (a_1)_i = 0 otherwise.
t_2: n x 1 numpy array, float [0, 1]
(t_2)_i is threshold percentage of neighbors who wear masks for person
i to wear a mask in the next iteration.
t_3: n x 1 numpy array, float [0, 1]
(t_3)_i is the threshold percentage of the overall infection of the population
for person i to wear a mask in the next iteration.
b_1: n x 1 scipy sparse matrix, int {0, 1}
(b_1)_i = 1 if the person i wears a mask at the current iteration.
b_2: n x 1 scipy sparse matrix, int {0, 1}
(b_2)_1 = 1 if the person i is infected by the disease at the current iteration
p: float [0, 1]
Transmission probability of the disease
alpha: The damping factor on p when the person himself wears a mask.
beta: The damping factor on p when a neighbor of a person wears a mask.
r: Recovery probability.
k: The maximum number of time-steps.
"""
# Keep track of the dynamic: {time: [# of masks, # of infections]}
# dynamic = {}
# Compute the degree fraction matrix
F = D_inverse @ A_1
F = sparse.csr_matrix(F)
# The number of vertices
n = np.shape(A_1)[0]
# The one and zero vectors
one = np.ones((n, 1), dtype = 'float')
zero = np.zeros((n, 1), dtype = 'float')
# The recovery vector: b_3
b_3 = np.zeros((n, 1), dtype = 'float') # Initially, no one has recovered.
# The susceptible vector: b_4
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# The largest fraction of infection reached throughout the time
max_frac = 0.0
# The number of days the infection lasts
days = 0
# total number of mask wearings (sum over all days)
total_mask = 0
# mask vector thoughout the time
mask_vector = [] #new
# infection vector
infection_vector = [] #new
# The main loop
for i in range(k):
days += 1
mask_vector.append(float(np.count_nonzero(b_1) / n)) #new
infection_vector.append(float(np.count_nonzero(b_2) / n)) #new
# dynamic[i] = [np.count_nonzero(b_1), np.count_nonzero(b_2), np.count_nonzero(b_3), np.count_nonzero(b_4)]
# need b_1_last to update the state of the second contagion
b_1_last = b_1
b_4_last = b_4
b_2_last = b_2
# Habit formation step
l_habit = np.random.choice([0, 1], size = (n, 1), p=[1.0 - habit_p, habit_p])
l_habit = np.multiply(l_habit, b_1_last)
# The fraction of total number of infections
a_3 = np.count_nonzero(b_2) / float(n)
# Update the max_frac
if a_3 > max_frac:
max_frac = a_3
# determine if the overall faction of infection exceed the threshold
l_3 = -(t_3 - a_3) # Note that I cannot do a_3 - t_3 since a_3 is not a vector
l_3[l_3 >= 0.0] = 1.0
l_3[l_3 < 0.0] = 0.0
# l3 = t_3 <= a_3
# Determine if the fraction of neighbors with wear face masks exceeds a threshold
l_2 = F @ b_1_last - t_2 # sparse?
l_2[l_2 >= 0.0] = 1.0
l_2[l_2 < 0.0] = 0.0
# Update the mask state b_1
b_1 = a_1 + l_2 + l_3 + l_habit # logical operation?
b_1[b_1 >= 1.0] = 1.0 # sparse?
total_mask += np.count_nonzero(b_1)
# The # of infected neighbors of each v
d = A_2 @ b_2_last
# The # of infected neighbors with mask
d_2 = A_2 @ np.multiply(b_1_last, b_2_last) # Very important to pass b_1_last here
# The # of infected neighbors without mask
d_1 = d - d_2
# Only susceptibles (b_4) can be infected
#--------------------------------------------------#
# h1 : the probability of not getting infected from neighbors who do not wear masks (1 - p or 1 - alpha p)
temp = one - (b_1 * (1.0 - alpha)) # IMPORTANT: b_1_last vs b_1 (syn vs asyn)
h_1 = one - (temp * p)
# h2: contains the probability of not getting infected from neighbors who wear masks (1 - beta p or 1 - alpha beta p)
h_2 = one - (temp * beta * p)
temp = np.multiply(np.power(h_1, d_1), np.power(h_2, d_2))
q = np.multiply(b_4, one - temp)
#--------------------------------------------------#
# Has to flatten q to pass it to the binomial funciton
q_f = q.flatten()
# Compute newly infected nodes
newly_infected = np.reshape(np.random.binomial(1, q_f), (-1 ,1))
# Computer R0 (do this before recovery)
# R_0 = np.count_nonzero(newly_infected) / np.count_nonzero(b_2)
# Recovery
rr = np.random.choice([0, 1], size = (n, 1), p=[1.0 - r, r])
b_3 = np.logical_and(b_2, rr) + b_3 # update b_3
b_2 = b_2 - rr
b_2[b_2 == -1] = 0.0
# Update b_2
b_2 = newly_infected + b_2
# Update the susceptible vector
b_4 = -b_2 - b_3
b_4[b_4 == 0.0] = 1.0
b_4[b_4 < 0.0] = 0.0
# A fixed point is reached under zero infection
if np.array_equal(b_2, zero):
# print("A fixed point is reached at iteration {}".format(i))
average_mask = float(total_mask / days)
return round(float(np.count_nonzero(b_3) / n), 4), round(max_frac, 4), days, round(float(average_mask / n), 4)
from collections import namedtuple
import numpy as np
import scipy.ndimage as sn
import scipy.ndimage.morphology as morph
from bruges.util import sigmoid, root, power
def pad_func(before, after):
"""
Padding function. Operates on vector *in place*, per the np.pad
documentation.
"""
def pad_with(x, pad_width, iaxis, kwargs):
x[:pad_width[0]] = before[-pad_width[0]:]
x[-pad_width[1]:] = after[:pad_width[1]]
return
return pad_with
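# Usage sketch (illustrative values): pad_func builds the callback that np.pad
# expects, filling the left pad from `before` and the right pad from `after`.
def _demo_pad_func():
    before = np.array([1, 2, 3])
    after = np.array([7, 8, 9])
    x = np.array([4, 5, 6])
    # Pads two samples on each side: [2, 3, 4, 5, 6, 7, 8]
    return np.pad(x, 2, pad_func(before, after))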
def get_strat(strat,
thickness,
kind='nearest',
position=1,
wedge=None,
zoom_mode='nearest'
):
"""
Take a 'stratigraphy' (either an int, a tuple of ints, or a list-like of
floats) and expand or compress it to the required thickness.
`kind` can be 'nearest', 'linear', 'quadratic', or 'cubic'.
"""
orders = {'nearest': 0, 'linear': 1, 'quadratic': 2, 'cubic': 3}
order = orders.get(kind, 0)
if np.issubdtype(type(strat), int) and (order == 0):
out = np.repeat([strat], thickness)
elif np.issubdtype(type(strat), float) and (order == 0):
out = np.repeat([strat], thickness)
elif isinstance(strat, tuple) and (order == 0):
out = np.repeat(strat, int(round(thickness/len(strat))))
else:
if position == 0:
wedge_zoom = wedge[1]/len(wedge[0])
strat = strat[-int(thickness/wedge_zoom):]
elif position == -1:
wedge_zoom = wedge[1]/len(wedge[0])
strat = strat[:int(thickness/wedge_zoom)]
zoom = thickness / len(strat)
out = sn.zoom(strat, zoom=zoom, order=order, mode=zoom_mode)
# Guarantee correct length by adjusting bottom layer.
missing = int(np.ceil(thickness - out.size))
if out.size > 0 and missing > 0:
out = np.pad(out, [0, missing], mode='edge')
elif out.size > 0 and missing < 0:
out = out[:missing]
return out
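# Usage sketch (illustrative values): an int stratigraphy is simply repeated,
# while a list of floats is resampled to the requested thickness with the
# chosen interpolation order.
def _demo_get_strat():
    flat = get_strat(3, thickness=5)                               # -> [3, 3, 3, 3, 3]
    ramp = get_strat([0.1, 0.5, 0.9], thickness=6, kind='linear')  # resampled to 6 samples
    return flat, ramp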
def get_conforming(strat, thickness, conformance):
"""
Function to deal with top and bottom conforming wedges.
"""
thickness = int(np.ceil(thickness))
import itertools
import warnings
import numpy as np
from numba import njit
from suitesparse_graphblas.utils import claim_buffer, unclaim_buffer
import graphblas as gb
from .. import ffi, lib, monoid
from ..base import call
from ..dtypes import _INDEX, INT64, UINT64, _string_to_dtype, lookup_dtype
from ..exceptions import _error_code_lookup, check_status, check_status_carg
from ..scalar import Scalar, _as_scalar
from ..utils import _CArray, ints_to_numpy_buffer, libget, values_to_numpy_buffer, wrapdoc
from .config import BaseConfig
from .descriptor import get_compression_descriptor, get_nthreads_descriptor
from .matrix import MatrixArray, _concat_mn, normalize_chunks
from .prefix_scan import prefix_scan
from .utils import get_order
ffi_new = ffi.new
NULL = ffi.NULL
@njit
def _head_indices_vector_bitmap(bitmap, values, size, dtype, n, is_iso): # pragma: no cover
indices = np.empty(n, dtype=np.uint64)
if is_iso:
vals = np.empty(1, dtype=dtype)
vals[0] = values[0]
else:
vals = np.empty(n, dtype=dtype)
j = 0
for i in range(size):
if bitmap[i]:
indices[j] = i
if not is_iso:
vals[j] = values[i]
j += 1
if j == n:
break
return indices, vals
def head(vector, n=10, dtype=None, *, sort=False):
"""Like ``vector.to_values()``, but only returns the first n elements.
If sort is True, then the results will be sorted by index, otherwise the order of the
result is not guaranteed. Formats full and bitmap should always return in sorted order.
This changes ``vector.gb_obj``, so care should be taken when using multiple threads.
"""
if dtype is None:
dtype = vector.dtype
else:
dtype = lookup_dtype(dtype)
n = min(n, vector._nvals)
if n == 0:
return (np.empty(0, dtype=np.uint64), np.empty(0, dtype=dtype.np_type))
is_iso = vector.ss.is_iso
d = vector.ss.unpack(raw=True, sort=sort)
fmt = d["format"]
try:
if fmt == "full":
indices = np.arange(n, dtype=np.uint64)
vals = d["values"][:n].astype(dtype.np_type)
elif fmt == "bitmap":
indices, vals = _head_indices_vector_bitmap(
d["bitmap"], d["values"], d["size"], dtype.np_type, n, is_iso
)
elif fmt == "sparse":
indices = d["indices"][:n].copy()
vals = d["values"][:n].astype(dtype.np_type)
else: # pragma: no cover
raise RuntimeError(f"Invalid format: {fmt}")
finally:
vector.ss.pack_any(take_ownership=True, **d)
if is_iso:
vals = np.broadcast_to(vals[:1], (n,))
return indices, vals
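# Hedged usage sketch (requires the SuiteSparse backend): peek at the first two
# entries of a small Vector without materialising all of them. The example
# values are illustrative.
def _demo_head():
    v = gb.Vector.from_values([0, 3, 7, 9], [1.5, 2.5, 3.5, 4.5])
    indices, values = head(v, n=2, sort=True)
    return indices, values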
class VectorConfig(BaseConfig):
"""Get and set configuration options for this Vector.
See SuiteSparse:GraphBLAS documentation for more details.
Config parameters
-----------------
bitmap_switch : double
Threshold that determines when to switch to bitmap format
sparsity_control : Set[str] from {"sparse", "bitmap", "full", "auto"}
Allowed sparsity formats. May be set with a single string or a set of strings.
sparsity_status : str, {"sparse", "bitmap", "full"}
Current sparsity format
"""
_get_function = lib.GxB_Vector_Option_get
_set_function = lib.GxB_Vector_Option_set
_options = {
"bitmap_switch": (lib.GxB_BITMAP_SWITCH, "double"),
"sparsity_control": (lib.GxB_SPARSITY_CONTROL, "int"),
# read-only
"sparsity_status": (lib.GxB_SPARSITY_STATUS, "int"),
# "format": (lib.GxB_FORMAT, "GxB_Format_Value"), # Not useful to show
}
_bitwise = {
"sparsity_control": {
# lib.GxB_HYPERSPARSE: "hypersparse", # For matrices, not vectors
lib.GxB_SPARSE: "sparse",
lib.GxB_BITMAP: "bitmap",
lib.GxB_FULL: "full",
lib.GxB_AUTO_SPARSITY: "auto",
},
}
_enumerations = {
"format": {
lib.GxB_BY_ROW: "by_row",
lib.GxB_BY_COL: "by_col",
# lib.GxB_NO_FORMAT: "no_format", # Used by iterators; not valid here
},
"sparsity_status": {
lib.GxB_HYPERSPARSE: "hypersparse",
lib.GxB_SPARSE: "sparse",
lib.GxB_BITMAP: "bitmap",
lib.GxB_FULL: "full",
},
}
_defaults = {
"sparsity_control": "auto",
}
_read_only = {"sparsity_status", "format"}
class ss:
__slots__ = "_parent", "config"
def __init__(self, parent):
self._parent = parent
self.config = VectorConfig(parent)
@property
def nbytes(self):
size = ffi_new("size_t*")
check_status(lib.GxB_Vector_memoryUsage(size, self._parent._carg), self._parent)
return size[0]
@property
def is_iso(self):
is_iso = ffi_new("bool*")
check_status(lib.GxB_Vector_iso(is_iso, self._parent._carg), self._parent)
return is_iso[0]
@property
def iso_value(self):
if self.is_iso:
# This may not be thread-safe if the parent is being modified in another thread
return Scalar.from_value(next(self.itervalues()), dtype=self._parent.dtype, name="")
raise ValueError("Vector is not iso-valued")
@property
def format(self):
parent = self._parent
sparsity_ptr = ffi_new("GxB_Option_Field*")
check_status(
lib.GxB_Vector_Option_get(parent._carg, lib.GxB_SPARSITY_STATUS, sparsity_ptr),
parent,
)
sparsity_status = sparsity_ptr[0]
if sparsity_status == lib.GxB_SPARSE:
format = "sparse"
elif sparsity_status == lib.GxB_BITMAP:
format = "bitmap"
elif sparsity_status == lib.GxB_FULL:
format = "full"
else: # pragma: no cover
raise NotImplementedError(f"Unknown sparsity status: {sparsity_status}")
return format
def diag(self, matrix, k=0):
"""
GxB_Vector_diag
**This function is deprecated. Use ``Matrix.diag`` or ``Vector.ss.build_diag`` instead.**
"""
warnings.warn(
"`Matrix.ss.diag` is deprecated; "
"please use `Matrix.diag` or `Vector.ss.build_diag` instead",
DeprecationWarning,
)
self.build_diag(matrix, k)
def build_diag(self, matrix, k=0):
"""
GxB_Vector_diag
Extract a diagonal from a Matrix or TransposedMatrix into a Vector.
Existing entries in the Vector are discarded.
Parameters
----------
matrix : Matrix or TransposedMatrix
Extract a diagonal from this matrix.
k : int, default 0
Diagonal in question. Use `k>0` for diagonals above the main diagonal,
and `k<0` for diagonals below the main diagonal.
See Also
--------
Matrix.diag
Vector.diag
"""
matrix = self._parent._expect_type(
matrix,
(gb.Matrix, gb.matrix.TransposedMatrix),
within="ss.build_diag",
argname="matrix",
)
if type(matrix) is gb.matrix.TransposedMatrix:
# Transpose descriptor doesn't do anything, so use the parent
k = -k
matrix = matrix._matrix
call("GxB_Vector_diag", [self._parent, matrix, _as_scalar(k, INT64, is_cscalar=True), None])
def split(self, chunks, *, name=None):
"""
GxB_Matrix_split
Split a Vector into a 1D array of sub-vectors according to `chunks`.
This performs the opposite operation as ``concat``.
`chunks` is short for "chunksizes" and indicates the chunk sizes.
`chunks` may be a single integer, or a tuple or list. Example chunks:
- ``chunks=10``
- Split vector into chunks of size 10 (the last chunk may be smaller).
- ``chunks=[5, 10]``
- Split vector into two chunks of size 5 and 10.
See Also
--------
Vector.ss.concat
graphblas.ss.concat
"""
from ..vector import Vector
tile_nrows, _ = normalize_chunks([chunks, None], (self._parent._size, 1))
m = len(tile_nrows)
tiles = ffi_new("GrB_Matrix[]", m)
parent = self._parent._as_matrix()
call(
"GxB_Matrix_split",
[
MatrixArray(tiles, parent, name="tiles"),
_as_scalar(m, _INDEX, is_cscalar=True),
_as_scalar(1, _INDEX, is_cscalar=True),
_CArray(tile_nrows),
_CArray([1]),
parent,
None,
],
)
rv = []
dtype = self._parent.dtype
if name is None:
name = self._parent.name
for i, size in enumerate(tile_nrows):
# Copy to a new handle so we can free `tiles`
new_vector = ffi_new("GrB_Vector*")
new_vector[0] = ffi.cast("GrB_Vector", tiles[i])
tile = Vector._from_obj(new_vector, dtype, size, name=f"{name}_{i}")
rv.append(tile)
return rv
def _concat(self, tiles, m):
ctiles = ffi_new("GrB_Matrix[]", m)
for i, tile in enumerate(tiles):
ctiles[i] = tile.gb_obj[0]
call(
"GxB_Matrix_concat",
[
self._parent._as_matrix(),
MatrixArray(ctiles, name="tiles"),
_as_scalar(m, _INDEX, is_cscalar=True),
_as_scalar(1, _INDEX, is_cscalar=True),
None,
],
)
def concat(self, tiles):
"""
GxB_Matrix_concat
Concatenate a 1D list of Vector objects into the current Vector.
Any existing values in the current Vector will be discarded.
To concatenate into a new Vector, use `graphblas.ss.concat`.
This performs the opposite operation as ``split``.
See Also
--------
Vector.ss.split
graphblas.ss.concat
"""
tiles, m, n, is_matrix = _concat_mn(tiles, is_matrix=False)
self._concat(tiles, m)
def build_scalar(self, indices, value):
"""
GxB_Vector_build_Scalar
Like ``build``, but uses a scalar for all the values.
See Also
--------
Vector.build
Vector.from_values
"""
indices = ints_to_numpy_buffer(indices, np.uint64, name="indices")
scalar = _as_scalar(value, self._parent.dtype, is_cscalar=False) # pragma: is_grbscalar
call(
"GxB_Vector_build_Scalar",
[
self._parent,
_CArray(indices),
scalar,
_as_scalar(indices.size, _INDEX, is_cscalar=True),
],
)
def _begin_iter(self, seek):
it_ptr = ffi_new("GxB_Iterator*")
info = lib.GxB_Iterator_new(it_ptr)
it = it_ptr[0]
success = lib.GrB_SUCCESS
info = lib.GxB_Vector_Iterator_attach(it, self._parent._carg, NULL)
if info != success: # pragma: no cover
lib.GxB_Iterator_free(it_ptr)
raise _error_code_lookup[info]("Vector iterator failed to attach")
if seek < 0:
p = lib.GxB_Vector_Iterator_getpmax(it)
seek += p
if seek < 0:
seek = 0
info = lib.GxB_Vector_Iterator_seek(it, seek)
if info != success:
lib.GxB_Iterator_free(it_ptr)
raise _error_code_lookup[info]("Vector iterator failed to seek")
return it_ptr
def iterkeys(self, seek=0):
"""Iterate over all the indices of a Vector.
Parameters
----------
seek : int, default 0
Index of entry to seek to. May be negative to seek backwards from the end.
Vector objects in bitmap format seek as if it's full format (i.e., it
ignores the bitmap mask).
The Vector should not be modified during iteration; doing so will
result in undefined behavior.
"""
try:
it_ptr = self._begin_iter(seek)
except StopIteration:
return
it = it_ptr[0]
info = success = lib.GrB_SUCCESS
key_func = lib.GxB_Vector_Iterator_getIndex
next_func = lib.GxB_Vector_Iterator_next
while info == success:
yield key_func(it)
info = next_func(it)
lib.GxB_Iterator_free(it_ptr)
if info != lib.GxB_EXHAUSTED: # pragma: no cover
raise _error_code_lookup[info]("Vector iterator failed")
def itervalues(self, seek=0):
"""Iterate over all the values of a Vector.
Parameters
----------
seek : int, default 0
Index of entry to seek to. May be negative to seek backwards from the end.
Vector objects in bitmap format seek as if it's full format (i.e., it
ignores the bitmap mask).
The Vector should not be modified during iteration; doing so will
result in undefined behavior.
"""
try:
it_ptr = self._begin_iter(seek)
except StopIteration:
return
it = it_ptr[0]
info = success = lib.GrB_SUCCESS
val_func = getattr(lib, f"GxB_Iterator_get_{self._parent.dtype.name}")
next_func = lib.GxB_Vector_Iterator_next
while info == success:
yield val_func(it)
info = next_func(it)
lib.GxB_Iterator_free(it_ptr)
if info != lib.GxB_EXHAUSTED: # pragma: no cover
raise _error_code_lookup[info]("Vector iterator failed")
def iteritems(self, seek=0):
"""Iterate over all the indices and values of a Vector.
Parameters
----------
seek : int, default 0
Index of entry to seek to. May be negative to seek backwards from the end.
Vector objects in bitmap format seek as if it's full format (i.e., it
ignores the bitmap mask).
The Vector should not be modified during iteration; doing so will
result in undefined behavior.
"""
try:
it_ptr = self._begin_iter(seek)
except StopIteration:
return
it = it_ptr[0]
info = success = lib.GrB_SUCCESS
key_func = lib.GxB_Vector_Iterator_getIndex
val_func = getattr(lib, f"GxB_Iterator_get_{self._parent.dtype.name}")
next_func = lib.GxB_Vector_Iterator_next
while info == success:
yield (key_func(it), val_func(it))
info = next_func(it)
lib.GxB_Iterator_free(it_ptr)
if info != lib.GxB_EXHAUSTED: # pragma: no cover
raise _error_code_lookup[info]("Vector iterator failed")
def export(self, format=None, *, sort=False, give_ownership=False, raw=False):
"""
GxB_Vector_export_xxx
Parameters
----------
format : str or None, default None
If `format` is not specified, this method exports in the currently stored format.
To control the export format, set `format` to one of:
- "sparse"
- "bitmap"
- "full"
sort : bool, default False
Whether to sort indices if the format is "sparse"
give_ownership : bool, default False
Perform a zero-copy data transfer to Python if possible. This gives ownership of
the underlying memory buffers to Numpy.
** If True, this nullifies the current object, which should no longer be used! **
raw : bool, default False
If True, always return array the same size as returned by SuiteSparse.
If False, arrays may be trimmed to be the expected size.
It may make sense to choose ``raw=True`` if one wants to use the data to perform
a zero-copy import back to SuiteSparse.
Returns
-------
dict; keys depend on `format` and `raw` arguments (see below).
See Also
--------
Vector.to_values
Vector.ss.import_any
Return values
- Note: for `raw=True`, arrays may be larger than specified.
- "sparse" format
- indices : ndarray(dtype=uint64, size=nvals)
- values : ndarray(size=nvals)
- sorted_index : bool
- True if the values in "indices" are sorted
- size : int
- nvals : int, only present if raw == True
- "bitmap" format
- bitmap : ndarray(dtype=bool8, size=size)
- values : ndarray(size=size)
- Elements where bitmap is False are undefined
- nvals : int
- The number of True elements in the bitmap
- size : int, only present if raw == True
- "full" format
- values : ndarray(size=size)
- size : int, only present if raw == True or is_iso == True
Examples
--------
Simple usage:
>>> pieces = v.ss.export()
>>> v2 = Vector.ss.import_any(**pieces)
"""
return self._export(
format=format, sort=sort, give_ownership=give_ownership, raw=raw, method="export"
)
def unpack(self, format=None, *, sort=False, raw=False):
"""
GxB_Vector_unpack_xxx
`unpack` is like `export`, except that the Vector remains valid but empty.
`pack_*` methods are the opposite of `unpack`.
See `Vector.ss.export` documentation for more details.
"""
return self._export(format=format, sort=sort, give_ownership=True, raw=raw, method="unpack")
def _export(self, format=None, *, sort=False, give_ownership=False, raw=False, method):
if give_ownership:
parent = self._parent
else:
parent = self._parent.dup(name=f"v_{method}")
dtype = parent.dtype.np_type
index_dtype = np.dtype(np.uint64)
if format is None:
format = self.format
else:
format = format.lower()
size = parent._size
if method == "export":
vhandle = ffi_new("GrB_Vector*", parent._carg)
type_ = ffi_new("GrB_Type*")
size_ = ffi_new("GrB_Index*")
args = (type_, size_)
else:
vhandle = parent._carg
args = ()
vx = ffi_new("void**")
vx_size = ffi_new("GrB_Index*")
if sort:
jumbled = NULL
else:
jumbled = ffi_new("bool*")
is_iso = ffi_new("bool*")
if format == "sparse":
vi = ffi_new("GrB_Index**")
vi_size = ffi_new("GrB_Index*")
nvals = ffi_new("GrB_Index*")
check_status(
libget(f"GxB_Vector_{method}_CSC")(
vhandle,
*args,
vi,
vx,
vi_size,
vx_size,
is_iso,
nvals,
jumbled,
NULL,
),
parent,
)
is_iso = is_iso[0]
nvals = nvals[0]
indices = claim_buffer(ffi, vi[0], vi_size[0] // index_dtype.itemsize, index_dtype)
values = claim_buffer(ffi, vx[0], vx_size[0] // dtype.itemsize, dtype)
if not raw:
if indices.size > nvals: # pragma: no cover
indices = indices[:nvals]
if is_iso:
if values.size > 1: # pragma: no cover
values = values[:1]
else:
if values.size > nvals: # pragma: no cover
values = values[:nvals]
rv = {
"size": size,
"indices": indices,
"sorted_index": True if sort else not jumbled[0],
}
if raw:
rv["nvals"] = nvals
elif format == "bitmap":
vb = ffi_new("int8_t**")
vb_size = ffi_new("GrB_Index*")
nvals = ffi_new("GrB_Index*")
check_status(
libget(f"GxB_Vector_{method}_Bitmap")(
vhandle, *args, vb, vx, vb_size, vx_size, is_iso, nvals, NULL
),
parent,
)
is_iso = is_iso[0]
bool_dtype = np.dtype(np.bool8)
bitmap = claim_buffer(ffi, vb[0], vb_size[0] // bool_dtype.itemsize, bool_dtype)
values = claim_buffer(ffi, vx[0], vx_size[0] // dtype.itemsize, dtype)
if not raw:
if bitmap.size > size: # pragma: no cover
bitmap = bitmap[:size]
if is_iso:
if values.size > 1: # pragma: no cover
values = values[:1]
else:
if values.size > size: # pragma: no cover
values = values[:size]
rv = {
"bitmap": bitmap,
"nvals": nvals[0],
}
if raw:
rv["size"] = size
elif format == "full":
check_status(
libget(f"GxB_Vector_{method}_Full")(vhandle, *args, vx, vx_size, is_iso, NULL),
parent,
)
is_iso = is_iso[0]
values = claim_buffer(ffi, vx[0], vx_size[0] // dtype.itemsize, dtype)
if not raw:
if is_iso:
if values.size > 1: # pragma: no cover
values = values[:1]
else:
if values.size > size: # pragma: no cover
values = values[:size]
rv = {}
if raw or is_iso:
rv["size"] = size
else:
raise ValueError(f"Invalid format: {format}")
rv["is_iso"] = is_iso
rv.update(
format=format,
values=values,
)
if method == "export":
parent.gb_obj[0] = NULL
if parent.dtype._is_udt:
rv["dtype"] = parent.dtype
return rv
@classmethod
def import_any(
cls,
*,
# All
values,
size=None,
is_iso=False,
take_ownership=False,
format=None,
dtype=None,
name=None,
# Sparse
indices=None,
sorted_index=False,
# Bitmap
bitmap=None,
# Bitmap/Sparse
nvals=None, # optional
):
"""
GxB_Vector_import_xxx
Dispatch to appropriate import method inferred from inputs.
See the other import functions and ``Vector.ss.export`` for details.
Returns
-------
Vector
See Also
--------
Vector.from_values
Vector.ss.export
Vector.ss.import_sparse
Vector.ss.import_bitmap
Vector.ss.import_full
Examples
--------
Simple usage:
>>> pieces = v.ss.export()
>>> v2 = Vector.ss.import_any(**pieces)
"""
return cls._import_any(
values=values,
size=size,
is_iso=is_iso,
take_ownership=take_ownership,
format=format,
dtype=dtype,
name=name,
# Sparse
indices=indices,
sorted_index=sorted_index,
# Bitmap
bitmap=bitmap,
# Bitmap/Sparse
nvals=nvals,
method="import",
)
def pack_any(
self,
*,
# All
values,
is_iso=False,
take_ownership=False,
format=None,
# Sparse
indices=None,
sorted_index=False,
# Bitmap
bitmap=None,
# Bitmap/Sparse
nvals=None, # optional
# Unused for pack
size=None,
dtype=None,
name=None,
):
"""
GxB_Vector_pack_xxx
`pack_any` is like `import_any` except it "packs" data into an
existing Vector. This is the opposite of ``unpack()``
See `Vector.ss.import_any` documentation for more details.
"""
return self._import_any(
values=values,
is_iso=is_iso,
take_ownership=take_ownership,
format=format,
# Sparse
indices=indices,
sorted_index=sorted_index,
# Bitmap
bitmap=bitmap,
# Bitmap/Sparse
nvals=nvals,
method="pack",
vector=self._parent,
)
@classmethod
def _import_any(
cls,
*,
# All
values,
size=None,
is_iso=False,
take_ownership=False,
format=None,
dtype=None,
name=None,
# Sparse
indices=None,
sorted_index=False,
# Bitmap
bitmap=None,
# Bitmap/Sparse
nvals=None, # optional
method,
vector=None,
):
if format is None:
if indices is not None:
if bitmap is not None:
raise TypeError("Cannot provide both `indices` and `bitmap`")
format = "sparse"
elif bitmap is not None:
format = "bitmap"
else:
format = "full"
else:
format = format.lower()
if method == "pack":
obj = vector.ss
else:
obj = cls
if format == "sparse":
return getattr(obj, f"{method}_sparse")(
size=size,
indices=indices,
values=values,
nvals=nvals,
is_iso=is_iso,
sorted_index=sorted_index,
take_ownership=take_ownership,
dtype=dtype,
name=name,
)
elif format == "bitmap":
return getattr(obj, f"{method}_bitmap")(
nvals=nvals,
bitmap=bitmap,
values=values,
size=size,
is_iso=is_iso,
take_ownership=take_ownership,
dtype=dtype,
name=name,
)
elif format == "full":
return getattr(obj, f"{method}_full")(
values=values,
size=size,
is_iso=is_iso,
take_ownership=take_ownership,
dtype=dtype,
name=name,
)
else:
raise ValueError(f"Invalid format: {format}")
@classmethod
def import_sparse(
cls,
*,
size,
indices,
values,
nvals=None,
is_iso=False,
sorted_index=False,
take_ownership=False,
dtype=None,
format=None,
name=None,
):
"""
GxB_Vector_import_CSC
Create a new Vector from sparse input.
Parameters
----------
size : int
indices : array-like
values : array-like
nvals : int, optional
The number of elements in "values" to use.
If not specified, will be set to ``len(values)``.
is_iso : bool, default False
Is the Vector iso-valued (meaning all the same value)?
If true, then `values` should be a length 1 array.
sorted_index : bool, default False
Indicate whether the values in "indices" are sorted.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
memory buffers to GraphBLAS, the arrays must:
- be C contiguous
- have the correct dtype (uint64 for indices)
- own its own data
- be writeable
If all of these conditions are not met, then the data will be
copied and the original array will be unmodified. If zero copy
to GraphBLAS is successful, then the array will be modified to be
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Vector.
If not specified, this will be inferred from `values`.
format : str, optional
Must be "sparse" or None. This is included to be compatible with
the dict returned from exporting.
name : str, optional
Name of the new Vector.
Returns
-------
Vector
"""
return cls._import_sparse(
size=size,
indices=indices,
values=values,
nvals=nvals,
is_iso=is_iso,
sorted_index=sorted_index,
take_ownership=take_ownership,
dtype=dtype,
format=format,
name=name,
method="import",
)
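# Hypothetical usage sketch (values chosen for illustration only):
#   v = Vector.ss.import_sparse(
#       size=10,
#       indices=np.array([0, 3, 7], dtype=np.uint64),
#       values=np.array([1.5, 2.5, 3.5]),
#       sorted_index=True,
#   )
# creates a size-10 Vector with entries stored at indices 0, 3 and 7.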
def pack_sparse(
self,
*,
indices,
values,
nvals=None,
is_iso=False,
sorted_index=False,
take_ownership=False,
format=None,
**ignored_kwargs,
):
"""
GxB_Vector_pack_CSC
`pack_sparse` is like `import_sparse` except it "packs" data into an
existing Vector. This is the opposite of ``unpack("sparse")``
See `Vector.ss.import_sparse` documentation for more details.
"""
return self._import_sparse(
indices=indices,
values=values,
nvals=nvals,
is_iso=is_iso,
sorted_index=sorted_index,
take_ownership=take_ownership,
format=format,
method="pack",
vector=self._parent,
)
@classmethod
def _import_sparse(
cls,
*,
size=None,
indices,
values,
nvals=None,
is_iso=False,
sorted_index=False,
take_ownership=False,
dtype=None,
format=None,
name=None,
method,
vector=None,
):
if format is not None and format.lower() != "sparse":
raise ValueError(f"Invalid format: {format!r}. Must be None or 'sparse'.")
copy = not take_ownership
indices = ints_to_numpy_buffer(indices, np.uint64, copy=copy, ownable=True, name="indices")
if method == "pack":
dtype = vector.dtype
values, dtype = values_to_numpy_buffer(values, dtype, copy=copy, ownable=True)
if indices is values:
values = np.copy(values)
vi = ffi_new("GrB_Index**", ffi.from_buffer("GrB_Index*", indices))
vx = ffi_new("void**", ffi.from_buffer("void*", values))
if nvals is None:
if is_iso:
nvals = indices.size
elif dtype.np_type.subdtype is not None:
nvals = values.shape[0]
else:
nvals = values.size
if method == "import":
vhandle = ffi_new("GrB_Vector*")
args = (dtype._carg, size)
else:
vhandle = vector._carg
args = ()
status = libget(f"GxB_Vector_{method}_CSC")(
vhandle,
*args,
vi,
vx,
indices.nbytes,
values.nbytes,
is_iso,
nvals,
not sorted_index,
NULL,
)
if method == "import":
check_status_carg(
status,
"Vector",
vhandle[0],
)
vector = gb.Vector._from_obj(vhandle, dtype, size, name=name)
else:
check_status(status, vector)
unclaim_buffer(indices)
unclaim_buffer(values)
return vector
@classmethod
def import_bitmap(
cls,
*,
bitmap,
values,
nvals=None,
size=None,
is_iso=False,
take_ownership=False,
dtype=None,
format=None,
name=None,
):
"""
GxB_Vector_import_Bitmap
Create a new Vector from values and bitmap (as mask) arrays.
Parameters
----------
bitmap : array-like
True elements indicate where there are values in "values".
values : array-like
nvals : int, optional
The number of True elements in the bitmap for this Vector.
size : int, optional
The size of the new Vector.
If not specified, it will be set to the size of values.
is_iso : bool, default False
Is the Vector iso-valued (meaning all the same value)?
If true, then `values` should be a length 1 array.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
memory buffers to GraphBLAS, the arrays must:
- be C contiguous
- have the correct dtype (bool8 for bitmap)
- own its own data
- be writeable
If all of these conditions are not met, then the data will be
copied and the original array will be unmodified. If zero copy
to GraphBLAS is successful, then the array will be modified to be
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Vector.
If not specified, this will be inferred from `values`.
format : str, optional
Must be "bitmap" or None. This is included to be compatible with
the dict returned from exporting.
name : str, optional
Name of the new Vector.
Returns
-------
Vector
"""
return cls._import_bitmap(
bitmap=bitmap,
values=values,
nvals=nvals,
size=size,
is_iso=is_iso,
take_ownership=take_ownership,
dtype=dtype,
format=format,
name=name,
method="import",
)
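# Hypothetical usage sketch (values chosen for illustration only):
#   v = Vector.ss.import_bitmap(
#       bitmap=np.array([True, False, True, False]),
#       values=np.array([10.0, 0.0, 30.0, 0.0]),
#   )
# creates a size-4 Vector with values 10.0 and 30.0 stored at indices 0 and 2.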
def pack_bitmap(
self,
*,
bitmap,
values,
nvals=None,
is_iso=False,
take_ownership=False,
format=None,
**unused_kwargs,
):
"""
GxB_Vector_pack_Bitmap
`pack_bitmap` is like `import_bitmap` except it "packs" data into an
existing Vector. This is the opposite of ``unpack("bitmap")``
See `Vector.ss.import_bitmap` documentation for more details.
"""
return self._import_bitmap(
bitmap=bitmap,
values=values,
nvals=nvals,
is_iso=is_iso,
take_ownership=take_ownership,
format=format,
method="pack",
vector=self._parent,
)
@classmethod
def _import_bitmap(
cls,
*,
bitmap,
values,
nvals=None,
size=None,
is_iso=False,
take_ownership=False,
dtype=None,
format=None,
name=None,
method,
vector=None,
):
if format is not None and format.lower() != "bitmap":
raise ValueError(f"Invalid format: {format!r}. Must be None or 'bitmap'.")
copy = not take_ownership
bitmap = ints_to_numpy_buffer(bitmap, np.bool8, copy=copy, ownable=True, name="bitmap")
if method == "pack":
dtype = vector.dtype
size = vector._size
values, dtype = values_to_numpy_buffer(values, dtype, copy=copy, ownable=True)
if bitmap is values:
values = np.copy(values)
vhandle = ffi_new("GrB_Vector*")
vb = ffi_new("int8_t**", ffi.from_buffer("int8_t*", bitmap))
vx = ffi_new("void**", ffi.from_buffer("void*", values))
if size is None:
if is_iso:
size = bitmap.size
elif dtype.np_type.subdtype is not None:
size = values.shape[0]
else:
size = values.size
if nvals is None:
if bitmap.size == size:
nvals = np.count_nonzero(bitmap)
else:
nvals = np.count_nonzero(bitmap.ravel()[:size])
if method == "import":
vhandle = ffi_new("GrB_Vector*")
args = (dtype._carg, size)
else:
vhandle = vector._carg
args = ()
status = libget(f"GxB_Vector_{method}_Bitmap")(
vhandle,
*args,
vb,
vx,
bitmap.nbytes,
values.nbytes,
is_iso,
nvals,
NULL,
)
if method == "import":
check_status_carg(
status,
"Vector",
vhandle[0],
)
vector = gb.Vector._from_obj(vhandle, dtype, size, name=name)
else:
check_status(status, vector)
unclaim_buffer(bitmap)
unclaim_buffer(values)
return vector
@classmethod
def import_full(
cls,
values,
*,
size=None,
is_iso=False,
take_ownership=False,
dtype=None,
format=None,
name=None,
):
"""
GxB_Vector_import_Full
Create a new Vector from values.
Parameters
----------
values : array-like
size : int, optional
The size of the new Vector.
If not specified, it will be set to the size of values.
is_iso : bool, default False
Is the Vector iso-valued (meaning all the same value)?
If true, then `values` should be a length 1 array.
take_ownership : bool, default False
If True, perform a zero-copy data transfer from input numpy arrays
to GraphBLAS if possible. To give ownership of the underlying
memory buffers to GraphBLAS, the arrays must:
- be C contiguous
- have the correct dtype
- own its own data
- be writeable
If all of these conditions are not met, then the data will be
copied and the original array will be unmodified. If zero copy
to GraphBLAS is successful, then the array will be modified to be
read-only and will no longer own the data.
dtype : dtype, optional
dtype of the new Vector.
If not specified, this will be inferred from `values`.
format : str, optional
Must be "full" or None. This is included to be compatible with
the dict returned from exporting.
name : str, optional
Name of the new Vector.
Returns
-------
Vector
"""
return cls._import_full(
values=values,
size=size,
is_iso=is_iso,
take_ownership=take_ownership,
dtype=dtype,
format=format,
name=name,
method="import",
)
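# Hypothetical usage sketch: a dense ("full") Vector holding 0.0 .. 4.0:
#   v = Vector.ss.import_full(np.arange(5, dtype=np.float64))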
def pack_full(
self,
values,
*,
is_iso=False,
take_ownership=False,
format=None,
**unused_kwargs,
):
"""
GxB_Vector_pack_Full
`pack_full` is like `import_full` except it "packs" data into an
existing Vector. This is the opposite of ``unpack("full")``
See `Vector.ss.import_full` documentation for more details.
"""
return self._import_full(
values=values,
is_iso=is_iso,
take_ownership=take_ownership,
format=format,
method="pack",
vector=self._parent,
)
@classmethod
def _import_full(
cls,
*,
values,
size=None,
is_iso=False,
take_ownership=False,
dtype=None,
format=None,
name=None,
method,
vector=None,
):
if format is not None and format.lower() != "full":
raise ValueError(f"Invalid format: {format!r}. Must be None or 'full'.")
copy = not take_ownership
if method == "pack":
dtype = vector.dtype
size = vector._size
values, dtype = values_to_numpy_buffer(values, dtype, copy=copy, ownable=True)
vhandle = ffi_new("GrB_Vector*")
vx = ffi_new("void**", ffi.from_buffer("void*", values))
if size is None:
if dtype.np_type.subdtype is not None:
size = values.shape[0]
else:
size = values.size
if method == "import":
vhandle = ffi_new("GrB_Vector*")
args = (dtype._carg, size)
else:
vhandle = vector._carg
args = ()
status = libget(f"GxB_Vector_{method}_Full")(
vhandle,
*args,
vx,
values.nbytes,
is_iso,
NULL,
)
if method == "import":
check_status_carg(
status,
"Vector",
vhandle[0],
)
vector = gb.Vector._from_obj(vhandle, dtype, size, name=name)
else:
check_status(status, vector)
unclaim_buffer(values)
return vector
@wrapdoc(head)
def head(self, n=10, dtype=None, *, sort=False):
return head(self._parent, n, dtype, sort=sort)
def scan(self, op=monoid.plus, *, name=None):
"""Perform a prefix scan with the given monoid.
For example, use `monoid.plus` (the default) to perform a cumulative sum,
and `monoid.times` for cumulative product. Works with any monoid.
Returns
-------
Vector
"""
return prefix_scan(self._parent, op, name=name, within="scan")
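# Illustrative example (not from the original source):
#   v = Vector.from_values([0, 1, 2, 3], [1, 2, 3, 4])
#   v.ss.scan()              # cumulative sum with monoid.plus: 1, 3, 6, 10
#   v.ss.scan(monoid.times)  # cumulative product:              1, 2, 6, 24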
def reshape(self, nrows, ncols=None, order="rowwise", *, name=None):
"""Return a copy of the Vector as a Matrix of the given shape.
The shape of the Matrix must be compatible with the original shape.
That is, the number of elements must be equal: ``nrows * ncols == size``.
One of the dimensions may be -1, which will infer the correct size.
Parameters
----------
nrows : int or tuple of ints
ncols : int or None
order : {"rowwise", "columnwise"}, optional
"rowwise" means to fill the Matrix in row-major (C-style) order.
Aliases of "rowwise" also accepted: "row", "rows", "C".
"columnwise" means to fill the Matrix in column-major (F-style) order.
Aliases of "rowwise" also accepted: "col", "cols", "column", "columns", "F".
The default is "rowwise".
name : str, optional
Name of the new Matrix.
Returns
-------
Matrix
See Also
--------
Matrix.ss.flatten : flatten a Matrix into a Vector.
"""
order = get_order(order)
array = np.broadcast_to(False, self._parent._size)
if ncols is None:
array = array.reshape(nrows)
else:
array = array.reshape(nrows, ncols)
if array.ndim != 2:
raise ValueError(f"Shape tuple must be of length 2, not {array.ndim}")
nrows, ncols = array.shape
fmt = self.format
if fmt == "sparse":
info = self.export(sort=True)
indices = info["indices"]
if order == "rowwise":
return gb.Matrix.ss.import_coor(
nrows=nrows,
ncols=ncols,
rows=indices // ncols,
cols=indices % ncols,
values=info["values"],
is_iso=info["is_iso"],
sorted_cols=True,
take_ownership=True,
name=name,
)
else:
return gb.Matrix.ss.import_cooc(
nrows=nrows,
ncols=ncols,
cols=indices // nrows,
rows=indices % nrows,
values=info["values"],
is_iso=info["is_iso"],
sorted_rows=True,
take_ownership=True,
name=name,
)
elif fmt == "bitmap":
info = self.export(raw=True)
if order == "rowwise":
method = gb.Matrix.ss.import_bitmapr
else:
method = gb.Matrix.ss.import_bitmapc
return method(
nrows=nrows,
ncols=ncols,
bitmap=info["bitmap"],
values=info["values"],
nvals=info["nvals"],
is_iso=info["is_iso"],
take_ownership=True,
name=name,
)
elif fmt == "full":
info = self.export(raw=True)
if order == "rowwise":
method = gb.Matrix.ss.import_fullr
else:
method = gb.Matrix.ss.import_fullc
return method(
nrows=nrows,
ncols=ncols,
values=info["values"],
is_iso=info["is_iso"],
take_ownership=True,
name=name,
)
else:
raise NotImplementedError(fmt)
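# Illustrative example (not from the original source):
#   v = Vector.from_values([0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5])   # size 6
#   A = v.ss.reshape(2, 3)                        # 2x3 Matrix, filled row by row
#   B = v.ss.reshape(3, -1, order="columnwise")   # 3x2 Matrix, filled column by column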
def selectk(self, how, k, *, name=None):
"""Select (up to) k elements.
Parameters
----------
how : str
- "random": choose k elements with equal probability
Chosen values may not be ordered randomly
- "first": choose the first k elements
- "last": choose the last k elements
- "largest": choose the k largest elements. If tied, any may be chosen.
- "smallest": choose the k smallest elements. If tied, any may be chosen.
k : int
The number of elements to choose
**THIS API IS EXPERIMENTAL AND MAY CHANGE**
"""
how = how.lower()
if k < 0:
raise ValueError("negative k is not allowed")
do_sort = how in {"first", "last"}
info = self._parent.ss.export("sparse", sort=do_sort)
if how == "random":
choices = random_choice(self._parent._nvals, k)
elif how == "first" or info["is_iso"] and how in {"largest", "smallest"}:
choices = slice(None, k)
elif how == "last":
choices = slice(-k, None)
elif how == "largest":
choices = np.argpartition(info["values"], -k)[-k:] # not sorted
elif how == "smallest":
choices = np.argpartition(info["values"], k)[:k] # not sorted
else:
raise ValueError(
'`how` argument must be one of: "random", "first", "last", "largest", "smallest"'
)
newinfo = dict(info, indices=info["indices"][choices])
if not info["is_iso"]:
newinfo["values"] = info["values"][choices]
if k == 1:
newinfo["sorted_index"] = True
elif not do_sort:
newinfo["sorted_index"] = False
return gb.Vector.ss.import_sparse(
**newinfo,
take_ownership=True,
name=name,
)
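# Illustrative examples (not from the original source):
#   v.ss.selectk("largest", 3)   # keep (up to) the 3 largest stored values
#   v.ss.selectk("first", 2)     # keep the 2 stored values with the lowest indices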
def compactify(self, how="first", size=None, *, reverse=False, asindex=False, name=None):
"""Shift all values to the beginning so all values are contiguous.
This returns a new Vector.
Parameters
----------
how : {"first", "last", "smallest", "largest", "random"}, optional
How to compress the values:
- first : take the values nearest the beginning
- last : take the values nearest the end
- smallest : take the smallest values (if tied, may take any)
- largest : take the largest values (if tied, may take any)
- random : take values randomly with equal probability and without replacement
reverse : bool, default False
Reverse the values when True
asindex : bool, default False
Return the index of the value when True. If there are ties for
"smallest" and "largest", then any valid index may be returned.
size : int, optional
The size of the returned Vector. If not specified, then the Vector
will be "compacted" to the smallest size that doesn't lose values.
**THIS API IS EXPERIMENTAL AND MAY CHANGE**
"""
how = how.lower()
if how not in {"first", "last", "smallest", "largest", "random"}:
raise ValueError(
'`how` argument must be one of: "first", "last", "smallest", "largest", "random"'
)
if size is None and self._parent._nvals == 0 or size == 0:
if asindex:
return gb.Vector(UINT64, size=0, name=name)
else:
return gb.Vector(self._parent.dtype, size=0, name=name)
do_sort = how in {"first", "last"}
info = self._parent.ss.export("sparse", sort=do_sort)
if size is None:
size = info["indices"].size
if info["is_iso"]:
if how in {"smallest", "largest"} or how == "random" and not asindex:
# order of smallest/largest/random doesn't matter
how = "first"
reverse = False
if not asindex:
how = "finished"
reverse = False
else:
info["is_iso"] = False
if how == "random":
choices = random_choice(self._parent._nvals, size)
elif how == "first":
if reverse:
choices = slice(size - 1, None, -1)
reverse = False
else:
choices = slice(None, size)
elif how == "last":
if reverse:
choices = slice(-size, None)
reverse = False
else:
choices = slice(None, -size - 1, -1)
elif how in {"largest", "smallest"}:
values = info["values"]
if how == "largest":
slc = slice(-size, None)
stop = -size
reverse = not reverse
else:
slc = slice(size)
stop = size
if asindex:
if size < values.size:
idx = np.argpartition(values, stop)[slc]
choices = idx[np.argsort(values[idx])]
else:
choices = np.argsort(values)
values = info["indices"][choices]
else:
if size < values.size:
values = np.partition(values, stop)[slc]
values.sort()
else:
choices = slice(None)
if how not in {"largest", "smallest"}:
if asindex:
values = info["indices"][choices]
else:
values = info["values"][choices]
if reverse:
values = values[::-1]
newinfo = dict(
info,
values=values,
indices=np.arange(size, dtype=np.uint64),
sorted_index=True,
size=size,
)
return gb.Vector.ss.import_sparse(
**newinfo,
take_ownership=True,
name=name,
)
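# Illustrative examples (not from the original source):
#   w = v.ss.compactify("smallest", 5)            # 5 smallest values at indices 0..4
#   idx = v.ss.compactify("first", asindex=True)  # original indices instead of values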
def serialize(self, compression="default", level=None, *, nthreads=None):
"""Serialize a Vector to bytes (as numpy array) using SuiteSparse GxB_Vector_serialize.
Parameters
----------
compression : {"default", "lz4", "lz4hc", "none", None}, optional
Whether and how to compress the data.
- "default": the default in SuiteSparse:GraphBLAS, which is currently LZ4
- "lz4": the default LZ4 compression
- "lz4hc": LZ4 compression that allows the compression level (1-9) to be set.
Low compression level (1) is faster, high (9) is more compact. Default is 9.
- "none" or None: no compression
level : int [1-9], optional
The compression level, between 1 to 9, to use with "lz4hc" compression.
(1) is the fastest and largest, (9) is the slowest and most compressed.
Level 9 is the default when using "lz4hc" compression.
nthreads : int, optional
The maximum number of threads to use when serializing the Vector.
None, 0 or negative nthreads means to use the default number of threads.
For best performance, this function returns a numpy array with uint8 dtype.
Use `Vector.ss.deserialize(blob)` to create a Vector from the result of serialization.
This method is intended to support all serialization options from SuiteSparse:GraphBLAS.
*Warning*: Behavior of serializing UDTs is experimental and may change in a future release.
"""
desc = get_compression_descriptor(compression, level=level, nthreads=nthreads)
blob_handle = ffi_new("void**")
blob_size_handle = ffi_new("GrB_Index*")
parent = self._parent
check_status(
lib.GxB_Vector_serialize(
blob_handle,
blob_size_handle,
parent._carg,
desc._carg,
),
parent,
)
return claim_buffer(ffi, blob_handle[0], blob_size_handle[0], np.dtype(np.uint8))
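# Illustrative example (not from the original source):
#   blob = v.ss.serialize("lz4hc", 5)   # uint8 numpy array, LZ4HC level 5
#   w = Vector.ss.deserialize(blob)
#   assert w.isequal(v)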
@classmethod
def deserialize(cls, data, dtype=None, *, nthreads=None, name=None):
"""Deserialize a Vector from bytes, buffer, or numpy array using GxB_Vector_deserialize.
The data should have been previously serialized with a compatible version of
SuiteSparse:GraphBLAS. For example, from the result of `data = vector.ss.serialize()`.
Examples
--------
>>> data = vector.serialize()
>>> new_vector = Vector.ss.deserialize(data)
>>> new_vector.isequal(vector)
True
Parameters
----------
dtype : DataType, optional
If given, this should specify the dtype of the object. This is usually unnecessary.
If the dtype doesn't match what is in the serialized metadata, deserialize will fail.
You may need to specify the dtype to load user-defined types.
nthreads : int, optional
The maximum number of threads to use when deserializing.
None, 0 or negative nthreads means to use the default number of threads.
"""
if isinstance(data, np.ndarray):
data = ints_to_numpy_buffer(data, np.uint8)
else:
data = np.frombuffer(data, np.uint8)
data_obj = ffi.from_buffer("void*", data)
if dtype is None:
# Get the dtype name first
cname = ffi_new(f"char[{lib.GxB_MAX_NAME_LEN}]")
info = lib.GxB_deserialize_type_name(
cname,
data_obj,
data.nbytes,
)
if info != lib.GrB_SUCCESS:
raise _error_code_lookup[info]("Vector deserialize failed to get the dtype name")
dtype_name = b"".join(itertools.takewhile(b"\x00".__ne__, cname)).decode()
dtype = _string_to_dtype(dtype_name)
else:
dtype = lookup_dtype(dtype)
if nthreads is not None:
desc_obj = get_nthreads_descriptor(nthreads)._carg
else:
desc_obj = NULL
gb_obj = ffi_new("GrB_Vector*")
check_status_carg(
lib.GxB_Vector_deserialize(gb_obj, dtype._carg, data_obj, data.nbytes, desc_obj),
"Vector",
gb_obj[0],
)
rv = gb.Vector._from_obj(gb_obj, dtype, -1, name=name)
rv._size = rv.size
return rv
@njit
def random_choice(n, k): # pragma: no cover
if k >= n:
return np.arange(n, dtype=np.uint64)
choices = np.empty(k, dtype=np.uint64)
if 2 * k <= n:
if k == 1:
# Select a single edge
choices[0] = np.random.randint(n)
elif k == 2:
# Select two edges
choices[0] = np.random.randint(n)
choices[1] = np.random.randint(n - 1)
if choices[0] <= choices[1]:
choices[1] += 1
else:
# Move the ones we want to keep to the front of `a`
a = np.arange(n)
for i in range(k):
j = np.random.randint(i, n)
a[i], a[j] = a[j], a[i]
choices[i] = a[i]
elif k == n - 1:
# Select all but one edge
j = np.random.randint(n)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import json
import numpy as np
import matplotlib.pyplot as plt
import h5py
from contextlib import redirect_stdout
class Saver:
def __init__(self, config, params, save_dir, model, metrics=''):
self.config = config
self.params = params
self.save_dir = save_dir
self.save_model = model
self.metrics = metrics
def save(self):
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
# Save config
config_filename = self.save_dir + '/config.txt'
with open(config_filename, 'w') as f:
f.write(json.dumps(self.config))
print('Configs saved.')
# Save parameter
params_filename = self.save_dir + '/params.txt'
with open(params_filename, 'w') as f:
f.write(json.dumps(self.params))
print('Parameters saved.')
# Save metrics
metric_filename = self.save_dir + '/metrics.txt'
with open(metric_filename, 'w') as f:
f.write(json.dumps(self.metrics))
print('Metrics saved.')
# Save model and weights
self.save_model.save_weights(self.save_dir + '/model_weights.h5')
print('Keras model saved.')
# Save model summary
summary_file = self.save_dir + '/model_summary.txt'
with open(summary_file, 'w') as f:
with redirect_stdout(f):
self.save_model.summary()
# Plot all metrics
print('Plotting metrics:\n')
for key in self.metrics.keys():
if 'val' in key:
self.save_all_curves(key)
# Close all open figures
plt.close('all')
print('Done plotting')
del self.save_model
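# Hypothetical usage sketch (the surrounding training script is not shown here;
# `model`, `history`, and the config/params dicts are illustrative assumptions):
#   saver = Saver(config={'epochs': 10}, params={'lr': 1e-3},
#                 save_dir='./runs/exp1', model=model,
#                 metrics=history.history)
#   saver.save()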
def save_loss_curves(self):
'''Save loss curves as plot (.png) in save_dir'''
loss = np.array(self.metrics['loss'])
val_loss = np.array(self.metrics['val_loss'])
plt.figure(figsize=[8,6]) # Width, height in inches
plt.plot(loss, 'r', linewidth=1.0)
plt.plot(val_loss, 'b', linewidth=1.0)
plt.legend(['Training Loss', 'Validation Loss'], fontsize=18)
x_int = []
locs, labels = plt.xticks()
for each in locs:
x_int.append(int(each))
plt.xticks(x_int)
plt.xlabel('Epochs', fontsize=16)
plt.ylabel('Loss', fontsize=16)
plt.title('Loss Curves', fontsize=16)
plt.savefig(str(self.save_dir + '/fig_loss.png'))
plt.savefig(str(self.save_dir + '/fig_loss.svg'))
print('Loss curves saved.')
def save_accuracy_curves(self):
'''Save accuracy curves as plot (.png) in save_dir'''
acc = np.array(self.metrics['acc'])
val_acc = np.array(self.metrics['val_acc'])
import numpy as np
import tensorflow as tf
from ncem.estimators import EstimatorNoGraph
from ncem.models import ModelCVAE
class EstimatorCVAE(EstimatorNoGraph):
"""Estimator class for conditional variational autoencoder models. Subclass of EstimatorNoGraph."""
def __init__(
self,
use_type_cond: bool = True,
log_transform: bool = False,
):
"""
Initialize a EstimatorCVAE object.
Parameters
----------
use_type_cond : bool
Whether to use the categorical cell type label in the conditional.
log_transform : bool
Whether to log transform h_1.
"""
super(EstimatorCVAE, self).__init__()
self.adj_type = "none"
self.model_type = "cvae"
self.use_type_cond = use_type_cond
self.log_transform = log_transform
def init_model(
self,
optimizer: str = "adam",
learning_rate: float = 0.0001,
latent_dim: int = 10,
intermediate_dim_enc: int = 128,
intermediate_dim_dec: int = 128,
depth_enc: int = 1,
depth_dec: int = 1,
dropout_rate: float = 0.1,
l2_coef: float = 0.0,
l1_coef: float = 0.0,
n_eval_nodes_per_graph: int = 10,
use_domain: bool = False,
use_batch_norm: bool = False,
scale_node_size: bool = True,
transform_input: bool = False,
beta: float = 0.01,
max_beta: float = 1.0,
pre_warm_up: int = 0,
output_layer: str = "gaussian",
**kwargs
):
"""
Initialize a ModelCVAE object.
Parameters
----------
optimizer : str
Optimizer.
learning_rate : float
Learning rate.
latent_dim : int
Latent dimension.
dropout_rate : float
Dropout rate.
l2_coef : float
l2 regularization coefficient.
l1_coef : float
l1 regularization coefficient.
intermediate_dim_enc : int
Encoder intermediate dimension.
depth_enc : int
Encoder depth.
intermediate_dim_dec : int
Decoder intermediate dimension.
depth_dec : int
Decoder depth.
n_eval_nodes_per_graph : int
Number of nodes per graph.
use_domain : bool
Whether to use domain information.
use_batch_norm : bool
Whether to use batch normalization.
scale_node_size : bool
Whether to scale output layer by node sizes.
transform_input : bool
Whether to transform input.
beta : float
Beta used in BetaScheduler.
max_beta : float
Maximal beta used in BetaScheduler.
pre_warm_up : int
Number of epochs in pre warm up.
output_layer : str
Output layer.
kwargs
Arbitrary keyword arguments.
"""
self.n_eval_nodes_per_graph = n_eval_nodes_per_graph
self.model = ModelCVAE(
input_shapes=(
self.n_features_0,
self.n_features_1,
self.max_nodes,
self.n_eval_nodes_per_graph,
self.n_node_covariates,
self.n_domains,
),
latent_dim=latent_dim,
intermediate_dim_enc=intermediate_dim_enc,
intermediate_dim_dec=intermediate_dim_dec,
depth_enc=depth_enc,
depth_dec=depth_dec,
dropout_rate=dropout_rate,
l2_coef=l2_coef,
l1_coef=l1_coef,
use_domain=use_domain,
use_type_cond=self.use_type_cond,
use_batch_norm=use_batch_norm,
scale_node_size=scale_node_size,
transform_input=transform_input,
output_layer=output_layer,
)
optimizer = tf.keras.optimizers.get(optimizer)
tf.keras.backend.set_value(optimizer.lr, learning_rate)
self.beta = beta
self.max_beta = max_beta
self.pre_warm_up = pre_warm_up
self._compile_model(optimizer=optimizer, output_layer=output_layer)
self.optimizer = optimizer
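# Hedged usage sketch (the ncem data-loading steps are omitted here and assumed
# to have been run so that n_features_0/1, max_nodes, etc. are already set):
#   est = EstimatorCVAE(use_type_cond=True, log_transform=False)
#   # ... load data via the usual ncem estimator interface ...
#   est.init_model(optimizer="adam", learning_rate=1e-4,
#                  latent_dim=10, depth_enc=1, depth_dec=1)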
def evaluate_any_posterior_sampling(
self,
img_keys,
node_idx,
batch_size: int = 1,
):
"""
Evaluate model based on resampled dataset for posterior resampling.
node_1 + domain_1 -> encoder -> z_1 + domain_2 -> decoder -> reconstruction_2.
Parameters
----------
img_keys
Image keys in partition.
node_idx
Dictionary of nodes per image in partition.
batch_size : int
Batch size.
Returns
-------
Tuple of dictionary of evaluated metrics and latent space arrays (z, z_mean, z_log_var).
"""
# generating a resampled dataset for neighbourhood transfer evaluation
ds = self._get_resampled_dataset(image_keys=img_keys, nodes_idx=node_idx, batch_size=batch_size, seed=None)
eval_posterior = []
true = []
pred = []
latent_z = []
latent_z_mean = []
latent_z_log_var = []
for _step, (x_batch, _y_batch, resampled_x_batch, resampled_y_batch) in enumerate(ds):
(h, sf, node_covar, g) = x_batch
(h_resampled, sf_resampled, node_covar_resampled, g) = resampled_x_batch
z, z_mean, z_log_var = self.model.encoder((h, node_covar, g))
latent_z.append(z)
latent_z_mean.append(z_mean)
latent_z_log_var.append(z_log_var)
z = tf.reshape(z, [batch_size, self.n_eval_nodes_per_graph, -1])
results = self.model.decoder.evaluate(
(z, sf_resampled, node_covar_resampled, g),
resampled_y_batch,
)
prediction = self.model.decoder.predict((z, sf_resampled, node_covar_resampled, g))[0]
eval_posterior.append(results)
true.append(h_resampled.numpy().squeeze())
pred.append(prediction.squeeze())
eval_posterior = np.concatenate(np.expand_dims(eval_posterior, axis=0), axis=0)
eval_posterior = np.mean(eval_posterior, axis=0)
true = np.concatenate(true, axis=0)
pred = np.split(np.concatenate(pred, axis=0), indices_or_sections=2, axis=-1)[0]
latent_z = np.concatenate(latent_z, axis=0)
from collections import OrderedDict
import copy
import getpass
import itertools
import numpy as np
from scipy import signal
import time
LOCAL_MODE = getpass.getuser() == 'tom'
CONFIG = {
'halite_config_setting_divisor': 1.0,
'collect_smoothed_multiplier': 0.0,
'collect_actual_multiplier': 5.0,
'collect_less_halite_ships_multiplier_base': 0.55,
'collect_base_nearest_distance_exponent': 0.2,
'return_base_multiplier': 8.0,
'return_base_less_halite_ships_multiplier_base': 0.85,
'early_game_return_base_additional_multiplier': 0.1,
'early_game_return_boost_step': 50,
'establish_base_smoothed_multiplier': 0.0,
'establish_first_base_smoothed_multiplier_correction': 2.0,
'establish_base_dm_exponent': 1.1,
'first_base_no_4_way_camping_spot_bonus': 300*0,
'start_camp_if_not_winning': 0,
'max_camper_ship_budget': 2*1,
'relative_step_start_camping': 0.15,
'establish_base_deposit_multiplier': 1.0,
'establish_base_less_halite_ships_multiplier_base': 1.0,
'max_attackers_per_base': 3*1,
'attack_base_multiplier': 300.0,
'attack_base_less_halite_ships_multiplier_base': 0.9,
'attack_base_halite_sum_multiplier': 2.0,
'attack_base_run_opponent_multiplier': 1.0,
'attack_base_catch_opponent_multiplier': 1.0,
'collect_run_opponent_multiplier': 10.0,
'return_base_run_opponent_multiplier': 2.5,
'establish_base_run_opponent_multiplier': 2.5,
'collect_catch_opponent_multiplier': 1.0,
'return_base_catch_opponent_multiplier': 1.0,
'establish_base_catch_opponent_multiplier': 0.5,
'two_step_avoid_boxed_opponent_multiplier_base': 0.7,
'n_step_avoid_boxed_opponent_multiplier_base': 0.45,
'min_consecutive_chase_extrapolate': 6,
'chase_return_base_exponential_bonus': 2.0,
'ignore_catch_prob': 0.3,
'max_initial_ships': 60,
'max_final_ships': 60,
'max_standard_ships_decided_end_pack_hunting': 2,
'nearby_ship_halite_spawn_constant': 3.0,
'nearby_halite_spawn_constant': 5.0,
'remaining_budget_spawn_constant': 0.2,
'spawn_score_threshold': 75.0,
'boxed_in_halite_convert_divisor': 1.0,
'n_step_avoid_min_die_prob_cutoff': 0.05,
'n_step_avoid_window_size': 7,
'influence_map_base_weight': 2.0,
'influence_map_min_ship_weight': 0.0,
'influence_weights_additional_multiplier': 2.0,
'influence_weights_exponent': 8.0,
'escape_influence_prob_divisor': 3.0,
'rescue_ships_in_trouble': 1,
'target_strategic_base_distance': 8.0,
'target_strategic_num_bases_ship_divisor': 9,
'target_strategic_triangle_weight': 20.0, # initially: 20
'target_strategic_independent_base_distance_multiplier': 8.0, # initially 8.0
'target_strategic_influence_desirability_multiplier': 1.0, # initially: 1.0
'target_strategic_potential_divisor': 15.0, # initially: 15.0
'max_spawn_relative_step_divisor': 12.0,
'no_spawn_near_base_ship_limit': 100,
'avoid_cycles': 1,
'max_risk_n_step_risky': 0.5,
'max_steps_n_step_risky': 70,
'log_near_base_distance': 2,
'max_recent_considered_relevant_zero_move_count': 120,
'near_base_2_step_risky_min_count': 50,
'relative_stand_still_collect_boost': 1.5,
'initial_collect_boost_away_from_base': 2.0,
'start_hunting_season_relative_step': 0.1875,
'end_hunting_season_relative_step': 0.75,
'early_hunting_season_less_collect_relative_step': 0.375,
'max_standard_ships_early_hunting_season': 2,
'late_hunting_season_more_collect_relative_step': 0.5,
'late_hunting_season_collect_max_n_step_risk': 0.2,
'after_hunting_season_collect_max_n_step_risk': 0.5,
'late_hunting_season_standard_min_fraction': 0.7,
'max_standard_ships_late_hunting_season': 15,
'collect_on_safe_return_relative_step': 0.075,
'min_halite_to_stop_early_hunt': 15000.0,
'early_best_opponent_relative_step': 0.5,
'surrounding_ships_cycle_extrapolate_step_count': 5,
'surrounding_ships_extended_cycle_extrapolate_step_count': 7,
}
NORTH = "NORTH"
SOUTH = "SOUTH"
EAST = "EAST"
WEST = "WEST"
CONVERT = "CONVERT"
SPAWN = "SPAWN"
NOT_NONE_DIRECTIONS = [NORTH, SOUTH, EAST, WEST]
MOVE_DIRECTIONS = [None, NORTH, SOUTH, EAST, WEST]
MOVE_DIRECTIONS_TO_ID = {None: 0, NORTH: 1, SOUTH: 2, EAST: 3, WEST: 4}
RELATIVE_DIR_MAPPING = {None: (0, 0), NORTH: (-1, 0), SOUTH: (1, 0),
EAST: (0, 1), WEST: (0, -1)}
RELATIVE_DIR_TO_DIRECTION_MAPPING = {
v: k for k, v in RELATIVE_DIR_MAPPING.items()}
OPPOSITE_MAPPING = {None: None, NORTH: SOUTH, SOUTH: NORTH, EAST: WEST,
WEST: EAST}
RELATIVE_DIRECTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)]
RELATIVE_NOT_NONE_DIRECTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
MOVE_GATHER_OPTIONS = [(-1, 0, False), (1, 0, False), (0, -1, False),
(0, 1, False), (0, 0, True)]
TWO_STEP_THREAT_DIRECTIONS = {
(-2, 0): [(-1, 0)],
(-1, -1): [(-1, 0), (0, -1)],
(-1, 0): [(-1, 0), (0, 0)],
(-1, 1): [(-1, 0), (0, 1)],
(0, -2): [(0, -1)],
(0, -1): [(0, -1), (0, 0)],
(0, 1): [(0, 1), (0, 0)],
(0, 2): [(0, 1)],
(1, -1): [(1, 0), (0, -1)],
(1, 0): [(1, 0), (0, 0)],
(1, 1): [(1, 0),(0, 1)],
(2, 0): [(1, 0)],
}
GAUSSIAN_2D_KERNELS = {}
for dim in range(3, 20, 2):
# Modified from https://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_image_blur.html
center_distance = np.floor(np.abs(np.arange(dim) - (dim-1)/2))
horiz_distance = np.tile(center_distance, [dim, 1])
vert_distance = np.tile(np.expand_dims(center_distance, 1), [1, dim])
manh_distance = horiz_distance + vert_distance
kernel = np.exp(-manh_distance/(dim/4))
kernel[manh_distance > dim/2] = 0
GAUSSIAN_2D_KERNELS[dim] = kernel
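# For example, dim=3 gives (values rounded):
#   manh_distance = [[2, 1, 2],        kernel = [[0.   , 0.264, 0.   ],
#                    [1, 0, 1],                  [0.264, 1.   , 0.264],
#                    [2, 1, 2]]                  [0.   , 0.264, 0.   ]]
# i.e. exp(-d / (dim/4)) with the corners (manh_distance > dim/2) zeroed out.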
DISTANCES = {}
DISTANCE_MASKS = {}
HALF_PLANES_CATCH = {}
HALF_PLANES_RUN = {}
ROW_COL_DISTANCE_MASKS = {}
ROW_COL_MAX_DISTANCE_MASKS = {}
ROW_COL_BOX_MAX_DISTANCE_MASKS = {}
ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS = {}
BOX_DIR_MAX_DISTANCE = 4
BOX_DIRECTION_MASKS = {}
ROW_MASK = {}
COLUMN_MASK = {}
DISTANCE_MASK_DIM = 21
half_distance_mask_dim = int(DISTANCE_MASK_DIM/2)
for row in range(DISTANCE_MASK_DIM):
row_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM), dtype=bool)
row_mask[row] = 1
col_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM), dtype=bool)
col_mask[:, row] = 1
ROW_MASK[row] = row_mask
COLUMN_MASK[row] = col_mask
for col in range(DISTANCE_MASK_DIM):
horiz_distance = np.minimum(
np.abs(np.arange(DISTANCE_MASK_DIM) - col),
np.abs(np.arange(DISTANCE_MASK_DIM) - col - DISTANCE_MASK_DIM))
horiz_distance = np.minimum(
horiz_distance,
np.abs(np.arange(DISTANCE_MASK_DIM) - col + DISTANCE_MASK_DIM))
vert_distance = np.minimum(
np.abs(np.arange(DISTANCE_MASK_DIM) - row),
np.abs(np.arange(DISTANCE_MASK_DIM) - row - DISTANCE_MASK_DIM))
vert_distance = np.minimum(
vert_distance,
np.abs(np.arange(DISTANCE_MASK_DIM) - row + DISTANCE_MASK_DIM))
horiz_distance = np.tile(horiz_distance, [DISTANCE_MASK_DIM, 1])
vert_distance = np.tile(np.expand_dims(vert_distance, 1),
[1, DISTANCE_MASK_DIM])
manh_distance = horiz_distance + vert_distance
kernel = np.exp(-manh_distance/(DISTANCE_MASK_DIM/4))
DISTANCE_MASKS[(row, col)] = kernel
DISTANCES[(row, col)] = manh_distance
catch_distance_masks = {}
run_distance_masks = {}
for d in MOVE_DIRECTIONS:
if d is None:
catch_rows = np.array([]).astype(int)
catch_cols = np.array([]).astype(int)
if d == NORTH:
catch_rows = np.mod(row - np.arange(half_distance_mask_dim) - 1,
DISTANCE_MASK_DIM)
catch_cols = np.arange(DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(BOX_DIR_MAX_DISTANCE) + 1,
DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == SOUTH:
catch_rows = np.mod(row + np.arange(half_distance_mask_dim) + 1,
DISTANCE_MASK_DIM)
catch_cols = np.arange(DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row - np.arange(BOX_DIR_MAX_DISTANCE) - 1,
DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == WEST:
catch_cols = np.mod(col - np.arange(half_distance_mask_dim) - 1,
DISTANCE_MASK_DIM)
catch_rows = np.arange(DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(BOX_DIR_MAX_DISTANCE) + 1,
DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == EAST:
catch_cols = np.mod(col + np.arange(half_distance_mask_dim) + 1,
DISTANCE_MASK_DIM)
catch_rows = np.arange(DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col - np.arange(BOX_DIR_MAX_DISTANCE) - 1,
DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
catch_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM),
dtype=bool)
catch_mask[catch_rows[:, None], catch_cols] = 1
run_mask = np.copy(catch_mask)
run_mask[row, col] = 1
catch_distance_masks[d] = catch_mask
run_distance_masks[d] = run_mask
if d is not None:
box_dir_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM),
dtype=bool)
box_dir_mask[box_dir_rows[:, None], box_dir_cols] = 1
if d in [NORTH, SOUTH]:
box_dir_mask &= (horiz_distance <= vert_distance)
else:
box_dir_mask &= (horiz_distance >= vert_distance)
ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS[(row, col, d)] = box_dir_mask
HALF_PLANES_CATCH[(row, col)] = catch_distance_masks
HALF_PLANES_RUN[(row, col)] = run_distance_masks
for d in range(1, DISTANCE_MASK_DIM):
ROW_COL_DISTANCE_MASKS[(row, col, d)] = manh_distance == d
for d in range(half_distance_mask_dim):
ROW_COL_MAX_DISTANCE_MASKS[(row, col, d)] = manh_distance <= d
ROW_COL_BOX_MAX_DISTANCE_MASKS[(row, col, d)] = np.logical_and(
horiz_distance <= d, vert_distance <= d)
for dist in range(2, half_distance_mask_dim+1):
dist_mask_dim = dist*2+1
row_pos = np.tile(np.expand_dims(np.arange(dist_mask_dim), 1),
[1, dist_mask_dim])
col_pos = np.tile(np.arange(dist_mask_dim), [dist_mask_dim, 1])
for direction in NOT_NONE_DIRECTIONS:
if direction == NORTH:
box_mask = (row_pos < dist) & (
np.abs(col_pos-dist) <= (dist-row_pos))
if direction == SOUTH:
box_mask = (row_pos > dist) & (
np.abs(col_pos-dist) <= (row_pos-dist))
if direction == WEST:
box_mask = (col_pos < dist) & (
np.abs(row_pos-dist) <= (dist-col_pos))
if direction == EAST:
box_mask = (col_pos > dist) & (
np.abs(row_pos-dist) <= (col_pos-dist))
BOX_DIRECTION_MASKS[(dist, direction)] = box_mask
CONSIDERED_OTHER_DISTANCES = [13]
OTHER_DISTANCES = {}
for other_distance in CONSIDERED_OTHER_DISTANCES:
for row in range(other_distance):
for col in range(other_distance):
horiz_distance = np.minimum(
np.abs(np.arange(other_distance) - col),
np.abs(np.arange(other_distance) - col - other_distance))
horiz_distance = np.minimum(
horiz_distance,
np.abs(np.arange(other_distance) - col + other_distance))
vert_distance = np.minimum(
np.abs(np.arange(other_distance) - row),
np.abs(np.arange(other_distance) - row - other_distance))
vert_distance = np.minimum(
vert_distance,
np.abs(np.arange(other_distance) - row + other_distance))
horiz_distance = np.tile(horiz_distance, [other_distance, 1])
vert_distance = np.tile(np.expand_dims(vert_distance, 1),
[1, other_distance])
manh_distance = horiz_distance + vert_distance
OTHER_DISTANCES[(row, col, other_distance)] = manh_distance
D2_ROW_COL_SHIFTS_DISTANCES = [
(-2, 0, 2),
(-1, -1, 2), (-1, 0, 1), (-1, 1, 2),
(0, -2, 2), (0, -1, 1), (0, 1, 1), (0, 2, 2),
(1, -1, 2), (1, 0, 1), (1, 1, 2),
(2, 0, 2),
]
def row_col_from_square_grid_pos(pos, size):
col = pos % size
row = pos // size
return row, col
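# Example: on a 21x21 board, flat position 47 maps to row 2, column 5:
#   row_col_from_square_grid_pos(47, 21)  -> (2, 5)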
def move_ship_row_col(row, col, direction, size):
if direction == "NORTH":
return (size-1 if row == 0 else row-1, col)
elif direction == "SOUTH":
return (row+1 if row < (size-1) else 0, col)
elif direction == "EAST":
return (row, col+1 if col < (size-1) else 0)
elif direction == "WEST":
return (row, size-1 if col == 0 else col-1)
elif direction is None:
return (row, col)
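# Examples of the toroidal wrap-around (size=21):
#   move_ship_row_col(0, 5, NORTH, 21)   -> (20, 5)   # wraps over the top edge
#   move_ship_row_col(20, 20, EAST, 21)  -> (20, 0)   # wraps over the right edge
#   move_ship_row_col(7, 7, None, 21)    -> (7, 7)    # staying still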
def get_directional_distance(r1, c1, r2, c2, size, d):
relative_pos = get_relative_position(r1, c1, r2, c2, size)
if d == NORTH:
directional_distance = -relative_pos[0]
elif d == SOUTH:
directional_distance = relative_pos[0]
elif d == EAST:
directional_distance = relative_pos[1]
elif d == WEST:
directional_distance = -relative_pos[1]
return directional_distance
def mirror_edges(observation, num_mirror_dim):
if num_mirror_dim > 0:
# observation = np.arange(225).reshape((15,15)) # Debugging test
assert len(observation.shape) == 2
grid_size = observation.shape[0]
new_grid_size = grid_size + 2*num_mirror_dim
mirrored_obs = np.full((new_grid_size, new_grid_size), np.nan)
# Fill in the original data
mirrored_obs[num_mirror_dim:(-num_mirror_dim),
num_mirror_dim:(-num_mirror_dim)] = observation
# Add top and bottom mirrored data
mirrored_obs[:num_mirror_dim, num_mirror_dim:(
-num_mirror_dim)] = observation[-num_mirror_dim:, :]
mirrored_obs[-num_mirror_dim:, num_mirror_dim:(
-num_mirror_dim)] = observation[:num_mirror_dim, :]
# Add left and right mirrored data
mirrored_obs[:, :num_mirror_dim] = mirrored_obs[
:, -(2*num_mirror_dim):(-num_mirror_dim)]
mirrored_obs[:, -num_mirror_dim:] = mirrored_obs[
:, num_mirror_dim:(2*num_mirror_dim)]
observation = mirrored_obs
return observation
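# Example: a 5x5 grid padded with num_mirror_dim=2 becomes 9x9, where the added
# border repeats the opposite edges so that convolutions respect the torus:
#   padded = mirror_edges(np.arange(25, dtype=float).reshape(5, 5), 2)  # shape (9, 9)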
def smooth2d(grid, smooth_kernel_dim=7, return_kernel=False):
edge_augmented = mirror_edges(grid, smooth_kernel_dim-1)
kernel = GAUSSIAN_2D_KERNELS[int(2*smooth_kernel_dim-1)]
convolved = signal.convolve2d(edge_augmented, kernel, mode="valid")
if return_kernel:
return convolved, kernel
else:
return convolved
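# Example usage (obs_halite is assumed to be a 2D numpy array, e.g. 21x21):
#   halite_density = smooth2d(obs_halite)   # toroidally smoothed, same shape as input
#   density, kern = smooth2d(obs_halite, smooth_kernel_dim=5, return_kernel=True)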
def get_relative_position(row, col, other_row, other_col, size):
if row >= other_row:
if (other_row + size - row) < (row - other_row):
row_diff = (other_row + size - row)
else:
row_diff = -(row - other_row)
else:
if (row + size - other_row) < (other_row - row):
row_diff = -(row + size - other_row)
else:
row_diff = other_row - row
if col >= other_col:
if (other_col + size - col) < (col - other_col):
col_diff = (other_col + size - col)
else:
col_diff = -(col - other_col)
else:
if (col + size - other_col) < (other_col - col):
col_diff = -(col + size - other_col)
else:
col_diff = other_col - col
return (row_diff, col_diff)
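# Examples on a size-21 torus (returned as (row_diff, col_diff)):
#   get_relative_position(0, 0, 20, 20, 21)  -> (-1, -1)   # wraps around both edges
#   get_relative_position(5, 5, 8, 2, 21)    -> (3, -3)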
def update_scores_opponent_ships(
config, collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, opponent_ships, opponent_bases, halite_ships, row, col,
grid_size, spawn_cost, drop_None_valid, obs_halite, collect_rate, np_rng,
opponent_ships_sensible_actions, opponent_ships_sensible_actions_no_risk,
ignore_bad_attack_directions, observation, ship_k, my_bases, my_ships,
steps_remaining, history, escape_influence_probs, player_ids, env_obs_ids,
env_observation, main_base_distances, nearest_base_distances,
end_game_base_return, camping_override_strategy,
attack_campers_override_strategy, boxed_in_attack_squares,
safe_to_collect, boxed_in_zero_halite_opponents, ignore_convert_positions,
avoid_attack_squares_zero_halite, n_step_avoid_min_die_prob_cutoff,
safe_to_return_halites, safe_to_return_base_halites,
my_nearest_base_distances):
direction_halite_diff_distance_raw = {
NORTH: [], SOUTH: [], EAST: [], WEST: []}
my_bases_or_ships = np.logical_or(my_bases, my_ships)
chase_details = history['chase_counter'][0].get(ship_k, None)
take_my_square_next_halite_diff = None
take_my_next_square_dir = None
wide_cycle_mask = ROW_COL_MAX_DISTANCE_MASKS[row, col, 3]
tight_cycle_mask = ROW_COL_MAX_DISTANCE_MASKS[row, col, 2]
opponents_in_cycle = np.any(opponent_ships[tight_cycle_mask]) and (
np.all(history['empty_or_cycled_positions'][wide_cycle_mask]) or (
np.all(history['empty_or_extended_cycled_positions'][tight_cycle_mask])))
if opponents_in_cycle:
print("EXTRAPOLATING OPPONENT CYCLIC BEHAVIOR", observation['step'], row,
col)
if len(camping_override_strategy) == 0:
navigation_zero_halite_risk_threshold = 0
else:
navigation_zero_halite_risk_threshold = camping_override_strategy[0]
if camping_override_strategy[1].max() >= 1e4:
collect_grid_scores = 1e-4*collect_grid_scores + (
camping_override_strategy[1])
else:
collect_grid_scores += camping_override_strategy[1]
attack_base_scores += camping_override_strategy[2]
if len(attack_campers_override_strategy) > 0:
ignore_opponent_row = attack_campers_override_strategy[0]
ignore_opponent_col = attack_campers_override_strategy[1]
ignore_opponent_distance = attack_campers_override_strategy[5]
collect_grid_scores[ignore_opponent_row, ignore_opponent_col] += (
attack_campers_override_strategy[2])
navigation_zero_halite_risk_threshold = max(
navigation_zero_halite_risk_threshold,
attack_campers_override_strategy[6])
else:
ignore_opponent_row = None
ignore_opponent_col = None
ignore_opponent_distance = None
# Identify directions where I can certainly reach the base in time and always
# mark them as valid
ship_halite = halite_ships[row, col]
safe_return_base_directions = []
if ship_halite < safe_to_return_halites[row, col]:
for base_safe_return_halite, base_location in safe_to_return_base_halites:
if ship_halite < base_safe_return_halite[row, col]:
for d in get_dir_from_target(
row, col, base_location[0], base_location[1], grid_size):
if d is not None and d not in safe_return_base_directions:
safe_return_base_directions.append(d)
# if observation['step'] == 131 and ship_k in ['63-1']:
# import pdb; pdb.set_trace()
can_stay_still_zero_halite = True
for row_shift, col_shift, distance in D2_ROW_COL_SHIFTS_DISTANCES:
considered_row = (row + row_shift) % grid_size
considered_col = (col + col_shift) % grid_size
if opponent_ships[considered_row, considered_col] and (
ignore_opponent_row is None or (((
considered_row != ignore_opponent_row) or (
considered_col != ignore_opponent_col)) and (
ignore_opponent_distance > 2))):
relevant_dirs = []
halite_diff = halite_ships[row, col] - halite_ships[
considered_row, considered_col]
assume_take_my_square_next = False
# if observation['step'] == 266 and row == 11 and col == 15:
# import pdb; pdb.set_trace()
# Extrapolate the opponent behavior if we have been chased for a
# while and chasing is likely to continue
if distance == 1 and chase_details is not None and (
chase_details[1] >= config[
'min_consecutive_chase_extrapolate']) and (
considered_row, considered_col) == (
chase_details[4], chase_details[5]):
chaser_row = chase_details[4]
chaser_col = chase_details[5]
to_opponent_dir = get_dir_from_target(
row, col, chaser_row, chaser_col, grid_size)[0]
opp_to_me_dir = OPPOSITE_MAPPING[to_opponent_dir]
rel_opp_to_me_dir = RELATIVE_DIR_MAPPING[opp_to_me_dir]
opp_can_move_to_me = rel_opp_to_me_dir in (
opponent_ships_sensible_actions_no_risk[chaser_row, chaser_col])
# There is a unique opponent id with the least amount of halite
# on the chaser square or the chaser has at least one friendly
# ship that can replace it
chaser_can_replace = None
chaser_is_chased_by_not_me = None
if opp_can_move_to_me:
chaser_id = player_ids[chaser_row, chaser_col]
near_chaser = ROW_COL_MAX_DISTANCE_MASKS[
chaser_row, chaser_col, 1]
near_halite = halite_ships[near_chaser]
near_chaser_friendly_halite = near_halite[
(near_halite >= 0) & (player_ids[near_chaser] == chaser_id)]
min_non_chaser_halite = near_halite[
(near_halite >= 0) & (
player_ids[near_chaser] != chaser_id)].min()
min_near_chaser_halite = near_halite[near_halite >= 0].min()
opponent_min_hal_ids = player_ids[np.logical_and(
near_chaser, halite_ships == min_near_chaser_halite)]
near_me = ROW_COL_MAX_DISTANCE_MASKS[row, col, 1]
near_me_threat_players = player_ids[np.logical_and(
near_me, (halite_ships >= 0) & (
halite_ships < halite_ships[row, col]))]
double_opp_chase = (near_me_threat_players.size > 1) and (
np.all(near_me_threat_players == chaser_id))
chaser_can_replace = ((opponent_min_hal_ids.size > 1) and (
np.all(opponent_min_hal_ids == chaser_id) or (
(opponent_min_hal_ids == chaser_id).sum() > 1)) or (
(near_chaser_friendly_halite <= (
min_non_chaser_halite)).sum() > 1)) or double_opp_chase
if opp_can_move_to_me and not chaser_can_replace:
chaser_players_index = env_obs_ids[chaser_id]
chaser_k = [k for k, v in env_observation.players[
chaser_players_index][2].items() if v[0] == (
chaser_row*grid_size + chaser_col)][0]
chaser_is_chased = chaser_k in history[
'chase_counter'][chaser_id]
chaser_is_chased_by_not_me = chaser_is_chased
if chaser_is_chased:
chaser_chaser = history['chase_counter'][chaser_id][chaser_k]
chaser_is_chased_by_not_me = (chaser_chaser[4] is None) or (
player_ids[chaser_chaser[4], chaser_chaser[5]] != 0)
if opp_can_move_to_me and not chaser_can_replace and not (
chaser_is_chased_by_not_me):
assume_take_my_square_next = True
take_my_square_next_halite_diff = halite_diff
take_my_next_square_dir = to_opponent_dir
# if observation['step'] == 96 and ship_k in ['80-1']:
# import pdb; pdb.set_trace()
can_ignore_ship = False
if (considered_row, considered_col) in boxed_in_zero_halite_opponents:
can_stay_still_zero_halite = can_stay_still_zero_halite and (
distance == 2)
else:
if halite_ships[row, col] == halite_ships[
considered_row, considered_col]:
opponent_id = player_ids[considered_row, considered_col]
# Note: use the opponent distance because the opponent model is
# learned using the opponent distance to the nearest base (with near
# base distance cutoff typically at 2)
is_near_base = nearest_base_distances[
considered_row, considered_col] <= config['log_near_base_distance']
risk_lookup_k = str(is_near_base) + '_' + str(distance)
if distance == 2:
can_ignore_ship = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k] <= (
navigation_zero_halite_risk_threshold)
else:
risk_lookup_k_dist_zero = str(is_near_base) + '_' + str(0)
d1_threat = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k] > (
navigation_zero_halite_risk_threshold)
d0_threat = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k_dist_zero] > (
navigation_zero_halite_risk_threshold)
can_stay_still_zero_halite = can_stay_still_zero_halite and (
not d0_threat)
# if is_near_base and history['zero_halite_move_behavior'][
# opponent_id][str(is_near_base) + '_' + str(0) + '_ever_risky']:
# import pdb; pdb.set_trace()
can_ignore_ship = not (d0_threat or d1_threat)
if not assume_take_my_square_next and not can_ignore_ship:
relevant_dirs += [] if row_shift >= 0 else [NORTH]
relevant_dirs += [] if row_shift <= 0 else [SOUTH]
relevant_dirs += [] if col_shift <= 0 else [EAST]
relevant_dirs += [] if col_shift >= 0 else [WEST]
# When the opponents are in a cycle: only consider the direction I
# expect my opponent to be at in the next step (if any)
if opponents_in_cycle:
relevant_dirs = []
opponent_ship_key = history['opponent_ship_pos_to_key'][(
considered_row, considered_col)]
opponent_id = player_ids[considered_row, considered_col]
likely_opponent_action = history['opponent_cycle_counters'][
opponent_id-1][opponent_ship_key][1][0]
likely_opponent_next_pos = move_ship_row_col(
considered_row, considered_col, likely_opponent_action, grid_size)
relative_other_pos = get_relative_position(
row, col, likely_opponent_next_pos[0], likely_opponent_next_pos[1],
grid_size)
current_opp_relative_dir = get_relative_position(
row, col, considered_row, considered_col, grid_size)
if np.abs(relative_other_pos[0]) + np.abs(
relative_other_pos[1]) <= 1:
# At distance 1 or 0
# import pdb; pdb.set_trace()
if relative_other_pos == (0, 0):
# The opponent is expected to move onto my square: the threat comes
# from the direction of its current position
relevant_dirs = [RELATIVE_DIR_TO_DIRECTION_MAPPING[
current_opp_relative_dir]]
else:
# The opponent is expected to end up at distance 1: the threat comes
# from the direction of that square
relevant_dirs = [RELATIVE_DIR_TO_DIRECTION_MAPPING[
relative_other_pos]]
# if observation['step'] == 215 and ship_k == '2-2':
# import pdb; pdb.set_trace()
for d in relevant_dirs:
direction_halite_diff_distance_raw[d].append(
(halite_diff, distance))
direction_halite_diff_distance = {}
for d in direction_halite_diff_distance_raw:
vals = np.array(direction_halite_diff_distance_raw[d])
if vals.size:
diffs = vals[:, 0]
distances = vals[:, 1]
max_diff = diffs.max()
if max_diff > 0:
if can_stay_still_zero_halite:
greater_min_distance = distances[diffs > 0].min()
else:
# My halite is > 0 and I have a threat at D1 of an aggressive equal
# halite ships and a threat of a less halite ship at D2
greater_min_distance = distances[diffs >= 0].min()
direction_halite_diff_distance[d] = (max_diff, greater_min_distance)
elif max_diff == 0:
equal_min_distance = distances[diffs == 0].min()
direction_halite_diff_distance[d] = (max_diff, equal_min_distance)
else:
min_diff = diffs.min()
min_diff_min_distance = distances[diffs == min_diff].min()
direction_halite_diff_distance[d] = (min_diff, min_diff_min_distance)
else:
direction_halite_diff_distance[d] = None
preferred_directions = []
strongly_preferred_directions = []
valid_directions = copy.copy(MOVE_DIRECTIONS)
one_step_valid_directions = copy.copy(MOVE_DIRECTIONS)
bad_directions = []
ignore_catch = np_rng.uniform() < config['ignore_catch_prob']
# if observation['step'] == 221 and ship_k == '54-1':
# import pdb; pdb.set_trace()
# x=1
for direction, halite_diff_dist in direction_halite_diff_distance.items():
if halite_diff_dist is not None:
move_row, move_col = move_ship_row_col(row, col, direction, grid_size)
no_escape_bonus = 0 if not (
boxed_in_attack_squares[move_row, move_col]) else 5e3
halite_diff = halite_diff_dist[0]
if halite_diff >= 0:
# I should avoid a collision
distance_multiplier = 1/halite_diff_dist[1]
mask_collect_return = np.copy(HALF_PLANES_RUN[(row, col)][direction])
valid_directions.remove(direction)
one_step_valid_directions.remove(direction)
bad_directions.append(direction)
if halite_diff_dist[1] == 1:
if halite_diff > 0 or not can_stay_still_zero_halite:
# Only suppress the stay still action if the opponent has something
# to gain.
# Exception: the opponent may aggressively attack my zero halite
# ships
if None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
bad_directions.append(None)
else:
mask_collect_return[row, col] = False
# I can safely mine halite at the current square if the opponent ship
# is >1 move away
if halite_diff_dist[1] > 1:
mask_collect_return[row, col] = False
collect_grid_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['collect_run_opponent_multiplier'])*distance_multiplier
return_to_base_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['return_base_run_opponent_multiplier'])
base_nearby_in_direction_mask = np.logical_and(
ROW_COL_MAX_DISTANCE_MASKS[(row, col, 2)], mask_collect_return)
base_nearby_in_direction = np.logical_and(
base_nearby_in_direction_mask, opponent_bases).sum() > 0
if not ignore_bad_attack_directions and not base_nearby_in_direction:
attack_base_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['attack_base_run_opponent_multiplier'])
mask_establish = np.copy(mask_collect_return)
mask_establish[row, col] = False
establish_base_scores -= mask_establish*(ship_halite+spawn_cost)*(
config['establish_base_run_opponent_multiplier'])
elif halite_diff < 0 and (
not ignore_catch or no_escape_bonus > 0) and (not (
move_row, move_col) in ignore_convert_positions):
        # I would like a collision unless there is another opponent ship
        # chasing me - risk avoiding policy for now: if there is at least
        # one ship in a direction that has less halite, I should avoid it
if no_escape_bonus > 0:
halite_diff = max(-spawn_cost/2, halite_diff) - no_escape_bonus
else:
halite_diff = 0 # Dubious choice, likely not very important
# halite_diff = max(-spawn_cost/2, halite_diff) - no_escape_bonus
distance_multiplier = 1/halite_diff_dist[1]
mask_collect_return = np.copy(HALF_PLANES_CATCH[(row, col)][direction])
collect_grid_scores -= mask_collect_return*halite_diff*(
config['collect_catch_opponent_multiplier'])*distance_multiplier
return_to_base_scores -= mask_collect_return*halite_diff*(
config['return_base_catch_opponent_multiplier'])*distance_multiplier
attack_base_scores -= mask_collect_return*halite_diff*(
config['attack_base_catch_opponent_multiplier'])*distance_multiplier
mask_establish = np.copy(mask_collect_return)
mask_establish[row, col] = False
establish_base_scores -= mask_establish*halite_diff*(
config['establish_base_catch_opponent_multiplier'])*(
distance_multiplier)
if no_escape_bonus > 0:
strongly_preferred_directions.append(direction)
if boxed_in_attack_squares[row, col] and no_escape_bonus > 0 and (
ship_halite > 0 or obs_halite[row, col] == 0):
# Also incentivize the None action when it is a possible escape
# square of an opponent - divide by 2 to make the None action less
# dominant (likely check in several directions)
collect_grid_scores[row, col] += no_escape_bonus/2
if not None in strongly_preferred_directions:
strongly_preferred_directions.append(None)
preferred_directions.append(direction)
if take_my_square_next_halite_diff is not None and None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
bad_directions.append(None)
if drop_None_valid and None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
valid_non_base_directions = []
base_directions = []
for d in valid_directions:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if not opponent_bases[move_row, move_col] :
valid_non_base_directions.append(d)
else:
base_directions.append(d)
# For the remaining valid non base directions: compute a score that resembles
# the probability of being boxed in during the next step
two_step_bad_directions = []
n_step_bad_directions = []
n_step_bad_directions_die_probs = {}
if steps_remaining > 1:
for d in valid_non_base_directions:
      my_next_halite = halite_ships[row, col] if d is not None else (
halite_ships[row, col] + int(collect_rate*obs_halite[row, col]))
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
my_next_halite = 0 if my_bases[move_row, move_col] else my_next_halite
opponent_mask = ROW_COL_MAX_DISTANCE_MASKS[(move_row, move_col, 3)]
less_halite_threat_opponents = np.where(np.logical_and(
opponent_mask, np.logical_and(
opponent_ships, my_next_halite > halite_ships)))
num_threat_ships = less_halite_threat_opponents[0].size
if num_threat_ships > 1 and not d in safe_return_base_directions:
all_dir_threat_counter = {
(-1, 0): 0, (1, 0): 0, (0, -1): 0, (0, 1): 0, (0, 0): 0}
for i in range(num_threat_ships):
other_row = less_halite_threat_opponents[0][i]
other_col = less_halite_threat_opponents[1][i]
relative_other_pos = get_relative_position(
move_row, move_col, other_row, other_col, grid_size)
for diff_rel_row, diff_rel_col, other_gather in MOVE_GATHER_OPTIONS:
# Only consider sensible opponent actions
if (diff_rel_row, diff_rel_col) in opponent_ships_sensible_actions[
other_row, other_col]:
is_threat = (not other_gather) or (my_next_halite > (
halite_ships[other_row, other_col] + int(
collect_rate*obs_halite[other_row, other_col])))
if is_threat:
other_rel_row = relative_other_pos[0] + diff_rel_row
other_rel_col = relative_other_pos[1] + diff_rel_col
move_diff = np.abs(other_rel_row) + np.abs(other_rel_col)
if move_diff < 3 and move_diff > 0:
threat_dirs = TWO_STEP_THREAT_DIRECTIONS[
(other_rel_row, other_rel_col)]
for threat_row_diff, threat_col_diff in threat_dirs:
all_dir_threat_counter[
(threat_row_diff, threat_col_diff)] += 1
# if observation['step'] == 112 and ship_k == '76-1':
# import pdb; pdb.set_trace()
# Aggregate the threat count in all_dir_threat_counter
threat_counts = np.array(list(all_dir_threat_counter.values()))
threat_score = np.sqrt(threat_counts.prod())
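        # Illustrative example with hypothetical counts: threats covering the
        # five relative directions as (1, 1, 2, 1, 1) give a threat_score of
        # sqrt(2) ~= 1.41, whereas a single uncovered direction (a zero count)
        # makes the product and hence the score 0, so no box-in penalty is
        # applied below.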
if threat_score > 0:
# Disincentivize an action that can get me boxed in on the next step
mask_avoid_two_steps = np.copy(HALF_PLANES_RUN[(row, col)][d])
if d is not None:
mask_avoid_two_steps[row, col] = False
collect_grid_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
return_to_base_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
establish_base_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
two_step_bad_directions.append(d)
if d not in two_step_bad_directions and not end_game_base_return and (
my_next_halite > 0) and (not d in safe_return_base_directions) and (
d is not None or not safe_to_collect[row, col]):
# For the remaining valid directions: compute a score that resembles
# the probability of being boxed in sometime in the future
opponent_mask_lt = ROW_COL_MAX_DISTANCE_MASKS[
(move_row, move_col, min(
steps_remaining, config['n_step_avoid_window_size']))]
less_halite_threat_opponents_lt = np.where(np.logical_and(
opponent_mask_lt, np.logical_and(
opponent_ships, my_next_halite > halite_ships)))
num_threat_ships_lt = less_halite_threat_opponents_lt[0].size
# Ignore the box in threat if I have a base and at least one zero
# halite ship one step from the move square
ignore_threat = my_bases[
ROW_COL_DISTANCE_MASKS[(move_row, move_col, 1)]].sum() > 0 and ((
halite_ships[np.logical_and(
my_ships,
ROW_COL_DISTANCE_MASKS[move_row, move_col, 1])] == 0).sum() > 0)
# if observation['step'] == 359 and ship_k == '67-1':
# import pdb; pdb.set_trace()
if not ignore_threat:
lt_catch_prob = {k: [] for k in RELATIVE_NOT_NONE_DIRECTIONS}
for i in range(num_threat_ships_lt):
other_row = less_halite_threat_opponents_lt[0][i]
other_col = less_halite_threat_opponents_lt[1][i]
other_sensible_actions = opponent_ships_sensible_actions[
other_row, other_col]
relative_other_pos = get_relative_position(
move_row, move_col, other_row, other_col, grid_size)
          # Give less weight to the other ship if there is a base of mine or
          # one or more less halite ships in between
# FUTURE WORK: Also give additional move leeway if I have nearby
# bases? Especially relevant for None (collect) actions
distance_move_other = np.abs(relative_other_pos).sum()
mask_between_move_and_threat = np.logical_and(
DISTANCES[(move_row, move_col)] < distance_move_other,
DISTANCES[(other_row, other_col)] < distance_move_other)
less_halite_ship_base_count = np.logical_and(
np.logical_and(my_bases_or_ships, mask_between_move_and_threat),
halite_ships <= halite_ships[other_row, other_col]).sum() + 0*(
my_bases[ROW_COL_MAX_DISTANCE_MASKS[
move_row, move_col, 2]].sum())
my_material_defense_multiplier = 2**less_halite_ship_base_count
for threat_dir in RELATIVE_NOT_NONE_DIRECTIONS:
nz_dim = int(threat_dir[0] == 0)
dir_offset = relative_other_pos[nz_dim]*threat_dir[nz_dim]
other_dir_abs_offset = np.abs(relative_other_pos[1-nz_dim])
# if observation['step'] == 155 and ship_k == '63-2':
# import pdb; pdb.set_trace()
if dir_offset >= 0 and (other_dir_abs_offset-1) <= dir_offset:
# Ignore the threat if the ship is on the diagonal and can not
# move in the direction of the threat dir
if (other_dir_abs_offset-1) == dir_offset and len(
other_sensible_actions) < len(MOVE_DIRECTIONS):
if nz_dim == 0:
threat_other_dir = (
0, 1 if relative_other_pos[1-nz_dim] < 0 else -1)
else:
threat_other_dir = (
1 if relative_other_pos[1-nz_dim] < 0 else -1, 0)
threat_other_dirs = [threat_other_dir, threat_dir]
threats_actionable = np.array([
t in other_sensible_actions for t in threat_other_dirs])
consider_this_threat = np.any(threats_actionable)
if threats_actionable[1] and not threats_actionable[0]:
# Lower the threat weight - the opponent can not directly
# attack the considered threat direction and can only move
# along the threat direction
other_dir_abs_offset += 2
else:
consider_this_threat = True
if other_dir_abs_offset == 0 and dir_offset == 0:
                # This covers the scenario where a one step threat is ignored
                # because I have been chased for a while and moving onto the
                # threat square is currently being considered.
# This avoids division by zero but is overridden later anyway
other_dir_abs_offset = 2
if consider_this_threat:
lt_catch_prob[threat_dir].append(max(2,
other_dir_abs_offset+dir_offset)*(
my_material_defense_multiplier))
# Add a "bootstrapped" catch probability using the density of the
# players towards the edge of the threat direction
# Only add it if the next halite is > 0 (otherwise assume I can
# always escape)
# Also factor in the distance to my nearest non abandoned base
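          # Illustrative example with hypothetical values: if the mean escape
          # influence probability towards a threat direction is 0.8, the value
          # 1/(1-0.8) = 5 is appended below, which is treated like a threat at
          # an effective distance of 5 in the survival computation that follows.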
if my_next_halite > 0:
current_nearest_base_distance = my_nearest_base_distances[row, col]
moved_nearest_base_distance = my_nearest_base_distances[
move_row, move_col]
move_distance_difference = current_nearest_base_distance - (
moved_nearest_base_distance)
for threat_dir in RELATIVE_NOT_NONE_DIRECTIONS:
dens_threat_rows = np.mod(move_row + threat_dir[0]*(
np.arange(config['n_step_avoid_window_size']//2,
config['n_step_avoid_window_size'])), grid_size)
dens_threat_cols = np.mod(move_col + threat_dir[1]*(
1+np.arange(config['n_step_avoid_window_size']//2,
config['n_step_avoid_window_size'])), grid_size)
escape_probs = escape_influence_probs[
dens_threat_rows, dens_threat_cols]
mean_escape_prob = escape_probs.mean()
if escape_probs[:2].min() < 1:
if move_distance_difference > 0:
# When in trouble, it is typically better to move towards one
# of my bases. The move closer distance is of course 1.
mean_escape_prob *= 1.25
if mean_escape_prob < 1:
lt_catch_prob[threat_dir].append(1/(1-mean_escape_prob+1e-9))
# if observation['step'] == 75 and ship_k == '64-1' and d in [
# EAST, WEST]:
# import pdb; pdb.set_trace()
# if observation['step'] == 112 and ship_k == '76-1':
# import pdb; pdb.set_trace()
if np.all([len(v) > 0 for v in lt_catch_prob.values()]):
# Interpretation: for a threat at distance d, I have a probability
# of surviving it of (d-1)/d. The probability of surviving all
            # threats is the product of all individual threats
survive_probs = np.array([
(np.maximum(0.2, (np.array(lt_catch_prob[k])-1)/np.array(
lt_catch_prob[k]))).prod() for k in lt_catch_prob])
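            # Worked example with hypothetical values: catch entries of [2] in
            # one direction and [2, 4] in another give survive probabilities of
            # max(0.2, 1/2) = 0.5 and 0.5*0.75 = 0.375; min_die_prob is then
            # 1 - max(0.5, 0.375) = 0.5 before the main base distance
            # correction below.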
min_die_prob = 1-survive_probs.max()
if main_base_distances.max() > 0:
if main_base_distances[move_row, move_col] <= 2:
min_die_prob = 0
else:
min_die_prob = max(
0, min_die_prob-0.33**main_base_distances[
move_row, move_col])
# if observation['step'] == 155 and ship_k in ['63-2', '63-1']:
# import pdb; pdb.set_trace()
# Disincentivize an action that can get me boxed in during the next
# N steps
mask_avoid_n_steps = np.copy(HALF_PLANES_RUN[(row, col)][d])
if d is not None:
mask_avoid_n_steps[row, col] = False
collect_grid_scores[mask_avoid_n_steps] *= ((
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob))
return_to_base_scores[mask_avoid_n_steps] *= (
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob)
establish_base_scores[mask_avoid_n_steps] *= (
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob)
n_step_bad_directions_die_probs[d] = min_die_prob
# Correction to act with more risk towards the end of the game
die_prob_cutoff = (n_step_avoid_min_die_prob_cutoff + 0.01*max(
0, 50-steps_remaining))
if d is None:
if observation['relative_step'] > config[
'end_hunting_season_relative_step']:
die_prob_cutoff = max(die_prob_cutoff, config[
'after_hunting_season_collect_max_n_step_risk'])
elif observation['relative_step'] > config[
'late_hunting_season_more_collect_relative_step']:
die_prob_cutoff = max(die_prob_cutoff, config[
'late_hunting_season_collect_max_n_step_risk'])
# print(observation['step'], die_prob_cutoff)
if min_die_prob > die_prob_cutoff:
n_step_bad_directions.append(d)
# if observation['step'] == 215 and ship_k == '2-2':
# import pdb; pdb.set_trace()
# Corner case: if I have a zero halite ship that is boxed in by other zero
# halite ships on a zero halite square: compute the risk for all available
# actions and only retain the actions with the lowest collision risks
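  # The risk lookup key below combines whether the threat is near a base with
  # the total move distance, e.g. a hypothetical key 'True_1' reads as "near a
  # base, one step of combined movement"; the stored value is a learned,
  # probability-like risk score for that opponent's aggressive zero halite
  # behavior.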
if halite_ships[row, col] == 0 and len(valid_directions) == 0 and (
obs_halite[row, col] == 0):
risk_scores = np.zeros(len(MOVE_DIRECTIONS))
for risk_id, d in enumerate(MOVE_DIRECTIONS):
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
for potential_threat_dir in MOVE_DIRECTIONS:
threat_row, threat_col = move_ship_row_col(
move_row, move_col, potential_threat_dir, grid_size)
if opponent_ships[threat_row, threat_col] and halite_ships[
threat_row, threat_col] == 0:
opponent_id = player_ids[threat_row, threat_col]
is_near_base = nearest_base_distances[
threat_row, threat_col] <= config['log_near_base_distance']
distance = int(d is not None) + int(potential_threat_dir is not None)
risk_lookup_k = str(is_near_base) + '_' + str(distance)
risk_scores[risk_id] = max(
risk_scores[risk_id], history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k])
best_risk_score = risk_scores.min()
if best_risk_score < 0.05:
valid_directions = [d for d_id, d in enumerate(
MOVE_DIRECTIONS) if risk_scores[d_id] == best_risk_score]
else:
valid_directions = [None]
one_step_valid_directions = copy.copy(valid_directions)
bad_directions = list(set(MOVE_DIRECTIONS) - set(valid_directions))
# if observation['step'] == 169 and ship_k == '65-2':
# import pdb; pdb.set_trace()
# Corner case: if I have a zero halite ship that is boxed in by other zero
# halite ships on a non-zero halite square: prefer moving in directions where
# there is a lower risk of losing the ship as a function of opponent zero
# halite behavior
if halite_ships[row, col] == 0 and obs_halite[row, col] > 0 and (
(len(valid_directions) == 1 and (valid_directions[0] is None)) or (
len(valid_directions) == 0)):
risk_scores = np.zeros(len(MOVE_DIRECTIONS))
risk_scores[0] = 1 # Definitely don't stand still
for risk_id, d in enumerate(MOVE_DIRECTIONS):
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
for potential_threat_dir in MOVE_DIRECTIONS:
threat_row, threat_col = move_ship_row_col(
move_row, move_col, potential_threat_dir, grid_size)
if opponent_ships[threat_row, threat_col] and halite_ships[
threat_row, threat_col] == 0:
opponent_id = player_ids[threat_row, threat_col]
is_near_base = nearest_base_distances[
threat_row, threat_col] <= config['log_near_base_distance']
distance = int(d is not None) + int(potential_threat_dir is not None)
risk_lookup_k = str(is_near_base) + '_' + str(distance)
risk_scores[risk_id] = max(
risk_scores[risk_id], history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k])
best_risk_score = risk_scores.min()
valid_directions = [d for d_id, d in enumerate(
MOVE_DIRECTIONS) if risk_scores[d_id] == best_risk_score]
one_step_valid_directions = copy.copy(valid_directions)
bad_directions = list(set(MOVE_DIRECTIONS) - set(valid_directions))
# Treat attack squares I should avoid with a zero halite ship as N-step bad
# directions, if that leaves us with options
if np.any(avoid_attack_squares_zero_halite) and halite_ships[
row, col] == 0 and steps_remaining > 1:
avoid_attack_directions = []
for d in valid_non_base_directions:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if avoid_attack_squares_zero_halite[move_row, move_col]:
avoid_attack_directions.append(d)
if len(avoid_attack_directions):
all_bad_dirs = set(bad_directions + (
two_step_bad_directions + n_step_bad_directions))
updated_bad_dirs = all_bad_dirs.union(set(avoid_attack_directions))
if len(updated_bad_dirs) > len(all_bad_dirs) and len(
updated_bad_dirs) < len(MOVE_DIRECTIONS):
new_bad_directions = list(updated_bad_dirs.difference(all_bad_dirs))
# import pdb; pdb.set_trace()
n_step_bad_directions.extend(new_bad_directions)
for new_bad_dir in new_bad_directions:
if not new_bad_dir in n_step_bad_directions_die_probs:
n_step_bad_directions_die_probs[new_bad_dir] = 0
# Corner case: if I can replace a chaser position and there are only very
# bad two step escape directions left: replace the chaser
if take_my_next_square_dir is not None and (
take_my_next_square_dir in two_step_bad_directions):
make_chase_replace_n_bad = True
for d in NOT_NONE_DIRECTIONS:
if not d == take_my_next_square_dir:
if d in n_step_bad_directions:
if n_step_bad_directions_die_probs[d] < 0.6:
make_chase_replace_n_bad = False
break
elif d in valid_directions:
make_chase_replace_n_bad = False
break
if make_chase_replace_n_bad:
print("CHASE: turning two step bad into n step bad", observation['step'],
row, col)
two_step_bad_directions.remove(take_my_next_square_dir)
  # Treat the chase "replace chaser" position as an n-step bad action.
  # Otherwise, we can get trapped in a loop of dumb behavior.
if take_my_next_square_dir is not None and not take_my_next_square_dir in (
two_step_bad_directions) and not take_my_next_square_dir in (
n_step_bad_directions):
n_step_bad_directions.append(take_my_next_square_dir)
n_step_bad_directions_die_probs[take_my_next_square_dir] = 1/4
# If all valid non base directions are n step bad actions: drop n step bad
# actions (call them 2 step bad) that are significantly worse than other n
# step bad actions
all_original_n_step_bad_directions = copy.copy(n_step_bad_directions)
all_n_step_bad_directions_die_probs = copy.copy(
n_step_bad_directions_die_probs)
if len(n_step_bad_directions) > 1 and len(
n_step_bad_directions) == len(valid_non_base_directions) and np.all(
np.array([d in n_step_bad_directions for d in (
valid_non_base_directions)])):
die_probs = np.array(list(n_step_bad_directions_die_probs.values()))
max_die_prob = min(die_probs.min()*2, die_probs.min()+0.1)
delete_from_n_step_bad = []
for d in n_step_bad_directions:
if n_step_bad_directions_die_probs[d] > max_die_prob and (
not d in safe_return_base_directions):
delete_from_n_step_bad.append(d)
for d in delete_from_n_step_bad:
two_step_bad_directions.append(d)
n_step_bad_directions.remove(d)
del n_step_bad_directions_die_probs[d]
if valid_non_base_directions:
valid_not_preferred_dirs = list(set(
two_step_bad_directions + n_step_bad_directions))
if valid_not_preferred_dirs and (
len(valid_non_base_directions) - len(valid_not_preferred_dirs)) > 0:
# Drop 2 and n step bad directions if that leaves us with valid options
bad_directions.extend(valid_not_preferred_dirs)
bad_directions = list(set(bad_directions))
valid_directions = list(
set(valid_directions) - set(valid_not_preferred_dirs))
else:
# Drop 2 step bad directions if that leaves us with valid options
valid_not_preferred_dirs = set(two_step_bad_directions)
if valid_not_preferred_dirs and (
len(valid_non_base_directions) - len(valid_not_preferred_dirs)) > 0:
bad_directions.extend(valid_not_preferred_dirs)
valid_directions = list(
set(valid_directions) - set(valid_not_preferred_dirs))
# Only keep the strongly preferred directions if there are any
if len(strongly_preferred_directions) > 0:
preferred_directions = strongly_preferred_directions
# Drop repetitive actions if that leaves us with valid options
if ship_k in history['avoid_cycle_actions']:
repetitive_action = history['avoid_cycle_actions'][ship_k]
if repetitive_action in valid_directions and len(valid_directions) > 1:
valid_directions.remove(repetitive_action)
if repetitive_action in preferred_directions:
preferred_directions.remove(repetitive_action)
if repetitive_action in one_step_valid_directions:
one_step_valid_directions.remove(repetitive_action)
bad_directions.append(repetitive_action)
# if observation['step'] == 180 and ship_k == '10-2':
# import pdb; pdb.set_trace()
return (collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, preferred_directions, valid_directions,
len(bad_directions) == len(MOVE_DIRECTIONS), two_step_bad_directions,
n_step_bad_directions, one_step_valid_directions,
n_step_bad_directions_die_probs, all_original_n_step_bad_directions,
all_n_step_bad_directions_die_probs)
# Update the scores as a function of blocking opponent bases
def update_scores_blockers(
collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, row, col, grid_size, blockers,
blocker_max_distances_to_consider, valid_directions,
one_step_valid_directions, early_base_direct_dir=None,
blocker_max_distance=half_distance_mask_dim, update_attack_base=True):
one_step_bad_directions = []
for d in NOT_NONE_DIRECTIONS:
if d == NORTH:
rows = np.mod(row - (1 + np.arange(blocker_max_distance)), grid_size)
cols = np.repeat(col, blocker_max_distance)
considered_vals = blockers[rows, col]
considered_max_distances = blocker_max_distances_to_consider[rows, col]
elif d == SOUTH:
rows = np.mod(row + (1 + np.arange(blocker_max_distance)), grid_size)
cols = np.repeat(col, blocker_max_distance)
considered_vals = blockers[rows, col]
considered_max_distances = blocker_max_distances_to_consider[rows, col]
elif d == WEST:
rows = np.repeat(row, blocker_max_distance)
cols = np.mod(col - (1 + np.arange(blocker_max_distance)), grid_size)
considered_vals = blockers[row, cols]
considered_max_distances = blocker_max_distances_to_consider[row, cols]
elif d == EAST:
rows = np.repeat(row, blocker_max_distance)
cols = np.mod(col + (1 + np.arange(blocker_max_distance)), grid_size)
considered_vals = blockers[row, cols]
considered_max_distances = blocker_max_distances_to_consider[row, cols]
if d == early_base_direct_dir:
considered_vals[0] = 1
is_blocking = np.logical_and(considered_vals, np.arange(
blocker_max_distance) < considered_max_distances)
if np.any(is_blocking):
first_blocking_id = np.where(is_blocking)[0][0]
mask_rows = rows[first_blocking_id:]
mask_cols = cols[first_blocking_id:]
collect_grid_scores[mask_rows, mask_cols] = -1e12
return_to_base_scores[mask_rows, mask_cols] = -1e12
establish_base_scores[mask_rows, mask_cols] = -1e12
if update_attack_base:
attack_base_scores[mask_rows, mask_cols] = -1e12
if first_blocking_id == 0:
one_step_bad_directions.append(d)
if d in valid_directions:
valid_directions.remove(d)
if d in one_step_valid_directions:
one_step_valid_directions.remove(d)
# Lower the score for entire quadrants when the two quadrant directions are
# blocking the movement
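  # Illustration: if both NORTH and EAST are blocked at distance 1, the squares
  # in the intersection of the corresponding catch half planes (roughly the
  # north-east quadrant) also get their scores floored to -1e12 below.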
num_bad_one_directions = len(one_step_bad_directions)
if num_bad_one_directions > 1:
for i in range(num_bad_one_directions-1):
bad_direction_1 = one_step_bad_directions[i]
for j in range(i+1, num_bad_one_directions):
bad_direction_2 = one_step_bad_directions[j]
if (bad_direction_1 in [NORTH, SOUTH]) != (
bad_direction_2 in [NORTH, SOUTH]):
bad_quadrant_mask = np.logical_and(
HALF_PLANES_CATCH[row, col][bad_direction_1],
HALF_PLANES_CATCH[row, col][bad_direction_2])
collect_grid_scores[bad_quadrant_mask] = -1e12
return_to_base_scores[bad_quadrant_mask] = -1e12
establish_base_scores[bad_quadrant_mask] = -1e12
if update_attack_base:
attack_base_scores[bad_quadrant_mask] = -1e12
  # Additional logic for avoiding collisions when there is only a single
  # escape direction
if blockers[row, col]:
collect_grid_scores[row, col] = -1e12
return_to_base_scores[row, col] = -1e12
establish_base_scores[row, col] = -1e12
attack_base_scores[row, col] = -1e12
if None in valid_directions:
valid_directions.remove(None)
if None in one_step_valid_directions:
one_step_valid_directions.remove(None)
return (collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, valid_directions, one_step_valid_directions,
one_step_bad_directions)
def set_scores_single_nearby_zero(scores, nearby, size, ship_row, ship_col,
nearby_distance=1):
nearby_pos = np.where(nearby)
row = nearby_pos[0][0]
col = nearby_pos[1][0]
next_nearby_pos = None
drop_None_valid = False
for i in range(-nearby_distance, nearby_distance+1):
near_row = (row + i) % size
for j in range(-nearby_distance, nearby_distance+1):
near_col = (col + j) % size
if i != 0 or j != 0:
# Don't gather near the base and don't move on top of it
scores[near_row, near_col] = -1e7
if near_row == ship_row and near_col == ship_col:
next_nearby_pos = get_dir_from_target(
ship_row, ship_col, row, col, size)[0]
else:
if near_row == ship_row and near_col == ship_col:
# Don't stay on top of the base
drop_None_valid = True
return scores, next_nearby_pos, drop_None_valid
def grid_distance(r1, c1, r2, c2, size):
horiz_diff = c2-c1
horiz_distance = min(np.abs(horiz_diff),
min(np.abs(horiz_diff-size), np.abs(horiz_diff+size)))
vert_diff = r2-r1
vert_distance = min(np.abs(vert_diff),
min(np.abs(vert_diff-size), np.abs(vert_diff+size)))
return horiz_distance+vert_distance
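# Illustrative sanity check (hypothetical call): on a 21x21 torus,
# grid_distance(1, 1, 19, 19, 21) == 6, since the wrap-around distance is
# min(18, 3) = 3 along each axis.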
def override_early_return_base_scores(
base_return_grid_multiplier, my_bases, ship_row, ship_col, my_ship_count):
base_pos = np.where(my_bases)
base_row = base_pos[0][0]
base_col = base_pos[1][0]
dist_to_base = DISTANCES[base_row, base_col][ship_row, ship_col]
# Remember the rule that blocks spawning when a ship is about to return
if dist_to_base <= 10-my_ship_count:
base_return_grid_multiplier[base_row, base_col] = 0
return base_return_grid_multiplier
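# Example of the rule above with hypothetical numbers: with 4 ships, a ship at
# distance <= 6 (10-4) from the single base gets a zero return multiplier for
# that base, which discourages an early return that would otherwise block
# spawning at the base square.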
def get_nearest_base_distances(grid_size, ignore_abandoned, observation):
base_dms = []
base_distances = []
# for b in player_obs[1]:
# row, col = row_col_from_square_grid_pos(player_obs[1][b], grid_size)
# if not (row, col) in ignore_abandoned:
# base_dms.append(DISTANCE_MASKS[(row, col)])
# base_distances.append(DISTANCES[(row, col)])
my_bases = np.copy(observation['rewards_bases_ships'][0][1])
for r, c in ignore_abandoned:
my_bases[r, c] = 0
num_my_bases = my_bases.sum()
if num_my_bases > 0:
my_base_positions = np.where(my_bases)
for base_id in range(num_my_bases):
base_row = my_base_positions[0][base_id]
base_col = my_base_positions[1][base_id]
base_dms.append(DISTANCE_MASKS[(base_row, base_col)])
base_distances.append(DISTANCES[(base_row, base_col)])
if base_dms:
base_nearest_distance_scores = np.stack(base_dms).max(0)
all_base_distances = np.stack(base_distances)
else:
base_nearest_distance_scores = np.ones((grid_size, grid_size))
all_base_distances = 99*np.ones((1, grid_size, grid_size))
nearest_base_distances = np.min(all_base_distances, 0)
return (base_nearest_distance_scores, all_base_distances,
nearest_base_distances)
def get_valid_opponent_ship_actions(
config, rewards_bases_ships, halite_ships, size, history,
nearest_base_distances, observation, env_config):
opponent_ships_sensible_actions = {}
opponent_ships_sensible_actions_no_risk = {}
boxed_in_zero_halite_opponents = []
likely_convert_opponent_positions = []
possible_convert_opponent_positions = []
num_agents = len(rewards_bases_ships)
convert_cost = env_config.convertCost
stacked_bases = np.stack([rbs[1] for rbs in rewards_bases_ships])
stacked_ships = np.stack([rbs[2] for rbs in rewards_bases_ships])
num_players = stacked_ships.shape[0]
grid_size = stacked_ships.shape[1]
player_base_ids = -1*np.ones((grid_size, grid_size))
  boxed_in_attack_squares = np.zeros((grid_size, grid_size), dtype=bool)
  boxed_in_opponent_ids = -1*np.ones((grid_size, grid_size), dtype=int)
  opponent_single_escape_pos = np.zeros(
    (grid_size, grid_size), dtype=bool)
single_escape_mapping = {}
for i in range(num_players):
player_base_ids[stacked_bases[i]] = i
for i in range(1, num_agents):
opponent_ships = stacked_ships[i]
enemy_ships = np.delete(stacked_ships, (i), axis=0).sum(0)
ship_pos = np.where(opponent_ships)
num_ships = ship_pos[0].size
for j in range(num_ships):
valid_rel_directions = copy.copy(RELATIVE_DIRECTIONS)
valid_rel_directions_no_move_risk = copy.copy(RELATIVE_DIRECTIONS)
row = ship_pos[0][j]
col = ship_pos[1][j]
ship_halite = halite_ships[row, col]
for row_diff in range(-2, 3):
for col_diff in range(-2, 3):
distance = (np.abs(row_diff) + np.abs(col_diff))
if distance == 1 or distance == 2:
other_row = (row + row_diff) % size
other_col = (col + col_diff) % size
if enemy_ships[other_row, other_col]:
hal_diff = halite_ships[other_row, other_col] - ship_halite
# if observation['step'] == 189 and row == 14 and col == 2:
# import pdb; pdb.set_trace()
ignores_move_collision = False
risky_stay_still_collision = False
if halite_ships[row, col] == halite_ships[
other_row, other_col]:
# Note: use the opponent distance because the opponent model is
# learned using the opponent distance to the nearest base (with
# near base distance cutoff typically at 2)
is_near_base = nearest_base_distances[
other_row, other_col] <= config['log_near_base_distance']
risk_lookup_k = str(is_near_base) + '_' + str(distance) + (
'_ever_risky')
if distance == 2:
ignores_move_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k]
else:
risk_lookup_k_dist_zero = str(is_near_base) + '_' + str(
0) + '_ever_risky'
risky_stay_still_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k]
ignores_move_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k_dist_zero]
# if ignores_move_collision and distance == 1:
# import pdb; pdb.set_trace()
# x=1
rem_dirs = []
if risky_stay_still_collision:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff <= 0 else []
else:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff < 0 else []
if not ignores_move_collision:
rem_dirs += [(-1, 0)] if row_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(1, 0)] if row_diff > 0 and hal_diff <= 0 else []
rem_dirs += [(0, -1)] if col_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(0, 1)] if col_diff > 0 and hal_diff <= 0 else []
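              # Illustration with hypothetical offsets: a strictly lower halite
              # enemy one step to the north (row_diff == -1, col_diff == 0,
              # hal_diff < 0) removes both the (-1, 0) move and, at distance 1,
              # the stay-still (0, 0) option from the valid relative directions.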
for d in rem_dirs:
if d in valid_rel_directions:
valid_rel_directions.remove(d)
# if observation['step'] == 146 and row == 13 and col == 13:
# import pdb; pdb.set_trace()
# Don't check for risky opponent zero halite behavior
rem_dirs = []
if risky_stay_still_collision:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff <= 0 else []
else:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff < 0 else []
rem_dirs += [(-1, 0)] if row_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(1, 0)] if row_diff > 0 and hal_diff <= 0 else []
rem_dirs += [(0, -1)] if col_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(0, 1)] if col_diff > 0 and hal_diff <= 0 else []
for d in rem_dirs:
if d in valid_rel_directions_no_move_risk:
valid_rel_directions_no_move_risk.remove(d)
# Prune for opponent base positions
rem_dirs = []
for rel_dir in valid_rel_directions:
d = RELATIVE_DIR_TO_DIRECTION_MAPPING[rel_dir]
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
move_base_id = player_base_ids[move_row, move_col]
if move_base_id >= 0 and move_base_id != i:
rem_dirs.append(rel_dir)
for d in rem_dirs:
valid_rel_directions.remove(d)
# if observation['step'] == 146 and row == 14 and col == 13:
# import pdb; pdb.set_trace()
rem_dirs = []
for rel_dir in valid_rel_directions_no_move_risk:
d = RELATIVE_DIR_TO_DIRECTION_MAPPING[rel_dir]
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
move_base_id = player_base_ids[move_row, move_col]
if move_base_id >= 0 and move_base_id != i:
rem_dirs.append(rel_dir)
for d in rem_dirs:
valid_rel_directions_no_move_risk.remove(d)
if len(valid_rel_directions) == 0:
player_halite_budget = observation['rewards_bases_ships'][i][0]
if ((ship_halite + player_halite_budget) >= convert_cost):
if ship_halite >= history['inferred_boxed_in_conv_threshold'][i][1]:
likely_convert_opponent_positions.append((row, col))
if ship_halite >= history['inferred_boxed_in_conv_threshold'][i][0]:
possible_convert_opponent_positions.append((row, col))
if ship_halite > 0:
for d in MOVE_DIRECTIONS:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
boxed_in_attack_squares[move_row, move_col] = True
boxed_in_opponent_ids[move_row, move_col] = i
if ship_halite == 0 and len(valid_rel_directions_no_move_risk) == 1 and (
valid_rel_directions_no_move_risk[0] == (0, 0)):
boxed_in_zero_halite_opponents.append((row, col))
if len(valid_rel_directions_no_move_risk) == 1:
escape_dir = RELATIVE_DIR_TO_DIRECTION_MAPPING[
valid_rel_directions_no_move_risk[0]]
escape_square = move_ship_row_col(row, col, escape_dir, grid_size)
opponent_single_escape_pos[escape_square] = 1
single_escape_mapping[(row, col)] = escape_square
opponent_ships_sensible_actions[(row, col)] = valid_rel_directions
opponent_ships_sensible_actions_no_risk[(row, col)] = (
valid_rel_directions_no_move_risk)
# if observation['step'] == 146:
# import pdb; pdb.set_trace()
  # Do another pass over the zero halite ships to figure out if they are boxed in
# by the escape squares of their own non zero halite ships - these ships
# will very likely take risky actions and should therefore be avoided
if np.any(opponent_single_escape_pos):
for j in range(num_ships):
row = ship_pos[0][j]
col = ship_pos[1][j]
ship_halite = halite_ships[row, col]
if ship_halite == 0:
valid_rel_directions = opponent_ships_sensible_actions[(row, col)]
valid_rel_directions_no_move_risk = (
opponent_ships_sensible_actions_no_risk[row, col])
# if observation['step'] == 146 and row == 15 and col == 12:
# import pdb; pdb.set_trace()
# if observation['step'] == 146 and row == 14 and col == 13:
# import pdb; pdb.set_trace()
for d in NOT_NONE_DIRECTIONS:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if opponent_single_escape_pos[move_row, move_col]:
my_escape_square = False
if (row, col) in single_escape_mapping:
my_escape_square = (move_row, move_col) == (
single_escape_mapping[row, col])
if my_escape_square:
              # Still treat it as a bad direction if there is another ship
              # that has my escape square as its only escape square
num_escape_count = np.array(
[v == (move_row, move_col) for v in (
single_escape_mapping.values())]).sum()
my_escape_square = num_escape_count == 1
if not my_escape_square:
avoid_rel_direction = RELATIVE_DIR_MAPPING[d]
if avoid_rel_direction in valid_rel_directions:
valid_rel_directions.remove(avoid_rel_direction)
if avoid_rel_direction in valid_rel_directions_no_move_risk:
valid_rel_directions_no_move_risk.remove(avoid_rel_direction)
if (len(valid_rel_directions_no_move_risk) == 0 or (
len(valid_rel_directions_no_move_risk) == 1 and (
valid_rel_directions_no_move_risk[0] == (0, 0)))) and (
not (row, col) in boxed_in_zero_halite_opponents):
# print("AVOIDING chained zero halite collision",
# observation['step'], row, col)
boxed_in_zero_halite_opponents.append((row, col))
opponent_ships_sensible_actions[(row, col)] = valid_rel_directions
opponent_ships_sensible_actions_no_risk[(row, col)] = (
valid_rel_directions_no_move_risk)
return (opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, boxed_in_attack_squares,
boxed_in_opponent_ids, boxed_in_zero_halite_opponents,
likely_convert_opponent_positions,
possible_convert_opponent_positions)
def scale_attack_scores_bases_ships(
config, observation, player_obs, spawn_cost, non_abandoned_base_distances,
weighted_base_mask, steps_remaining, obs_halite, halite_ships, history,
smoothed_halite, player_influence_maps,
nearest_base_distances_with_my_excluded, player_ids,
laplace_smoother_rel_ship_count=4, initial_normalize_ship_diff=10,
final_normalize_ship_diff=3):
stacked_bases = np.stack([rbs[1] for rbs in observation[
'rewards_bases_ships']])
my_bases = stacked_bases[0]
# Exclude bases that are persistently camped by opponents
for base_pos in history['my_base_not_attacked_positions']:
my_bases[base_pos] = 0
stacked_opponent_bases = stacked_bases[1:]
stacked_ships = np.stack([rbs[2] for rbs in observation[
'rewards_bases_ships']])
stacked_opponent_ships = stacked_ships[1:]
base_counts = stacked_opponent_bases.sum((1, 2))
my_ship_count = len(player_obs[2])
ship_counts = stacked_opponent_ships.sum((1, 2))
grid_size = stacked_opponent_bases.shape[1]
approximate_scores = history['current_scores']
num_players = stacked_bases.shape[0]
player_ranks = np.zeros(num_players)
for i in range(num_players):
player_ranks[i] = (approximate_scores >= approximate_scores[i]).sum()
# print(approximate_scores)
# Factor 1: an opponent with less bases is more attractive to attack
base_count_multiplier = np.where(base_counts == 0, 0, 1/(base_counts+1e-9))
# Factor 2: an opponent that is closer in score is more attractive to attack
spawn_diffs = (approximate_scores[0] - approximate_scores[1:])/spawn_cost
abs_spawn_diffs = np.abs(spawn_diffs)
currently_winning = approximate_scores[0] >= approximate_scores[1:]
approximate_score_diff = approximate_scores[0] - approximate_scores[1:]
normalize_diff = initial_normalize_ship_diff - observation['relative_step']*(
initial_normalize_ship_diff-final_normalize_ship_diff)
abs_rel_normalized_diff = np.maximum(
0, (normalize_diff-abs_spawn_diffs)/normalize_diff)
rel_score_max_y = initial_normalize_ship_diff/normalize_diff
rel_score_multiplier = abs_rel_normalized_diff*rel_score_max_y
# Factor 3: an opponent with less ships is more attractive to attack since it
# is harder for them to defend the base
rel_ship_count_multiplier = (my_ship_count+laplace_smoother_rel_ship_count)/(
ship_counts+laplace_smoother_rel_ship_count)
# Additional term: attack bases nearby my main base
  opponent_bases = stacked_opponent_bases.sum(0).astype(bool)
if opponent_bases.sum() > 0 and non_abandoned_base_distances.max() > 0:
additive_nearby_main_base = 3/max(0.15, observation['relative_step'])/(
1.5**non_abandoned_base_distances)/(
weighted_base_mask[my_bases].sum())
additive_nearby_main_base[~opponent_bases] = 0
else:
additive_nearby_main_base = 0
attack_multipliers = base_count_multiplier*rel_score_multiplier*(
rel_ship_count_multiplier)
tiled_multipliers = np.tile(attack_multipliers.reshape((-1, 1, 1)),
[1, grid_size, grid_size])
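  # Example with hypothetical numbers: an opponent with 2 bases
  # (base_count_multiplier ~0.5), a nearly equal score early in the game
  # (rel_score_multiplier close to 1) and 12 ships versus my 20
  # (rel_ship_count_multiplier (20+4)/(12+4) = 1.5) ends up with an attack
  # multiplier of roughly 0.5*1*1.5 = 0.75 on each of its bases.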
# if observation['step'] == 391:
# import pdb; pdb.set_trace()
opponent_bases_scaled = (stacked_opponent_bases*tiled_multipliers).sum(0) + (
additive_nearby_main_base)
# Compute the priority of attacking the ships of opponents
opponent_ships_scaled = np.maximum(0, 1 - np.abs(
approximate_scores[0]-approximate_scores[1:])/steps_remaining/10)
# print(observation['step'], opponent_ships_scaled, approximate_scores)
# if observation['step'] == 300:
# import pdb; pdb.set_trace()
# If I am winning by a considerable margin before the game is over, and the
# number three is far behind the number two: go ballistic on the number two
# Prefer opponent bases that are close to my bases and halite, and where the
# opponent has a relatively low density
# Make sure to guarantee some continuity with a start and stop mode
# Ballistic scenarios:
# - I am well ahead of all opponents: target the initial best agent
# - I am winning with a solid margin and the number three is far behind
# the number two: target the number two
# - I am in a close fight with the number two/one and the number three is
# very far behind: target the number two
winning_massively = np.all(spawn_diffs >= (
18-9*observation['relative_step']))
if not winning_massively:
history['ballistic_early_best_target_mode'] = False
winning_very_clearly = np.all(spawn_diffs >= (
14-7*observation['relative_step']))
winning_clearly = np.all(spawn_diffs >= (8-4*observation['relative_step']))
winning_considerably = np.all(spawn_diffs >= (
6-4*observation['relative_step'] + int(history[
'ballistic_early_best_target_mode'])))
winning_massively_near_end_game = winning_massively and observation[
'relative_step'] > 0.75
winning_massively_before_end_game = winning_massively and not (
winning_massively_near_end_game)
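  # Hypothetical reading of the thresholds above at relative_step == 0.5:
  # "massively" requires leading every opponent by at least 13.5 spawn costs,
  # "very clearly" >= 10.5, "clearly" >= 6 and "considerably" >= 4 (plus 1
  # while the early best target mode is active).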
first_opp_id = np.argsort(spawn_diffs)[0]
second_opp_id = np.argsort(spawn_diffs)[1]
second_third_spawn_diff = spawn_diffs[second_opp_id] - spawn_diffs[
first_opp_id]
very_tight_fight_for_first = np.abs(spawn_diffs[first_opp_id]) < 1 and (
spawn_diffs[second_opp_id] >= (12-8*observation['relative_step']))
tight_fight_for_first = np.abs(spawn_diffs[first_opp_id]) < 3 and (
spawn_diffs[second_opp_id] >= (8-6*observation['relative_step']))
prev_ballistic_mode = history['ballistic_mode']
should_start_ballistic = (not winning_massively_before_end_game) and (
winning_clearly and second_third_spawn_diff > (
7-2*observation['relative_step']) or very_tight_fight_for_first or (
winning_massively_near_end_game)) and (
my_ship_count >= 15-max(0, 40*(observation['relative_step']-0.8)))
should_continue_ballistic = not (
winning_massively_before_end_game) and (winning_very_clearly or (
winning_clearly and (second_third_spawn_diff > 1)) or (
winning_considerably and (
second_third_spawn_diff > (2-observation['relative_step']))) or (
tight_fight_for_first)
) and (my_ship_count >= 10-max(0, 20*(observation['relative_step']-0.8)))
ballistic_mode = should_start_ballistic or (
prev_ballistic_mode and should_continue_ballistic)
# Select the next target in line if the opponent has no bases and no ships
if history['ballistic_early_best_targets_sorted'] is not None:
for opponent_id in history['ballistic_early_best_targets_sorted']:
ballistic_early_best_target_mode_target = opponent_id
num_opponent_bases = stacked_bases[opponent_id+1].sum()
num_opponent_ships = stacked_ships[opponent_id+1].sum()
if num_opponent_bases > 0 or num_opponent_ships > 0:
break
else:
ballistic_early_best_target_mode_target = first_opp_id
# print(observation['step'], ballistic_early_best_target_mode_target)
# if observation['step'] == 146:
# import pdb; pdb.set_trace()
# Ballistic early best target mode override of the opponent id: prefer to
# attack opponents that have a base which is close to one of my non
# abandoned bases
opponent_base_positions = np.where(stacked_opponent_bases.sum(0) > 0)
opponent_near_my_base_distances = nearest_base_distances_with_my_excluded[
opponent_base_positions]
targeted_base_override = None
if np.any(opponent_base_positions) and winning_very_clearly and (
opponent_near_my_base_distances.min() < 6):
prev_ballistic_target_override = history['prev_ballistic_target_override']
if history['prev_ballistic_target_override'] is not None and (
opponent_bases[prev_ballistic_target_override]):
targeted_base_override = prev_ballistic_target_override
else:
# Sort annoying bases by score: prefer to attack opponent bases that
# belong to the best opponent
smoothed_halite = smooth2d(observation['halite'])
opponent_near_my_base_scores = opponent_near_my_base_distances + 0.6*(
player_ranks[player_ids[opponent_base_positions]-1]) - 1e-9*(
smoothed_halite[opponent_base_positions])
target_base_id = np.argmin(opponent_near_my_base_scores)
targeted_base_override = (
opponent_base_positions[0][target_base_id],
opponent_base_positions[1][target_base_id])
history['prev_ballistic_target_override'] = targeted_base_override
if ballistic_mode and not prev_ballistic_mode and (
winning_massively_near_end_game):
# Switch to early best target mode - override of the target id
print(observation['step'], "Start attack on early best target",
ballistic_early_best_target_mode_target+1)
ballistic_early_best_target_mode = True
ballistic_target_id = ballistic_early_best_target_mode_target
elif ballistic_mode:
ballistic_early_best_target_mode = history[
'ballistic_early_best_target_mode'] and winning_very_clearly
if ballistic_early_best_target_mode or winning_massively_near_end_game:
# Early best target mode
ballistic_target_id = ballistic_early_best_target_mode_target
else:
# Standard ballistic mode
ballistic_target_id = first_opp_id
# print(observation['step'], "Winning massively near end?",
# winning_massively_near_end_game, ballistic_target_id)
else:
ballistic_early_best_target_mode = False
# Consider going ballistic on the nearest contender for the second place
# when the first place no longer seems possible
first_out_of_reach = spawn_diffs.min() <= (
-40+36*observation['relative_step']) # This should be conservative
if first_out_of_reach and np.abs(spawn_diffs[first_opp_id]) > np.abs(
spawn_diffs[second_opp_id]):
ballistic_target_id = second_opp_id
third_opp_id = np.argsort(spawn_diffs)[2]
spawn_diffs_not_best = np.array([spawn_diffs[i] for i in range(3) if (
not i == first_opp_id)])
winning_clearly_second = np.all(
spawn_diffs_not_best >= (8-4*observation['relative_step']))
winning_considerably_second = np.all(spawn_diffs_not_best >= (
6-4*observation['relative_step']))
third_fourth_spawn_diff = spawn_diffs[third_opp_id] - (
spawn_diffs[second_opp_id])
very_tight_fight_for_second = (
np.abs(spawn_diffs[second_opp_id]) < np.abs(
spawn_diffs[third_opp_id])/2) and (
spawn_diffs[third_opp_id] >= (12-8*observation['relative_step']))
tight_fight_for_second = (
np.abs(spawn_diffs[second_opp_id]) < np.abs(
spawn_diffs[third_opp_id])) and (
spawn_diffs[third_opp_id] >= (10-7*observation['relative_step']))
should_start_ballistic = (
winning_clearly_second and third_fourth_spawn_diff > (
4-2*observation['relative_step']) or (
very_tight_fight_for_second)) and (
my_ship_count >= 15-max(0, 40*(observation['relative_step']-0.8)))
should_continue_ballistic = ((
winning_clearly_second and (third_fourth_spawn_diff > 1)) or (
winning_considerably_second and (
third_fourth_spawn_diff > (2-observation['relative_step']))) or (
tight_fight_for_second)
) and (my_ship_count >= 10-max(
0, 20*(observation['relative_step']-0.8)))
ballistic_mode = should_start_ballistic or (
prev_ballistic_mode and should_continue_ballistic)
# if observation['step'] == 363:
# import pdb; pdb.set_trace()
# if ballistic_mode:
# print("SECOND BALLISTIC MODE", observation['step'],
# ballistic_target_id)
if not ballistic_mode:
ballistic_target_id = 0 # This could be 1 or 2 as well
history['ballistic_early_best_target_mode'] = (
ballistic_early_best_target_mode)
if not ballistic_mode and targeted_base_override is not None:
print("Go ballistic on nearby base", observation['step'],
targeted_base_override)
ballistic_mode = True
ballistic_target_id = np.argmax(base_counts)
num_target_bases = base_counts[ballistic_target_id]
ballistic_attack_base_targets = []
if ballistic_mode and num_target_bases > 0:
target_bases = stacked_opponent_bases[ballistic_target_id]
target_base_locations = np.where(target_bases)
attack_target_base_scores = np.zeros(num_target_bases)
my_base_density = smooth2d(my_bases, 10)
for base_id in range(num_target_bases):
base_row = target_base_locations[0][base_id]
base_col = target_base_locations[1][base_id]
attack_target_base_scores[base_id] = 5e-4*smoothed_halite[
base_row, base_col] + player_influence_maps[0, base_row, base_col] - (
player_influence_maps[ballistic_target_id+1, base_row, base_col]) + (
100*int((base_row, base_col) in history['prev_step'][
'ballistic_attack_base_targets'])) + 10*my_base_density[
base_row, base_col]
# import pdb; pdb.set_trace()
ordered_base_ids = np.argsort(-attack_target_base_scores)
num_attacked_bases = 1 # 1 is plenty of aggression for the world
for attack_id in range(num_attacked_bases):
if attack_id == 0 and targeted_base_override is not None:
# print("Targeted base override", observation['step'],
# targeted_base_override)
base_row, base_col = targeted_base_override
else:
base_id = ordered_base_ids[attack_id]
base_row = target_base_locations[0][base_id]
base_col = target_base_locations[1][base_id]
opponent_bases_scaled[base_row, base_col] = 1e4
ballistic_attack_base_targets.append((base_row, base_col))
del_keys = []
for k in history['camping_ships_strategy']:
if history['camping_ships_strategy'][k][5] in (
ballistic_attack_base_targets):
del_keys.append(k)
for k in del_keys:
del history['camping_ships_strategy'][k]
# print(observation['step'], ballistic_mode, ballistic_attack_base_targets)
history['ballistic_mode'] = ballistic_mode
return (opponent_bases_scaled, opponent_ships_scaled,
abs_rel_normalized_diff, currently_winning, approximate_score_diff,
history, ballistic_attack_base_targets)
def get_influence_map(config, stacked_bases, stacked_ships, halite_ships,
observation, player_obs, smooth_kernel_dim=7):
# FUTURE WORK: incorporate the number of ships in computing the base weights
# Reasoning: a base without ships is not really a threat
  all_ships = stacked_ships.sum(0).astype(bool)
  my_ships = stacked_ships[0].astype(bool)
if my_ships.sum() == 0:
return None, None, None, None, None, None
num_players = stacked_ships.shape[0]
grid_size = my_ships.shape[0]
ship_range = 1-config['influence_map_min_ship_weight']
all_ships_halite = halite_ships[all_ships]
unique_vals, unique_counts = np.unique(
all_ships_halite, return_counts=True)
assert np.all(np.diff(unique_vals) > 0)
  unique_halite_vals = np.sort(unique_vals).astype(int).tolist()
num_ships = all_ships_halite.size
halite_ranks = [np.array(
[unique_halite_vals.index(hs) for hs in halite_ships[
stacked_ships[i]]]) for i in range(num_players)]
less_rank_cum_counts = np.cumsum(unique_counts)
num_unique = unique_counts.size
halite_rank_counts = [np.array(
[less_rank_cum_counts[r-1] if r > 0 else 0 for r in (
halite_ranks[i])]) for i in range(num_players)]
ship_weights = [1 - r/(num_ships-1+1e-9)*ship_range for r in (
halite_rank_counts)]
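  # Example with hypothetical cargo: with three ships carrying 0, 0 and 50
  # halite, both empty ships get weight 1.0 while the loaded ship gets
  # 1 - 2/2*ship_range, i.e. the configured minimum ship weight, since two
  # ships rank strictly below it.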
raw_influence_maps = np.zeros((num_players, grid_size, grid_size))
raw_influence_maps_unweighted = np.zeros((num_players, grid_size, grid_size))
influence_maps = np.zeros((num_players, grid_size, grid_size))
influence_maps_unweighted = np.zeros((num_players, grid_size, grid_size))
for i in range(num_players):
raw_influence_maps[i][stacked_ships[i]] += ship_weights[i]
raw_influence_maps[i][stacked_bases[i]] += config[
'influence_map_base_weight']
raw_influence_maps_unweighted[i][stacked_ships[i]] += 1
raw_influence_maps_unweighted[i][stacked_bases[i]] += 1
influence_maps[i] = smooth2d(raw_influence_maps[i],
smooth_kernel_dim=smooth_kernel_dim)
influence_maps_unweighted[i] = smooth2d(
raw_influence_maps_unweighted[i], smooth_kernel_dim=smooth_kernel_dim)
my_influence = influence_maps[0]
max_other_influence = influence_maps[1:].max(0)
influence_map = my_influence - max_other_influence
influence_map_unweighted = influence_maps_unweighted[0] - (
influence_maps_unweighted[1:].sum(0))
# Define the escape influence map
rem_other_influence = influence_maps[1:].sum(0) - max_other_influence
escape_influence_map = 3*my_influence-(
2*max_other_influence+rem_other_influence)
escape_influence_probs = np.exp(np.minimum(0, escape_influence_map)/config[
'escape_influence_prob_divisor'])
# Derive the priority scores based on the influence map
priority_scores = 1/(1+np.abs(influence_map))
# Extract a dict of my ship weights
ship_priority_weights = {}
for ship_k in player_obs[2]:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_halite = halite_ships[row, col]
halite_rank = unique_halite_vals.index(ship_halite)
ship_priority_weights[ship_k] = 1 - halite_rank/(
num_unique-1+1e-9)*ship_range
return (influence_map, influence_map_unweighted, influence_maps,
priority_scores, ship_priority_weights, escape_influence_probs)
# Compute the weighted base mask - the base with value one represents the
# main base and the values are used as a multiplier in the return to base
# scores.
def get_weighted_base_mask(stacked_bases, stacked_ships, observation,
history, consistent_main_base_bonus=3):
my_bases = stacked_bases[0]
# Exclude bases that are persistently camped by opponents
for base_pos in history['my_base_not_attacked_positions']:
my_bases[base_pos] = 0
num_bases = my_bases.sum()
my_base_locations = np.where(my_bases)
grid_size = stacked_bases.shape[1]
ship_diff_smoothed = smooth2d(stacked_ships[0] - stacked_ships[1:].sum(0))
if num_bases == 0:
base_mask = np.ones((grid_size, grid_size))
main_base_distances = -1*np.ones((grid_size, grid_size))
non_abandoned_base_distances = -1*np.ones((grid_size, grid_size))
elif num_bases >= 1:
all_non_abandoned_base_distances = []
for base_id in range(num_bases):
base_row = my_base_locations[0][base_id]
base_col = my_base_locations[1][base_id]
all_non_abandoned_base_distances.append(DISTANCES[
base_row, base_col])
non_abandoned_base_distances = np.stack(
all_non_abandoned_base_distances).min(0)
# Add a bonus to identify the main base id, but don't include the bonus
# in the base scaling
ship_diff_smoothed_with_bonus = np.copy(ship_diff_smoothed)
prev_main_base_location = history['prev_step']['my_main_base_location']
# print(observation['step'], prev_main_base_location, num_bases)
if prev_main_base_location[0] >= 0:
ship_diff_smoothed_with_bonus[prev_main_base_location] += (
consistent_main_base_bonus)
base_densities = ship_diff_smoothed[my_base_locations]
base_densities_with_bonus = ship_diff_smoothed_with_bonus[
my_base_locations]
highest_base_density_with_bonus = base_densities_with_bonus.max()
best_ids = np.where(
base_densities_with_bonus == highest_base_density_with_bonus)[0]
highest_base_density = base_densities[best_ids[0]]
    # Subtract a small value from the non max densities to break rare ties
main_base_row = my_base_locations[0][best_ids[0]]
main_base_col = my_base_locations[1][best_ids[0]]
main_base_distances = DISTANCES[main_base_row, main_base_col]
all_densities = np.minimum(ship_diff_smoothed, highest_base_density-1e-5)
all_densities[main_base_row, main_base_col] += 1e-5
# Linearly compute the weighted base mask: 1 is my best base and 0 is the
# lowest ship_diff_smoothed value
all_densities -= all_densities.min()
base_mask = all_densities/all_densities.max()
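    # With the nudges above, the main base always normalizes to exactly 1 and
    # every other square is scaled linearly between 0 (the lowest smoothed
    # ship difference) and just below 1.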
return (base_mask, main_base_distances, non_abandoned_base_distances,
ship_diff_smoothed)
# Force returning to a base when the episode is almost over
def force_return_base_end_episode(
my_bases, base_return_grid_multiplier, main_base_distances, row, col,
steps_remaining, opponent_less_halite_ships, weighted_base_mask,
safe_to_collect):
num_bases = my_bases.sum()
base_positions = np.where(my_bases)
# List the bases I *can* return to
can_return_scores = np.zeros(num_bases)
for i in range(num_bases):
base_row = base_positions[0][i]
base_col = base_positions[1][i]
base_distance = DISTANCES[row, col][base_row, base_col]
threat_mask = np.logical_and(
DISTANCES[(row, col)] <= base_distance,
DISTANCES[(base_row, base_col)] <= base_distance)
if base_distance > 1:
threat_mask[row, col] = 0
threat_mask[base_row, base_col] = 0
threat_ships_mask = opponent_less_halite_ships[threat_mask]
can_return_scores[i] = (base_distance <= steps_remaining)*(10+
max(int(safe_to_collect[row, col]),
weighted_base_mask[base_row, base_col]) - 5*(
threat_ships_mask.mean()) - base_distance/30)
# if observation['step'] == 384 and row == 8 and col == 11:
# import pdb; pdb.set_trace()
# Force an emergency return if the best return scores demand an urgent
# return in order to bring the halite home before the episode is over
end_game_base_return = False
if num_bases > 0:
best_return_id = np.argmax(can_return_scores)
best_base_row = base_positions[0][best_return_id]
best_base_col = base_positions[1][best_return_id]
best_base_distance = DISTANCES[row, col][best_base_row, best_base_col]
end_game_base_return = best_base_distance in [
steps_remaining-1, steps_remaining]
if end_game_base_return:
base_return_grid_multiplier[best_base_row, best_base_col] += 1e15
return base_return_grid_multiplier, end_game_base_return
def edge_aware_square_subset_mask(data, row, col, window, box, grid_size):
# Figure out how many rows to roll the data and box to end up with a
# contiguous subset
min_row = row - window
max_row = row + window
if min_row < 0:
data = np.roll(data, -min_row, axis=0)
box = np.roll(box, -min_row, axis=0)
elif max_row >= grid_size:
data = np.roll(data, grid_size-max_row-1, axis=0)
box = np.roll(box, grid_size-max_row-1, axis=0)
# Figure out how many columns to roll the data and box to end up with a
# contiguous subset
min_col = col - window
max_col = col + window
if min_col < 0:
data = np.roll(data, -min_col, axis=1)
box = np.roll(box, -min_col, axis=1)
elif max_col >= grid_size:
data = np.roll(data, grid_size-max_col-1, axis=1)
box = np.roll(box, grid_size-max_col-1, axis=1)
return data[box]
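# Example with hypothetical arguments: for row == 0, window == 2 and a 21x21
# grid, min_row == -2, so data and box are rolled by 2 along axis 0 and the
# wrapped window rows (19, 20, 0, 1, 2 in the original indexing) become the
# first five contiguous rows before the boolean subset is taken.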
def update_scores_opponent_boxing_in(
all_ship_scores, stacked_ships, observation, env_config,
opponent_ships_sensible_actions, halite_ships, steps_remaining, player_obs,
np_rng, opponent_ships_scaled, collect_rate, obs_halite,
main_base_distances, history, on_rescue_mission,
my_defend_base_ship_positions, env_observation, player_influence_maps,
override_move_squares_taken, ignore_convert_positions,
convert_unavailable_positions, always_attack_opponent_id,
num_non_abandoned_bases, likely_convert_opponent_positions,
possible_convert_opponent_positions, my_current_base_distances,
box_in_window=3, min_attackers_to_box=4):
# Loop over the opponent ships and derive if I can box them in
# For now this is just greedy. We should probably consider decoupling finding
# targets from actually boxing in.
# FUTURE WORK: proper handling of opponent bases
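  # Rough flow: rank opponent targets, gather my nearby lower halite ships,
  # verify that every escape direction can be covered, then either plan an
  # immediate next step attack or tighten the box, and finally raise the
  # scores of the selected squares (2e6) while marking them as taken.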
opponent_positions = np.where(stacked_ships[1:].sum(0) > 0)
opponent_bases = np.stack([rbs[1] for rbs in observation[
'rewards_bases_ships']])[1:].sum(0)
num_opponent_ships = opponent_positions[0].size
double_window = box_in_window*2
dist_mask_dim = 2*double_window+1
nearby_rows = np.tile(np.expand_dims(np.arange(dist_mask_dim), 1),
[1, dist_mask_dim])
nearby_cols = np.tile(np.arange(dist_mask_dim), [dist_mask_dim, 1])
ships_available = np.copy(stacked_ships[0]) & (~on_rescue_mission) & (
~my_defend_base_ship_positions) & (~convert_unavailable_positions)
boxing_in = np.zeros_like(on_rescue_mission)
grid_size = stacked_ships.shape[1]
# ship_pos_to_key = {v[0]: k for k, v in player_obs[2].items()}
prev_step_boxing_in_ships = history['prev_step_boxing_in_ships']
num_players = stacked_ships.shape[0]
spawn_cost = env_config.spawnCost
ship_pos_to_key = {}
for i in range(num_players):
ship_pos_to_key.update({
v[0]: k for k, v in env_observation.players[i][2].items()})
  # Loop over the camping ships and exclude from the available mask the ones
  # that have flagged that they are not available for boxing in
camping_ships_strategy = history['camping_ships_strategy']
for ship_k in camping_ships_strategy:
if not camping_ships_strategy[ship_k][3]:
camping_row, camping_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ships_available[camping_row, camping_col] = 0
  # Loop over the ships that attack opponent campers and exclude them from the
# available mask
attack_opponent_campers = history['attack_opponent_campers']
for ship_k in attack_opponent_campers:
attacking_camper_row, attacking_camper_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ships_available[attacking_camper_row, attacking_camper_col] = 0
# Loop over the ships that are stuck in a loop and mark them as unavailable
for ship_k in history['avoid_cycle_actions']:
cycle_row, cycle_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ships_available[cycle_row, cycle_col] = 0
original_ships_available = np.copy(ships_available)
my_ship_density = smooth2d(ships_available, smooth_kernel_dim=2)
# Compute the priorities of attacking each ship
# Compute the minimum opponent halite in the neighborhood of each square
# by looping over all opponent ships
attack_ship_priorities = np.zeros(num_opponent_ships)
near_opponent_min_halite = np.ones((grid_size, grid_size))*1e6
near_opponent_2_min_halite = np.ones((grid_size, grid_size))*1e6
near_opponent_specific_2_min_halite = [
np.ones((grid_size, grid_size))*1e6 for _ in range(num_players)]
should_attack = np.zeros(num_opponent_ships, dtype=np.bool)
for i in range(num_opponent_ships):
row = opponent_positions[0][i]
col = opponent_positions[1][i]
opponent_ship_k = ship_pos_to_key[row*grid_size+col]
boxing_in_prev_step = opponent_ship_k in prev_step_boxing_in_ships
opponent_halite = halite_ships[row, col]
clipped_opponent_halite = min(spawn_cost, opponent_halite)
opponent_id = np.where(stacked_ships[:, row, col])[0][0]
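    # Priority sketch: keep attacking last step's boxing-in targets, then
    # prefer targets with more halite on board (clipped at the spawn cost),
    # a higher per opponent scaling term and more of my ships nearby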
attack_ship_priorities[i] = 1e5*boxing_in_prev_step + (
clipped_opponent_halite) + 1000*(
opponent_ships_scaled[opponent_id-1]) + 1000*my_ship_density[row, col]
near_opp_mask = ROW_COL_MAX_DISTANCE_MASKS[(row, col, box_in_window)]
near_opp_2_mask = ROW_COL_MAX_DISTANCE_MASKS[(row, col, 2)]
near_opponent_min_halite[near_opp_mask] = np.minimum(
opponent_halite, near_opponent_min_halite[near_opp_mask])
near_opponent_2_min_halite[near_opp_2_mask] = np.minimum(
opponent_halite, near_opponent_2_min_halite[near_opp_2_mask])
near_opponent_specific_2_min_halite[opponent_id][near_opp_2_mask] = (
np.minimum(opponent_halite,
near_opponent_specific_2_min_halite[opponent_id][
near_opp_2_mask]))
# if observation['step'] == 163 and row == 2:
# import pdb; pdb.set_trace()
should_attack[i] = (main_base_distances[row, col] >= (9-(
observation['relative_step']*6 + 3*num_non_abandoned_bases)) or (
opponent_halite < history[
'inferred_boxed_in_conv_threshold'][opponent_id][0]) or (
always_attack_opponent_id == opponent_id)) and not (
(row, col) in ignore_convert_positions)
box_opponent_positions = []
boxing_in_ships = []
ships_on_box_mission = {}
opponent_ship_order = np.argsort(-attack_ship_priorities)
for i in range(num_opponent_ships):
opponent_ship_id = opponent_ship_order[i]
row = opponent_positions[0][opponent_ship_id]
col = opponent_positions[1][opponent_ship_id]
opponent_id = np.where(stacked_ships[:, row, col])[0][0]
opponent_ship_k = ship_pos_to_key[row*grid_size+col]
sensible_target_actions = opponent_ships_sensible_actions[row, col]
target_halite = halite_ships[row, col]
my_less_halite_mask = np.logical_and(
halite_ships < target_halite, ships_available)
# if observation['step'] == 210 and row == 1 and col == 8:
# import pdb; pdb.set_trace()
# Drop non zero halite ships towards the end of a game (they should return)
my_less_halite_mask = np.logical_and(
my_less_halite_mask, np.logical_or(
halite_ships == 0, steps_remaining > 20))
max_dist_mask = ROW_COL_MAX_DISTANCE_MASKS[(row, col, double_window)]
my_less_halite_mask &= max_dist_mask
box_pos = ROW_COL_BOX_MAX_DISTANCE_MASKS[row, col, double_window]
# if observation['step'] == 157 and row == 13 and col == 1:
# import pdb; pdb.set_trace()
    # Make sure my_num_nearby is always defined for the check further below
    my_num_nearby = 0
    if my_less_halite_mask.sum() >= min_attackers_to_box and should_attack[
        opponent_ship_id]:
      # Look up the minimum nearby opponent halite at the square halfway
      # between each of my attackers and the target - don't attack when there
      # is a lower halite ship near that midpoint, or when there is an equal
      # halite ship near that midpoint and close to the opponent
my_considered_pos = np.where(my_less_halite_mask)
if my_considered_pos[0].size:
considered_rows = my_considered_pos[0]
considered_cols = my_considered_pos[1]
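        # Wrap aware midpoint between each attacker and the target on the
        # torus; the tiny 1e-9 factors bias exact .5 midpoints towards the
        # target before rounding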
mid_rows = np.where(
np.abs(considered_rows-row) <= (grid_size // 2),
np.round((considered_rows*(1-1e-9)+row*(1+1e-9))/2),
np.where(considered_rows*(1-1e-9)+row*(1+1e-9) >= grid_size,
np.round(
(considered_rows*(1-1e-9)+row*(1+1e-9)-grid_size)/2),
np.mod(np.round(
(considered_rows*(1-1e-9)+row*(1+1e-9)+grid_size)/2),
grid_size))
).astype(np.int)
mid_cols = np.where(
np.abs(considered_cols-col) <= (grid_size // 2),
np.round((considered_cols*(1-1e-9)+col*(1+1e-9))/2),
np.where(considered_cols*(1-1e-9)+col*(1+1e-9) >= grid_size,
np.round(
(considered_cols*(1-1e-9)+col*(1+1e-9)-grid_size)/2),
np.mod(np.round(
(considered_cols*(1-1e-9)+col*(1+1e-9)+grid_size)/2),
grid_size))
).astype(np.int)
        # Only box in with ships that can safely do so without becoming a
        # target themselves. Take more risk when the halite on board equals
        # that of the other ships surrounding the target (typically 0 halite)
considered_to_target_distances = DISTANCES[(row, col)][
(considered_rows, considered_cols)]
considered_min_halite_limits = np.where(
considered_to_target_distances < 3, near_opponent_2_min_halite[
(mid_rows, mid_cols)], near_opponent_min_halite[
(mid_rows, mid_cols)])
drop_ids = (considered_min_halite_limits < (
halite_ships[(considered_rows, considered_cols)])) | (
(considered_min_halite_limits == (
halite_ships[(considered_rows, considered_cols)])) & (
near_opponent_specific_2_min_halite[opponent_id][
(row, col)] <= (
halite_ships[(considered_rows, considered_cols)])))
if np.any(drop_ids):
drop_row_ids = considered_rows[drop_ids]
drop_col_ids = considered_cols[drop_ids]
my_less_halite_mask[(drop_row_ids, drop_col_ids)] = 0
my_less_halite_mask_box = edge_aware_square_subset_mask(
my_less_halite_mask, row, col, double_window, box_pos,
grid_size)
nearby_less_halite_mask = my_less_halite_mask_box.reshape(
(dist_mask_dim, dist_mask_dim))
# if observation['step'] == 32:
# import pdb; pdb.set_trace()
my_num_nearby = nearby_less_halite_mask.sum()
else:
my_num_nearby = 0
if my_num_nearby >= min_attackers_to_box:
# Check all directions to make sure I can box the opponent in
can_box_in = True
box_in_mask_dirs = np.zeros(
(4, dist_mask_dim, dist_mask_dim), dtype=np.bool)
for dim_id, d in enumerate(NOT_NONE_DIRECTIONS):
dir_and_ships = BOX_DIRECTION_MASKS[(double_window, d)] & (
nearby_less_halite_mask)
if not np.any(dir_and_ships):
can_box_in = False
break
else:
box_in_mask_dirs[dim_id] = dir_and_ships
if can_box_in:
# Sketch out the escape squares for the target ship
opponent_distances = np.abs(nearby_rows-double_window) + np.abs(
nearby_cols-double_window)
opponent_euclid_distances = np.sqrt(
(nearby_rows-double_window)**2 + (
nearby_cols-double_window)**2)
nearby_mask_pos = np.where(nearby_less_halite_mask)
my_nearest_distances = np.stack([np.abs(
nearby_rows-nearby_mask_pos[0][j]) + np.abs(
nearby_cols-nearby_mask_pos[1][j]) for j in range(
my_num_nearby)])
my_nearest_euclid_distances = np.stack([np.sqrt((
nearby_rows-nearby_mask_pos[0][j])**2 + (
nearby_cols-nearby_mask_pos[1][j])**2) for j in range(
my_num_nearby)])
# No boxing in if the opponent has a base in one of the escape squares
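        # Escape squares: squares the target can reach at least as fast as any
        # of my nearby attackers; per direction they are further restricted
        # below to squares no further out than my closest attacker on that side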
escape_squares = opponent_distances <= my_nearest_distances.min(0)
cropped_distances = OTHER_DISTANCES[
(double_window, double_window, dist_mask_dim)]
for dim_id, d in enumerate(NOT_NONE_DIRECTIONS):
box_dir_mask = BOX_DIRECTION_MASKS[(double_window, d)]
closest_dim_distance = cropped_distances[
box_in_mask_dirs[dim_id]].min()
escape_squares[box_dir_mask] &= (
cropped_distances[box_dir_mask] <= closest_dim_distance)
if not np.any(observation['rewards_bases_ships'][opponent_id][1][
box_pos][escape_squares.flatten()]):
# Let's box the opponent in!
# We should move towards the opponent if we can do so without opening
# up an escape direction
# if observation['step'] == 32:
# import pdb; pdb.set_trace()
# Order the planning by priority of direction and distance to the
# opponent
# Reasoning: mid-distance ships plan first since that allows fast
# boxing in - the nearby ships then just have to cover the remaining
# directions.
# Ships which cover hard to cover directions plan later.
box_in_mask_dirs_sum = box_in_mask_dirs.sum((1, 2))
ship_priorities = np.zeros(my_num_nearby)
must_attack_converting_square = ((row, col) in (
likely_convert_opponent_positions)) and not (
(row, col) in ignore_convert_positions) and ((
always_attack_opponent_id == opponent_id) or (
my_current_base_distances[:, row, col].min() < 5))
threatened_one_step = set()
for j in range(my_num_nearby):
my_row = nearby_mask_pos[0][j]
my_col = nearby_mask_pos[1][j]
box_directions = box_in_mask_dirs[:, my_row, my_col]
opponent_distance = np.abs(my_row-double_window) + np.abs(
my_col-double_window)
ship_priorities[j] = 20/(
box_in_mask_dirs_sum[box_directions].prod())+np.abs(
opponent_distance**0.9-box_in_window**0.9)
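            # Track box directions that are only covered by a single diagonal
            # ship at distance 2; these are treated below as directions that
            # are only threatened one step away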
if opponent_distance == 2 and box_directions.sum() == 2 and np.all(
box_in_mask_dirs_sum[box_directions] == 1):
two_step_dirs = [MOVE_DIRECTIONS[move_id+1] for move_id in (
np.where(box_directions)[0])]
threatened_one_step.update(two_step_dirs)
# I can always attack all escape squares if I have at least 5 ships
# at a maximum distance of two with at least one attacker on each
# half plane
vert_diff = double_window-nearby_mask_pos[0]
horiz_diff = double_window-nearby_mask_pos[1]
distances = np.abs(vert_diff) + np.abs(horiz_diff)
is_near = distances <= 2
near_vert_diff = vert_diff[is_near]
near_horiz_diff = horiz_diff[is_near]
i_can_attack_all_escape_squares = distances.min() == 1 and (
is_near.sum() >= 5) and np.sign(near_vert_diff).ptp() == 2 and (
np.sign(near_horiz_diff).ptp() == 2)
if i_can_attack_all_escape_squares and (distances == 1).sum() == 1:
# I can only attack all escape squares if my attacker can be
# replaced
one_step_diff_id = np.argmin(distances)
single_attack_row = nearby_mask_pos[0][one_step_diff_id]
single_attack_col = nearby_mask_pos[1][one_step_diff_id]
can_replace = False
for row_offset in [-1, 1]:
for col_offset in [-1, 1]:
if nearby_less_halite_mask[single_attack_row + row_offset,
single_attack_col + col_offset]:
can_replace = True
break
i_can_attack_all_escape_squares = can_replace
          # Decide whether we are just chasing or actually attacking the ship
          # in the next move - simple rule: require at least K neighboring
          # ships before attacking the targeted ship's position - this makes
          # it hard to guess the escape direction
ship_target_1_distances = my_nearest_distances[
:, double_window, double_window] == 1
next_step_attack = (len(sensible_target_actions) == 0 and (
ship_target_1_distances.sum() > 2)) or (
i_can_attack_all_escape_squares) or (
must_attack_converting_square and np.any(
ship_target_1_distances))
# if next_step_attack and not (
# (len(sensible_target_actions) == 0 and (
# ship_target_1_distances.sum() > 2)) or (
# i_can_attack_all_escape_squares)):
# import pdb; pdb.set_trace()
opponent_boxed_bases = edge_aware_square_subset_mask(
opponent_bases, row, col, double_window, box_pos,
grid_size).reshape((dist_mask_dim, dist_mask_dim))
pos_taken = np.copy(opponent_boxed_bases)
box_override_assignment_not_next_attack = {}
if next_step_attack:
# If there is a ship that can take the position of my attacker:
# attack with that ship and replace its position.
# Otherwise pick a random attacker and keep the others in place.
# Initial approach: don't move with ships at distance 1.
ship_target_2_distance_ids = np.where(my_nearest_distances[
:, double_window, double_window] == 2)[0].tolist()
move_ids_directions_next_attack = {}
# Reorder ship_target_2_distance_ids so that the ones that can
# replace a 1 step threat are considered last, except when there is
# only a single 1 step threat (it would always move to the target).
# Also prefer to consider ships that only have a single option
# to move to the target first
two_step_distance_scores = np.zeros(
len(ship_target_2_distance_ids))
for two_step_id, two_step_diff_id in enumerate(
ship_target_2_distance_ids):
my_row = nearby_mask_pos[0][two_step_diff_id]
my_col = nearby_mask_pos[1][two_step_diff_id]
mask_between = get_mask_between_exclude_ends(
my_row, my_col, double_window, double_window, dist_mask_dim)
two_step_distance_scores[two_step_id] = mask_between.sum() + 10*(
nearby_less_halite_mask[mask_between].sum())*(
ship_target_1_distances.sum() > 1)
# if observation['step'] == 134:
# import pdb; pdb.set_trace()
ship_target_2_distance_ids = np.array(
ship_target_2_distance_ids)[
np.argsort(two_step_distance_scores)].tolist()
# Add the positions of the one step attackers
for one_step_diff_id in np.where(ship_target_1_distances)[0]:
my_row = nearby_mask_pos[0][one_step_diff_id]
my_col = nearby_mask_pos[1][one_step_diff_id]
# If I only have one ship that can attack the target: attack with
# that ship!
if ship_target_1_distances.sum() == 1:
attack_direction = get_dir_from_target(
my_row, my_col, double_window, double_window,
grid_size=1000)[0]
pos_taken[double_window, double_window] = True
move_ids_directions_next_attack[one_step_diff_id] = (
attack_direction)
else:
pos_taken[my_row, my_col] = 1
# if observation['step'] == 176:
# import pdb; pdb.set_trace()
two_step_pos_taken = []
while ship_target_2_distance_ids:
two_step_diff_id = ship_target_2_distance_ids.pop(0)
my_row = nearby_mask_pos[0][two_step_diff_id]
my_col = nearby_mask_pos[1][two_step_diff_id]
# Consider the shortest directions towards the target
shortest_directions = get_dir_from_target(
my_row, my_col, double_window, double_window, grid_size=1000)
has_selected_action = False
for d in shortest_directions:
                # Prefer empty squares one step from the target over replacing
                # a one step threat
move_row, move_col = move_ship_row_col(
my_row, my_col, d, size=1000)
if not pos_taken[move_row, move_col] and (not (
(move_row, move_col) in two_step_pos_taken)):
two_step_pos_taken.append((move_row, move_col))
move_ids_directions_next_attack[two_step_diff_id] = d
has_selected_action = True
break
if not has_selected_action:
# Replace a 1-step threatening ship
for d in shortest_directions:
move_row, move_col = move_ship_row_col(
my_row, my_col, d, size=1000)
if pos_taken[move_row, move_col] and not pos_taken[
double_window, double_window] and not opponent_boxed_bases[
move_row, move_col]:
move_ids_directions_next_attack[two_step_diff_id] = d
                    # Find the id of the 1-step ship and make sure that ship
                    # attacks
replaced_id = np.where(my_nearest_distances[
:, move_row, move_col] == 0)[0][0]
one_step_attack_dir = get_dir_from_target(
move_row, move_col, double_window, double_window,
grid_size=1000)[0]
move_ids_directions_next_attack[replaced_id] = (
one_step_attack_dir)
pos_taken[double_window, double_window] = True
                    # Recompute the priority of the remaining two step ships:
                    # handle the ships with the fewest unblocked shortest
                    # moves towards the target first
two_step_distance_scores = np.zeros(
len(ship_target_2_distance_ids))
for two_step_id, two_step_diff_id in enumerate(
ship_target_2_distance_ids):
my_row = nearby_mask_pos[0][two_step_diff_id]
my_col = nearby_mask_pos[1][two_step_diff_id]
shortest_directions = get_dir_from_target(
my_row, my_col, double_window, double_window,
grid_size=1000)
for d in shortest_directions:
move_row, move_col = move_ship_row_col(
my_row, my_col, d, size=1000)
two_step_distance_scores[two_step_id] += int(
not (pos_taken[move_row, move_col] or (
(move_row, move_col) in two_step_pos_taken)))
ship_target_2_distance_ids = np.array(
ship_target_2_distance_ids)[
np.argsort(two_step_distance_scores)].tolist()
one_step_diff_ids = np.where(ship_target_1_distances)[0]
if pos_taken[double_window, double_window]:
# Add the remaining one step attackers with stay in place actions
for one_step_diff_id in one_step_diff_ids:
if not one_step_diff_id in move_ids_directions_next_attack:
move_ids_directions_next_attack[one_step_diff_id] = None
else:
# Prefer to avoid stay in place actions with zero halite ships
real_mask_pos = (
np.mod(nearby_mask_pos[0]+row-double_window, grid_size),
np.mod(nearby_mask_pos[1]+col-double_window, grid_size)
)
one_step_halite_on_board = halite_ships[real_mask_pos][
one_step_diff_ids]
one_step_halite_on_square = obs_halite[real_mask_pos][
one_step_diff_ids]
prefers_box_in = (one_step_halite_on_board == 0) & (
one_step_halite_on_square > 0)
if np.all(~prefers_box_in):
one_step_diff_ids_attack = one_step_diff_ids
else:
one_step_diff_ids_attack = one_step_diff_ids[
prefers_box_in]
# Of the remaining attack options: prefer an attacker from the
# direction where we have the highest influence, relative to the
# targeted opponent
# one_step_attacker_id = np_rng.choice(one_step_diff_ids_attack)
my_influences = player_influence_maps[0][real_mask_pos][
one_step_diff_ids_attack]
opponent_influences = player_influence_maps[opponent_id][
real_mask_pos][one_step_diff_ids_attack]
influence_differences = my_influences - opponent_influences
one_step_attacker_id = one_step_diff_ids_attack[
np.argmax(influence_differences)]
              # Send the selected one step attacker after the target and make
              # sure the remaining 1-step ships stay in place
for one_step_diff_id in one_step_diff_ids:
if one_step_diff_id == one_step_attacker_id:
my_row = nearby_mask_pos[0][one_step_diff_id]
my_col = nearby_mask_pos[1][one_step_diff_id]
attack_dir = get_dir_from_target(
my_row, my_col, double_window, double_window,
grid_size=1000)[0]
else:
attack_dir = None
move_ids_directions_next_attack[one_step_diff_id] = attack_dir
elif len(sensible_target_actions) == 0 or (
len(sensible_target_actions) == 1 and (
sensible_target_actions[0] == (0, 0))):
            # Inspect from which directions I can move right next to the
            # target when the opponent has no valid escape actions. Use a
            # greedy search to determine the action selection order
can_box_immediately = []
can_box_immediately_counts = np.zeros(4)
for j in range(my_num_nearby):
my_row = nearby_mask_pos[0][j]
my_col = nearby_mask_pos[1][j]
box_directions = box_in_mask_dirs[:, my_row, my_col]
opponent_distance = np.abs(my_row-double_window) + np.abs(
my_col-double_window)
if opponent_distance <= 2:
immediate_box_dirs = np.where(box_directions)[0]
can_box_immediately.append((
j, immediate_box_dirs, box_directions, my_row, my_col))
can_box_immediately_counts[box_directions] += 1
can_box_progress = [list(cb) for cb in can_box_immediately]
can_box_immediately_counts_progress = np.copy(
can_box_immediately_counts)
not_boxed_dirs = np.ones(4, dtype=np.bool)
# if observation['step'] == 97:
# import pdb; pdb.set_trace()
# Iteratively look for directions where I can box in in one step
# when I have others that can box in the remaining directions
# and nobody else can box that direction in
box_in_mask_rem_dirs_sum = np.copy(box_in_mask_dirs_sum)
while len(can_box_progress) > 0 and np.any(not_boxed_dirs) and (
can_box_immediately_counts_progress.sum() > 0):
considered_dir = np.argmin(
can_box_immediately_counts_progress + 100*(
can_box_immediately_counts_progress <= 0) + 1e-2*(
box_in_mask_rem_dirs_sum))
considered_dir_ids = [(
j, cb[0], box_in_mask_rem_dirs_sum[cb[1]], cb[1], cb[3],
cb[4]) for j, cb in enumerate(can_box_progress) if (
considered_dir in cb[1] and np.all(
box_in_mask_rem_dirs_sum[cb[1]] >= 1))]
num_considered_dir_ids = len(considered_dir_ids)
if num_considered_dir_ids > 0:
                # Tie breaker: prefer the candidate that covers the fewest
                # directions and, among those, the one whose directions have
                # the most remaining ship support
if num_considered_dir_ids > 1:
scores = np.zeros(num_considered_dir_ids)
for k in range(num_considered_dir_ids):
scores[k] = 100*len(considered_dir_ids[k][2]) - (
considered_dir_ids[k][2].sum())
picked_dir_id = np.argmin(scores)
else:
picked_dir_id = 0
picked = considered_dir_ids[picked_dir_id]
box_override_assignment_not_next_attack[picked[1]] = (
considered_dir, picked[4], picked[5])
# If I move closer with a diagonal ship: subtract the
# immediate box counter for the other direction
picked_other_immediate_box_dirs = picked[3][
picked[3] != considered_dir]
can_box_immediately_counts_progress[considered_dir] = 0
can_box_immediately_counts_progress[
picked_other_immediate_box_dirs] -= 1
not_boxed_dirs[considered_dir] = 0
box_in_mask_rem_dirs_sum[picked[3]] -= 1
ship_priorities[picked[1]] -= 1e6
del can_box_progress[picked[0]]
else:
break
num_covered_directions = np.zeros(4, dtype=np.int)
num_one_step_from_covered = np.zeros(4, dtype=np.bool)
ship_order = np.argsort(ship_priorities)
box_in_mask_rem_dirs_sum = np.copy(box_in_mask_dirs_sum)
update_ship_scores = []
one_square_threats = []
almost_covered_dirs = []
for j in range(my_num_nearby):
attack_id = ship_order[j]
my_row = nearby_mask_pos[0][attack_id]
my_col = nearby_mask_pos[1][attack_id]
my_abs_row = (row+my_row-double_window) % grid_size
my_abs_col = (col+my_col-double_window) % grid_size
ship_pos = my_abs_row*grid_size+my_abs_col
ship_k = ship_pos_to_key[ship_pos]
box_directions = box_in_mask_dirs[:, my_row, my_col]
opponent_distance = np.abs(my_row-double_window) + np.abs(
my_col-double_window)
box_in_mask_rem_dirs_sum[box_directions] -= 1
# if observation['step'] == 341:
# import pdb; pdb.set_trace()
if next_step_attack:
# Increase the ship scores for the planned actions
if attack_id in move_ids_directions_next_attack:
move_dir = move_ids_directions_next_attack[attack_id]
move_row, move_col = move_ship_row_col(
my_abs_row, my_abs_col, move_dir, grid_size)
# if observation['step'] == 204:
# import pdb; pdb.set_trace()
update_ship_scores.append(
(ship_k, move_row, move_col, 2e6, opponent_distance, None,
my_abs_row, my_abs_col))
else:
# Figure out if we should use this ship to attack the target -
# there is no point in using too many ships!
# if observation['step'] == 201 and my_row == 6 and my_col == 7:
# import pdb; pdb.set_trace()
if (opponent_distance > 2) and (
(num_covered_directions[box_directions] + 0.5*(
box_in_mask_rem_dirs_sum[box_directions])).min() >= 2 and (
np.all(num_covered_directions[box_directions] > 0)) or (
box_in_mask_rem_dirs_sum[box_directions].min() > 2) and (
opponent_distance > box_in_window)):
# print("Dropping ship", my_abs_row, my_abs_col, "from attack")
continue
rel_pos_diff = (my_row-double_window, my_col-double_window)
num_covered_attacker = num_covered_directions[box_directions]
# Logic to cover a direction that is almost covered
almost_covered_override = False
if np.all((num_covered_attacker > 0) | (
box_in_mask_rem_dirs_sum[box_directions] >= 1)) & np.any(
num_one_step_from_covered) and (
box_directions.sum() == 1) and len(
threatened_one_step) > 0 and ((
np.abs(my_row - my_col) == 1) or (my_row + my_col in [
double_window-1, double_window+1])):
move_dir = None
if my_row-my_col == -1:
if WEST in threatened_one_step and my_row < double_window:
almost_covered_dir = WEST
move_dir = SOUTH
elif SOUTH in threatened_one_step and my_row > double_window:
almost_covered_dir = SOUTH
move_dir = WEST
elif my_row-my_col == 1:
if NORTH in threatened_one_step and my_row < double_window:
almost_covered_dir = NORTH
move_dir = EAST
elif EAST in threatened_one_step and my_row > double_window:
almost_covered_dir = EAST
move_dir = NORTH
elif my_row+my_col == double_window-1:
if EAST in threatened_one_step and my_row < double_window:
almost_covered_dir = EAST
move_dir = SOUTH
elif SOUTH in threatened_one_step and my_row > double_window:
almost_covered_dir = SOUTH
move_dir = EAST
elif my_row+my_col == double_window+1:
if NORTH in threatened_one_step and my_row < double_window:
almost_covered_dir = NORTH
move_dir = WEST
elif WEST in threatened_one_step and my_row > double_window:
almost_covered_dir = WEST
move_dir = NORTH
if move_dir is not None:
move_row, move_col = move_ship_row_col(
my_row, my_col, move_dir, grid_size)
if not pos_taken[move_row, move_col]:
# Override: when we are next to the target: expect opponent
# to move
almost_covered_override = True
if opponent_distance == 1:
threat_dir = OPPOSITE_MAPPING[get_dir_from_target(
my_row, my_col, double_window, double_window, 1000)[0]]
one_square_threats.append(threat_dir)
move_dir = None
else:
# Make sure that the square we want to move to is
# available
almost_covered_dirs.append(almost_covered_dir)
if not almost_covered_override:
if attack_id in box_override_assignment_not_next_attack:
attack_move_id = box_override_assignment_not_next_attack[
attack_id][0]
assert box_directions[attack_move_id]
else:
attack_dir_scores = num_covered_attacker + 0.1*(
box_in_mask_rem_dirs_sum[box_directions])
attack_dir_id = np.argmin(attack_dir_scores)
attack_move_id = np.where(box_directions)[0][attack_dir_id]
rel_pos_diff = (my_row-double_window, my_col-double_window)
attack_cover_dir = np.array(NOT_NONE_DIRECTIONS)[
attack_move_id]
one_hot_cover_dirs = np.zeros(4, dtype=bool)
one_hot_cover_dirs[attack_move_id] = 1
other_dirs_covered = one_hot_cover_dirs | (
num_covered_directions > 0) | (box_in_mask_rem_dirs_sum >= 1)
wait_reinforcements = not np.all(other_dirs_covered) or (
opponent_distance == 1)
# if observation['step'] == 357:
# import pdb; pdb.set_trace()
# print(my_row, my_col, threatened_one_step,
# num_covered_directions, num_one_step_from_covered)
if wait_reinforcements:
# Move away from the target if staying would mean having more
# halite than the target
my_next_halite = halite_ships[my_abs_row, my_abs_col] + int(
collect_rate*obs_halite[my_abs_row, my_abs_col])
if my_next_halite > target_halite:
move_away_dirs = get_dir_from_target(
double_window, double_window, my_row, my_col,
grid_size=1000)
# import pdb; pdb.set_trace()
move_dir = np_rng.choice(move_away_dirs)
else:
move_dir = None
else:
if num_covered_directions[attack_move_id] > 0:
# Move towards the target on the diagonal (empowerment)
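                    # Penalty sketch: prefer squares close (in the Euclidean
                    # sense) to both this attacker and the target, and heavily
                    # penalize squares that are already taken or staying put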
move_penalties = 0.001*opponent_euclid_distances**4 + (
my_nearest_euclid_distances[attack_id]**4) + 1e3*(
pos_taken)
move_penalties[my_row, my_col] += 1e3
best_penalty_pos = np.where(
move_penalties == move_penalties.min())
target_move_row = best_penalty_pos[0][0]
target_move_col = best_penalty_pos[1][0]
move_dir = get_dir_from_target(
my_row, my_col, target_move_row, target_move_col,
grid_size=1000)[0]
if attack_cover_dir == NORTH:
if np.abs(rel_pos_diff[1]) < (np.abs(rel_pos_diff[0])-1):
move_dir = SOUTH
elif rel_pos_diff[1] < 0:
move_dir = EAST
else:
move_dir = WEST
elif attack_cover_dir == SOUTH:
if np.abs(rel_pos_diff[1]) < (np.abs(rel_pos_diff[0])-1):
move_dir = NORTH
elif rel_pos_diff[1] < 0:
move_dir = EAST
else:
move_dir = WEST
elif attack_cover_dir == EAST:
if np.abs(rel_pos_diff[0]) < (np.abs(rel_pos_diff[1])-1):
move_dir = WEST
elif rel_pos_diff[0] < 0:
move_dir = SOUTH
else:
move_dir = NORTH
elif attack_cover_dir == WEST:
if np.abs(rel_pos_diff[0]) < (np.abs(rel_pos_diff[1])-1):
move_dir = EAST
elif rel_pos_diff[0] < 0:
move_dir = SOUTH
else:
move_dir = NORTH
# Increase the ship scores for the planned actions
moved_rel_dir = RELATIVE_DIR_MAPPING[move_dir]
new_rel_pos = (rel_pos_diff[0] + moved_rel_dir[0],
rel_pos_diff[1] + moved_rel_dir[1])
new_grid_pos = (double_window + new_rel_pos[0],
double_window + new_rel_pos[1])
if new_grid_pos[0] < 0 or new_grid_pos[1] < 0 or new_grid_pos[
0] > 2*double_window or new_grid_pos[1] > 2*double_window:
new_rel_pos = (rel_pos_diff[0], rel_pos_diff[1])
new_grid_pos = (double_window + new_rel_pos[0],
double_window + new_rel_pos[1])
if pos_taken[new_grid_pos] and opponent_distance == 2:
# Override - if I can move right next to the target: do it.
shortest_directions = get_dir_from_target(
my_row, my_col, double_window, double_window, grid_size=1000)
for move_dir in shortest_directions:
moved_rel_dir = RELATIVE_DIR_MAPPING[move_dir]
new_rel_pos = (rel_pos_diff[0] + moved_rel_dir[0],
rel_pos_diff[1] + moved_rel_dir[1])
new_grid_pos = (double_window + new_rel_pos[0],
double_window + new_rel_pos[1])
if not pos_taken[new_grid_pos]:
break
move_row, move_col = move_ship_row_col(
my_abs_row, my_abs_col, move_dir, grid_size)
if not pos_taken[new_grid_pos] and not new_rel_pos == (0, 0):
# Update the covered attack directions
ship_covered_directions = np.zeros(4, dtype=np.bool)
ship_one_step_from_covered_directions = np.zeros(
4, dtype=np.bool)
for threat_dir in RELATIVE_NOT_NONE_DIRECTIONS:
nz_dim = int(threat_dir[0] == 0)
dir_offset = new_rel_pos[nz_dim]*threat_dir[nz_dim]
other_dir_abs_offset = np.abs(new_rel_pos[1-nz_dim])
if dir_offset > 0 and other_dir_abs_offset <= dir_offset:
covered_id = np.where(
RELATIVE_DIR_TO_DIRECTION_MAPPING[threat_dir] == (
np.array(NOT_NONE_DIRECTIONS)))[0][0]
ship_one_step_from_covered_directions[covered_id] = 1
if other_dir_abs_offset < dir_offset:
ship_covered_directions[covered_id] = 1
# if observation['step'] == 210 and row == 1 and col == 8:
# import pdb; pdb.set_trace()
# Join the attack - add actions to the list
num_covered_directions[ship_covered_directions] += 1
num_one_step_from_covered[
ship_one_step_from_covered_directions] = 1
update_ship_scores.append(
(ship_k, move_row, move_col, 2e6, opponent_distance,
np.where(ship_covered_directions)[0], my_abs_row,
my_abs_col))
pos_taken[new_grid_pos] = 1
# We can almost box the opponent in and rely on the opponent not
# taking risky actions to escape
almost_attack_nearby_blockers = False
if len(threatened_one_step) > 0 and (
len(one_square_threats+almost_covered_dirs) > 0) and not np.all(
num_covered_directions > 0) and not next_step_attack:
not_covered_dirs = [MOVE_DIRECTIONS[i+1] for i in np.where(
num_covered_directions == 0)[0]]
if len(one_square_threats) > 0 and np.all(
[d in threatened_one_step for d in not_covered_dirs]):
almost_attack_nearby_blockers = True
else:
almost_attack_nearby_blockers = len(
threatened_one_step.intersection(almost_covered_dirs)) > 0
# if observation['step'] == 87:
# import pdb; pdb.set_trace()
if next_step_attack or np.all(num_covered_directions > 0) or (
almost_attack_nearby_blockers and np.any(
num_covered_directions > 0)):
# Prune the attackers: only keep the closest two in each direction
if not next_step_attack:
drop_rows = []
distance_dir = np.array([[u[4], u[5][0]] for u in (
update_ship_scores) if u[5].size > 0])
for d_id in np.arange(4):
if (distance_dir[:, 1] == d_id).sum() > 2:
dir_rows = np.where(distance_dir[:, 1] == d_id)[0]
drop_ids = np.argsort(distance_dir[dir_rows, 0])[2:]
drop_rows.extend(dir_rows[drop_ids].tolist())
for dr in np.sort(drop_rows)[::-1]:
del update_ship_scores[dr]
# if observation['step'] == 237:
# import pdb; pdb.set_trace()
box_opponent_positions.append((row, col))
boxing_in_ships.append(opponent_ship_k)
for (ship_k, move_row, move_col, new_collect_score,
distance_to_target, _, my_abs_row, my_abs_col) in (
update_ship_scores):
# Only update the ship scores if the box in action is in my one
# step valid actions
box_dir = get_dir_from_target(
my_abs_row, my_abs_col, move_row, move_col, grid_size)[0]
if box_dir in all_ship_scores[ship_k][9]:
all_ship_scores[ship_k][0][move_row, move_col] = (
new_collect_score)
# Flag the boxing in ships as unavailable for other hunts
ships_available[my_abs_row, my_abs_col] = 0
boxing_in[my_abs_row, my_abs_col] = 1
ships_on_box_mission[ship_k] = distance_to_target
override_move_squares_taken[move_row, move_col] = 1
  # Make sure that I attack all squares where an opponent may be converting
  # when I can not allow that conversion to happen
for (row, col) in possible_convert_opponent_positions:
if not (row, col) in ignore_convert_positions:
my_base_distances = my_current_base_distances[:, row, col]
must_attack_converting_square = my_base_distances.min() < (3.5 - (
observation['relative_step']))
if must_attack_converting_square and not override_move_squares_taken[
row, col]:
# Look for nearby ships of mine that can attack the converting ship
for d in NOT_NONE_DIRECTIONS:
my_row, my_col = move_ship_row_col(row, col, d, grid_size)
if original_ships_available[my_row, my_col] or (
my_defend_base_ship_positions[my_row, my_col]):
to_target_dir = OPPOSITE_MAPPING[d]
ship_pos = my_row*grid_size+my_col
ship_k = ship_pos_to_key[ship_pos]
if to_target_dir in all_ship_scores[ship_k][6]:
all_ship_scores[ship_k][0][row, col] = 1e9
boxing_in[my_row, my_col] = 1
print("ATTACKING POSSIBLY CONVERTING SHIP", observation['step'],
row, col, my_row, my_col)
break
history['prev_step_boxing_in_ships'] = boxing_in_ships
return (all_ship_scores, boxing_in, box_opponent_positions,
override_move_squares_taken, ships_on_box_mission)
def update_scores_pack_hunt(
all_ship_scores, config, stacked_ships, observation,
opponent_ships_sensible_actions, halite_ships, steps_remaining,
player_obs, np_rng, opponent_ships_scaled, collect_rate, obs_halite,
main_base_distances, history, on_rescue_mission, boxing_in_mission,
my_defend_base_ship_positions, env_observation, box_opponent_positions,
override_move_squares_taken, player_influence_maps,
ignore_convert_positions, convert_unavailable_positions,
early_hunting_season, late_hunting_season, safe_collect_margin, spawn_cost,
change_standard_consecutive_steps=5):
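  # Rough flow: keep a limited set of 'standard' ships for gathering and base
  # attack, send the remaining zero halite ships after opponent ships that
  # carry halite, and coordinate the hunters so that targets gradually lose
  # their escape directions.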
available_pack_hunt_ships = np.copy(stacked_ships[0])
grid_size = available_pack_hunt_ships.shape[0]
hunting_season_started = history['hunting_season_started']
prev_standard_ships = history['hunting_season_standard_ships']
# # FUTURE WORK: Make the number of standard ships a function of the hunt
# # success?
# # FUTURE WORK: Make the number of standard ships a function of ship losses?
if early_hunting_season:
max_standard_ships_hunting_season = config[
'max_standard_ships_early_hunting_season']
elif late_hunting_season:
max_standard_ships_hunting_season = max(config[
'max_standard_ships_late_hunting_season'], int(len(player_obs[2])*(
config['late_hunting_season_standard_min_fraction'])))
else:
max_standard_ships_hunting_season = max(10, int(len(player_obs[2])/2.5))
# print(observation['step'], len(player_obs[2]),
# max_standard_ships_hunting_season)
# print(observation['step'], opponent_hunt_fraction, num_my_ships,
# my_target_standard_ships, max_standard_ships_hunting_season)
  # Determine if I should preferably target a specific opponent.
  # In games where there is a clear difference between the top two agents and
  # my agent, where I am one of those two: mostly harass/hoard the other top
  # agent
current_scores = history['current_scores']
spawn_diffs = (current_scores[0] - current_scores[1:])/spawn_cost
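  # spawn_diffs: my score lead over each opponent, expressed in spawn cost
  # units (negative when an opponent is ahead of me)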
first_opponent_id = np.argsort(spawn_diffs)[0]
second_opponent_id = np.argsort(spawn_diffs)[1]
my_agent_in_top_two = (spawn_diffs < 0).sum() <= 1
spawn_diff_first = np.abs(spawn_diffs[first_opponent_id])
spawn_diff_second = np.abs(spawn_diffs[second_opponent_id])
prev_targeted_hoard_mode = history['targeted_hoard_mode']
should_start_targeted_hoard_mode = my_agent_in_top_two and (
spawn_diff_second > 2*spawn_diff_first) and (spawn_diff_second > 6)
should_continue_targeted_hoard_mode = my_agent_in_top_two and (
spawn_diff_second > spawn_diff_first) and (spawn_diff_second > 4)
targeted_hoard_mode = should_start_targeted_hoard_mode or (
prev_targeted_hoard_mode and should_continue_targeted_hoard_mode)
history['targeted_hoard_mode'] = targeted_hoard_mode
preferred_victim = None
if targeted_hoard_mode:
preferred_victim = first_opponent_id+1
if should_start_targeted_hoard_mode and not prev_targeted_hoard_mode:
print(observation['step'], "Start selective hoarding of opponent",
preferred_victim)
prev_step_opponent_ship_moves = history['prev_step_opponent_ship_moves']
num_players = stacked_ships.shape[0]
ship_pos_to_key = {}
for i in range(num_players):
ship_pos_to_key.update({
v[0]: k for k, v in env_observation.players[i][2].items()})
ship_key_to_pos = {v: k for k, v in ship_pos_to_key.items()}
player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(stacked_ships.shape[0]):
player_ids[stacked_ships[i]] = i
not_available_due_to_camping = np.zeros_like(available_pack_hunt_ships)
  # Loop over the camping ships and exclude from the available mask the ones
  # that have flagged that they are not available for boxing in
camping_ships_strategy = history['camping_ships_strategy']
for ship_k in camping_ships_strategy:
if not camping_ships_strategy[ship_k][3]:
camping_row, camping_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
not_available_due_to_camping[camping_row, camping_col] = 1
# Loop over the ships that attack opponent camplers and exclude them from the
# available mask
attack_opponent_campers = history['attack_opponent_campers']
for ship_k in attack_opponent_campers:
attacking_camper_row, attacking_camper_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
not_available_due_to_camping[
attacking_camper_row, attacking_camper_col] = 1
# Loop over the ships that are stuck in a loop and mark them as unavailable
not_available_due_to_cycle = np.zeros_like(available_pack_hunt_ships)
for ship_k in history['avoid_cycle_actions']:
cycle_row, cycle_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
not_available_due_to_cycle[cycle_row, cycle_col] = 1
# Loop over the ships that are temporarily assigned a collect task
not_available_due_to_temp_collect = np.zeros_like(available_pack_hunt_ships)
delete_keys = []
for ship_k in history['temporary_hoarding_collect_ships']:
if ship_k in player_obs[2]:
collect_row, collect_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
not_available_due_to_temp_collect[collect_row, collect_col] = 1
else:
delete_keys.append(ship_k)
for k in delete_keys:
history['temporary_hoarding_collect_ships'].remove(k)
# List the ships that are definitely not available for the pack hunt
# In this group:
# - Opponent base camping
# - Attack opponent base campers
  # - Ships that are on a rescue mission (rescuer and rescued)
# - Base defense emergency ships
# - Boxing in other ships
available_pack_hunt_ships &= (~not_available_due_to_camping)
available_pack_hunt_ships &= (~on_rescue_mission)
available_pack_hunt_ships &= (~my_defend_base_ship_positions)
available_pack_hunt_ships &= (~boxing_in_mission)
available_pack_hunt_ships &= (~convert_unavailable_positions)
available_pack_hunt_ships &= (~not_available_due_to_cycle)
available_pack_hunt_ships &= (~not_available_due_to_temp_collect)
# Of the remaining list: identify 'max_standard_ships_hunting_season' ships
# that are available to gather halite/attack bases.
# Preferably select ships that were also selected for these modes in the
# previous step and have halite on board.
# Only change the gather/attack ships if one of my gatherers was destroyed
# Assign a new gatherer if my gatherer is assigned to the base camping
# attack or defense (These ships tend to be indefinitely unavailable), or if
# the ship was destroyed.
# Prefer non-zero halite ships for the initial gathering ships.
my_ship_pos_to_k = {v[0]: k for k, v in player_obs[2].items()}
available_positions = np.where(available_pack_hunt_ships)
num_available_ships = available_pack_hunt_ships.sum()
standard_ships = []
if num_available_ships > 0:
best_standard_scores = np.zeros(num_available_ships)
pos_keys = []
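    # Collect score per available ship; zero halite ships get a large penalty
    # so that ships already carrying halite are preferred as collectors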
for i in range(num_available_ships):
row = available_positions[0][i]
col = available_positions[1][i]
pos_key = my_ship_pos_to_k[row*grid_size+col]
best_standard_scores[i] = all_ship_scores[pos_key][0].max() - 1e6*(
halite_ships[row, col] == 0)
pos_keys.append(pos_key)
if hunting_season_started:
already_included_ids = np.zeros(num_available_ships, dtype=np.bool)
for ship_k in prev_standard_ships:
if ship_k in player_obs[2]:
# The ship still exists and was a standard ship in the previous step
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
if my_defend_base_ship_positions[row, col] or boxing_in_mission[
row, col] or on_rescue_mission[row, col] or (
not_available_due_to_cycle[row, col]):
# We can use the ship for collecting soon (now it is rescuing or
# boxing in or defending the base)
standard_ships.append(ship_k)
elif available_pack_hunt_ships[row, col]:
# The ship is available now. Flag it for exclusion so it doesn't
# get added twice
standard_ships.append(ship_k)
match_id = np.where((available_positions[0] == row) & (
available_positions[1] == col))[0][0]
already_included_ids[match_id] = True
else:
# The ship is now used for base camping or base conversion or
# additional collection
# Exclude it from the standard ships group
assert not_available_due_to_camping[row, col] or (
convert_unavailable_positions[row, col]) or (
not_available_due_to_temp_collect[row, col])
best_standard_scores = best_standard_scores[~already_included_ids]
available_positions = (available_positions[0][~already_included_ids],
available_positions[1][~already_included_ids])
pos_keys = np.array(pos_keys)[~already_included_ids].tolist()
num_unassigned = max_standard_ships_hunting_season - len(standard_ships)
# if num_unassigned > 0:
# import pdb; pdb.set_trace()
num_to_assign = min(num_unassigned, num_available_ships)
num_to_assign_phase_1 = max(0, num_to_assign - config[
'max_standard_ships_decided_end_pack_hunting'])
num_to_assign_phase_2 = num_to_assign-num_to_assign_phase_1
num_available_ships = best_standard_scores.size
else:
num_to_assign = max_standard_ships_hunting_season
num_to_assign_phase_1 = min(num_to_assign, num_available_ships)
num_to_assign_phase_2 = 0
# Assign the remaining standard ships
# Assign the available ships with the highest collect score for collecting
# (preferably non zero halite ships)
best_standard_ids = np.argsort(-best_standard_scores)[
:num_to_assign_phase_1]
for standard_id in best_standard_ids:
standard_row = available_positions[0][standard_id]
standard_col = available_positions[1][standard_id]
standard_key = pos_keys[standard_id]
assert not standard_key in standard_ships
standard_ships.append(standard_key)
# Mark the standard ships as unavailable for pack hunting
for standard_key in standard_ships:
standard_row, standard_col = row_col_from_square_grid_pos(
player_obs[2][standard_key][0], grid_size)
available_pack_hunt_ships[standard_row, standard_col] = 0
# The remaining ships are considered for pack hunting. Send the ones with
# halite on board to a base.
considered_hunting_ships_pos = np.where(available_pack_hunt_ships)
num_available_ships = available_pack_hunt_ships.sum()
for i in range(num_available_ships):
row = considered_hunting_ships_pos[0][i]
col = considered_hunting_ships_pos[1][i]
if halite_ships[row, col] > 0:
available_pack_hunt_ships[row, col] = 0
ship_k = my_ship_pos_to_k[row*grid_size+col]
# Let the ship collect at will (but prefer to go to a base sooner rather
# than later) before joining the pack hunt (after touching base)
for j in [2, 3]:
all_ship_scores[ship_k][j][:] = -1e6
# Let a hoarding ship gather when it is safe to do so
if (obs_halite[row, col] < 100 or safe_collect_margin[
row, col] <= 0) and (
not ship_k in history['temporary_hoarding_collect_ships']):
# FUTURE WORK: Make the multiplier a function of the opponent
# aggression level?
all_ship_scores[ship_k][1][:] *= 4
elif not ship_k in history['temporary_hoarding_collect_ships'] and (
safe_collect_margin[row, col] > 0):
history['temporary_hoarding_collect_ships'].append(ship_k)
# print(observation['step'], row, col, ship_k,
# "Temporarily collecting")
# Ignore ships for hunting that are already being boxed in with my box-in
# ships
box_opponent_mask = np.zeros((grid_size, grid_size), dtype=np.bool)
for boxed_target_row, boxed_target_col in box_opponent_positions:
box_opponent_mask[boxed_target_row, boxed_target_col] = 1
# Consider what to do with the zero halite ships that are available for
# hunting.
  # First attempt: do something similar to mzotkiew
# Main idea: move to the nearest non zero halite opponent if that direction
# is valid.
opponent_ships = stacked_ships[1:].sum(0) > 0
potential_targets = opponent_ships & (halite_ships > 0) & (
~box_opponent_mask)
hunting_ships_pos = np.where(available_pack_hunt_ships)
num_hunting_ships = available_pack_hunt_ships.sum()
# Exclude targets that I am willfully letting convert
for (ignore_convert_row, ignore_convert_col) in ignore_convert_positions:
potential_targets[ignore_convert_row, ignore_convert_col] = 0
# Exclude targets that have a safe path to one of their bases
if num_hunting_ships > 0:
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
nearest_opponent_base_distances = [None]
for opponent_id in range(1, num_players):
num_bases = stacked_bases[opponent_id].sum()
opponent_base_locations = np.where(stacked_bases[opponent_id])
all_opponent_base_distances = [DISTANCES[
opponent_base_locations[0][i], opponent_base_locations[1][i]] for i in (
range(num_bases))] + [99*np.ones((grid_size, grid_size))]
nearest_opponent_base_distances.append(np.stack(
all_opponent_base_distances).min(0))
considered_targets_pos = np.where(potential_targets)
for j in range(potential_targets.sum()):
target_row = considered_targets_pos[0][j]
target_col = considered_targets_pos[1][j]
opponent_id = np.where(stacked_ships[:, target_row, target_col])[0][0]
opp_nearest_base_distances = nearest_opponent_base_distances[opponent_id]
target_distance_to_nearest_base = opp_nearest_base_distances[
target_row, target_col]
my_min_distance_to_opp_nearest_base = opp_nearest_base_distances[
hunting_ships_pos].min()
# if target_row == 20 and target_col == 12 and observation['step'] == 160:
# import pdb; pdb.set_trace()
if target_distance_to_nearest_base < my_min_distance_to_opp_nearest_base:
potential_targets[target_row, target_col] = 0
potential_targets_pos = np.where(potential_targets)
num_potential_targets = potential_targets.sum()
# print(observation['step'], num_hunting_ships, num_potential_targets)
hoarded_one_step_opponent_keys = []
if num_potential_targets > 0 and num_hunting_ships > 0:
# print(observation['step'])
ordered_ship_keys = []
all_target_distances = np.zeros((num_hunting_ships, num_potential_targets))
for i in range(num_hunting_ships):
row = hunting_ships_pos[0][i]
col = hunting_ships_pos[1][i]
ship_k = my_ship_pos_to_k[row*grid_size+col]
ordered_ship_keys.append(ship_k)
potential_target_distances = DISTANCES[row, col][potential_targets]
# Update the target distances to only include potential targets that
# correspond with valid actions
potential_targets_rows = potential_targets_pos[0]
potential_targets_cols = potential_targets_pos[1]
south_dist = np.where(
potential_targets_rows >= row, potential_targets_rows-row,
potential_targets_rows-row+grid_size)
east_dist = np.where(
potential_targets_cols >= col, potential_targets_cols-col,
potential_targets_cols-col+grid_size)
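      # south_dist/east_dist: wrap aware offsets from my ship to each target;
      # they determine which of the (at most two) shortest path directions
      # towards a target remain after dropping my invalid move directions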
valid_directions = all_ship_scores[ship_k][6]
valid_move_counts = 2*np.ones(num_potential_targets)
for d in NOT_NONE_DIRECTIONS:
if not d in valid_directions:
if d == NORTH:
decrement_ids = south_dist >= grid_size/2
elif d == SOUTH:
decrement_ids = (south_dist <= grid_size/2) & (south_dist > 0)
elif d == EAST:
decrement_ids = (east_dist <= grid_size/2) & (east_dist > 0)
elif d == WEST:
decrement_ids = east_dist >= grid_size/2
valid_move_counts[decrement_ids] -= 1
# Handle the case of being in the same row or column
valid_move_counts[south_dist == 0] -= 1
valid_move_counts[east_dist == 0] -= 1
# if observation['step'] == 91 and row == 6 and col == 3:
# import pdb; pdb.set_trace()
assert np.all(valid_move_counts >= 0)
potential_target_distances[valid_move_counts == 0] += 100
all_target_distances[i] = potential_target_distances
opponent_num_escape_directions = np.zeros(num_potential_targets)
for j in range(num_potential_targets):
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
opponent_num_escape_directions[j] = len(opponent_ships_sensible_actions[
target_row, target_col])
# First coordinate my hunters to ships that have no valid escape directions
hunting_ships_available = np.ones(num_hunting_ships, dtype=np.bool)
for j in range(num_potential_targets):
num_escape_dirs = opponent_num_escape_directions[j]
if num_escape_dirs == 0:
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
# Loop over my hunting ships at a distance of max two and take as many
# of the potential escape squares as possible
my_near_ships = np.where((all_target_distances[:, j] <= 2) & (
hunting_ships_available))[0]
num_my_near_ships = my_near_ships.size
if num_my_near_ships > 0:
          # A single ship can attack at most 2 of the escape squares. Keep
          # track of which escape squares each of my ships can attack without
          # collecting halite on the next turn (ignoring halite gained from
          # ship collisions)
my_target_relative_attack_dirs = np.zeros((num_my_near_ships, 5))
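          # Rows: my nearby ships; columns: the five move directions (indexed
          # via MOVE_DIRECTIONS_TO_ID) of the target's escape squares that the
          # ship can attack next turn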
for loop_id, my_ship_id in enumerate(my_near_ships):
row = hunting_ships_pos[0][my_ship_id]
col = hunting_ships_pos[1][my_ship_id]
ship_k = my_ship_pos_to_k[row*grid_size+col]
valid_attack_dirs = all_ship_scores[ship_k][6]
considered_attack_dirs = get_dir_from_target(
row, col, target_row, target_col, grid_size)
if all_target_distances[my_ship_id, j] == 1 and (
obs_halite[row, col] == 0):
considered_attack_dirs.append(None)
attack_dirs = list(set(considered_attack_dirs) & set(
valid_attack_dirs))
# Get the relative directions from the target that I can attack
for d in attack_dirs:
move_row, move_col = move_ship_row_col(
row, col, d, grid_size)
relative_covered_dir = MOVE_DIRECTIONS_TO_ID[get_dir_from_target(
target_row, target_col, move_row, move_col, grid_size)[0]]
my_target_relative_attack_dirs[loop_id, relative_covered_dir] = 1
direction_covered = np.zeros(len(MOVE_DIRECTIONS), dtype=np.bool)
for dir_id, d in enumerate(MOVE_DIRECTIONS):
rel_target_row, rel_target_col = move_ship_row_col(
target_row, target_col, d, grid_size)
if override_move_squares_taken[rel_target_row, rel_target_col]:
direction_covered[dir_id] = 1
# First, handle the ships that can only attack a single square that
# is not covered yet
my_target_relative_attack_dirs[:, direction_covered] = 0
# if observation['step'] == 149:
# import pdb; pdb.set_trace()
          # Greedily loop over the directions, ordered by the number of my
          # ships that can cover each one. Prefer directions with a low but
          # strictly positive cover count.
while my_target_relative_attack_dirs.sum() > 0:
ship_num_possible_attacks = my_target_relative_attack_dirs.sum(1)
dir_num_possible_attacks = my_target_relative_attack_dirs.sum(0)
# The None action is slightly preferred since that guarantees a max
# distance of 1 on the next turn
dir_num_possible_attacks[0] -= 0.1
non_zero_min_count = dir_num_possible_attacks[
dir_num_possible_attacks > 0].min()
best_dir_ids = np.where(dir_num_possible_attacks == (
non_zero_min_count))[0]
dir_id = np_rng.choice(best_dir_ids)
considered_ships = np.where(
my_target_relative_attack_dirs[:, dir_id])[0]
# Break ties with the number of directions each ship covers
cover_ship_scores = ship_num_possible_attacks[considered_ships]
considered_ships_attacker_id = considered_ships[
np.argmin(cover_ship_scores)]
attacker_id = my_near_ships[considered_ships_attacker_id]
# Move my ship to the relative position of the target
rel_target_row, rel_target_col = move_ship_row_col(
target_row, target_col, MOVE_DIRECTIONS[dir_id], grid_size)
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
all_ship_scores[ship_k][0][rel_target_row, rel_target_col] = 2e5
override_move_squares_taken[rel_target_row, rel_target_col] = 1
hunting_ships_available[attacker_id] = 0
# Update the attack dir counts
my_target_relative_attack_dirs[considered_ships_attacker_id] = 0
my_target_relative_attack_dirs[:, dir_id] = 0
# if observation['step'] == 188:
# import pdb; pdb.set_trace()
# Next, coordinate my hunters to ships that have a single moving escape
# direction.
    # These ships are preferred targets since it is likely that I can soon box
# them in, especially if it is me who cuts off three of the move directions
# Order the ships so that the ships that had a single escape direction in
# the previous step are handled first, so we can coordinate the
# interception
one_step_opponent_ids = np.arange(num_potential_targets).tolist()
priority_ids = []
for opponent_ship_k in history['prev_step_hoarded_one_step_opponent_keys']:
if opponent_ship_k in ship_key_to_pos:
opponent_pos = ship_key_to_pos[opponent_ship_k]
target_row, target_col = row_col_from_square_grid_pos(
opponent_pos, grid_size)
if potential_targets[target_row, target_col]:
# We need to check because the target may no longer be available for
# pack hunting due to boxing in or getting close to a friendly base
opponent_priority_id = np.where(
(potential_targets_pos[0] == target_row) & (
potential_targets_pos[1] == target_col))[0][0]
priority_ids.append(opponent_priority_id)
remaining_ids = list(set(one_step_opponent_ids) - set(priority_ids))
    remaining_ids.sort() # Sort for determinism - set difference order can vary
one_step_opponent_ids = priority_ids + remaining_ids
one_step_opponent_positions_directions = []
for j in one_step_opponent_ids:
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
target_escape_directions = opponent_ships_sensible_actions[
target_row, target_col]
move_escape_directions = copy.copy(target_escape_directions)
if (0, 0) in move_escape_directions:
move_escape_directions.remove((0, 0))
num_move_escape_dirs = len(move_escape_directions)
# nearest_target_distances = np.tile(
# all_target_distances.min(1)[:, None], [1, num_potential_targets])
if num_move_escape_dirs == 1:
        # The <= ensures we consider piling up on individual ships
potential_nearby_attackers = np.where(hunting_ships_available & (
all_target_distances[:, j] <= 2))[0]
if potential_nearby_attackers.size >= 2:
# Figure out if I have at least one available ship at a max distance
# of 2 that can push the opponent in one direction
escape_dir = RELATIVE_DIR_TO_DIRECTION_MAPPING[
move_escape_directions[0]]
potential_nearby_distances = all_target_distances[
potential_nearby_attackers, j]
if potential_nearby_distances.min() == 1:
# The None direction is covered - verify the other directions
uncovered_dirs = copy.copy(NOT_NONE_DIRECTIONS)
uncovered_dirs.remove(escape_dir)
ignore_attacker_ids = []
d1_hunters = []
for attacker_id in potential_nearby_attackers:
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
valid_directions = all_ship_scores[ship_k][6]
if escape_dir in valid_directions:
threat_dirs = get_dir_from_target(
target_row, target_col, attacker_row, attacker_col,
grid_size)
uncovered_dirs = list(set(uncovered_dirs) - set(threat_dirs))
if DISTANCES[target_row, target_col][
attacker_row, attacker_col] == 1:
d1_hunters.append(attacker_id)
else:
ignore_attacker_ids.append(attacker_id)
if len(uncovered_dirs) == 0 or (
len(uncovered_dirs) == 1 and len(d1_hunters) > 1):
one_step_opponent_positions_directions.append((
target_row, target_col, escape_dir))
opponent_ship_k = ship_pos_to_key[
target_row*grid_size+target_col]
hoarded_one_step_opponent_keys.append(opponent_ship_k)
if len(uncovered_dirs) > 0:
potential_nearby_attackers = d1_hunters
# Move the attackers in the single escape direction
# import pdb; pdb.set_trace()
for attacker_id in potential_nearby_attackers:
if not attacker_id in ignore_attacker_ids:
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
move_row, move_col = move_ship_row_col(
attacker_row, attacker_col, escape_dir, grid_size)
ship_k = my_ship_pos_to_k[
attacker_row*grid_size+attacker_col]
all_ship_scores[ship_k][0][move_row, move_col] = 2e5
override_move_squares_taken[move_row, move_col] = 1
hunting_ships_available[attacker_id] = 0
# Try to get into a position where the opponent can only move in one
# direction (from two to one escape direction)
for j in range(num_potential_targets):
num_escape_dirs = opponent_num_escape_directions[j]
if num_escape_dirs == 2:
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
potential_nearby_attackers = np.where(hunting_ships_available & (
all_target_distances[:, j] == 1))[0]
attack_selected = False
if potential_nearby_attackers.size == 2:
escape_directions = opponent_ships_sensible_actions[
target_row, target_col]
if (escape_directions[0][0] == 0 and (
escape_directions[1][0] == 0)) or (
escape_directions[0][1] == 0 and (
escape_directions[1][1] == 0)):
# Scenario: ME | OPPONENT | ME - guess the action and then chase
# Guess the opponent's next action
opponent_id = np.where(
stacked_ships[:, target_row, target_col])[0][0]
escape_dir_scores = np.zeros(2)
for escape_id, escape_dir in enumerate(escape_directions):
move_row, move_col = move_ship_row_col(
target_row, target_col, RELATIVE_DIR_TO_DIRECTION_MAPPING[
escape_dir], grid_size)
opponent_influence = player_influence_maps[opponent_id][
move_row, move_col]
my_influence = player_influence_maps[0][move_row, move_col]
escape_dir_scores[escape_id] = opponent_influence-my_influence
likely_opponent_move = RELATIVE_DIR_TO_DIRECTION_MAPPING[
escape_directions[np.argmax(escape_dir_scores)]]
# Only continue if both my ships can move in the selected
# direction
both_can_move = True
can_stay = np.zeros(2, dtype=np.bool)
for attacker_0_or_1, attacker_id in enumerate(
potential_nearby_attackers):
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
both_can_move = both_can_move and likely_opponent_move in (
all_ship_scores[ship_k][6])
can_stay[attacker_0_or_1] = obs_halite[
attacker_row, attacker_col] == 0
if both_can_move:
# If both attackers are on non zero halite squares: move both in the
# likely escape direction. Otherwise, keep one randomly selected ship
# that sits on a zero halite square in place and move the other ship in
# the escape direction
if not np.any(can_stay):
stay_in_place_ids = []
else:
stay_in_place_ids = [np_rng.choice(potential_nearby_attackers[
can_stay])]
for attacker_id in potential_nearby_attackers:
# import pdb; pdb.set_trace()
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
move_dir = None if attacker_id in stay_in_place_ids else (
likely_opponent_move)
move_row, move_col = move_ship_row_col(
attacker_row, attacker_col, move_dir, grid_size)
ship_k = my_ship_pos_to_k[
attacker_row*grid_size+attacker_col]
all_ship_scores[ship_k][0][move_row, move_col] = 2e5
override_move_squares_taken[move_row, move_col] = 1
hunting_ships_available[attacker_id] = 0
attack_selected = True
escape_directions = opponent_ships_sensible_actions[
target_row, target_col]
if not attack_selected and not (0, 0) in escape_directions and len(
escape_directions) == 2:
# Scenario: ME | OPPONENT | | ME - guess the action and then chase
available_nearby = np.where(hunting_ships_available & (
all_target_distances[:, j] <= 2))[0]
if available_nearby.size >= 2:
attacker_rows = hunting_ships_pos[0][available_nearby]
attacker_cols = hunting_ships_pos[1][available_nearby]
north_dist = np.where(
target_row >= attacker_rows, target_row-attacker_rows,
target_row-attacker_rows+grid_size)
vert_rel_pos = np.where(
north_dist < 3, north_dist, north_dist-grid_size)
west_dist = np.where(
target_col >= attacker_cols, target_col-attacker_cols,
target_col-attacker_cols+grid_size)
horiz_rel_pos = np.where(
west_dist < 3, west_dist, west_dist-grid_size)
same_row_ids = (vert_rel_pos == 0)
same_col_ids = (horiz_rel_pos == 0)
consider_attack = False
if np.any(horiz_rel_pos[same_row_ids] < 0) and np.any(
horiz_rel_pos[same_row_ids] > 0):
if np.any(horiz_rel_pos[same_row_ids] == 1) and np.any(
horiz_rel_pos[same_row_ids] == -2):
move_to_target_id = available_nearby[same_row_ids][np.where(
horiz_rel_pos[same_row_ids] == -2)[0][0]]
move_escape_id = available_nearby[same_row_ids][np.where(
horiz_rel_pos[same_row_ids] == 1)[0][0]]
consider_attack = True
elif np.any(horiz_rel_pos[same_row_ids] == -1) and np.any(
horiz_rel_pos[same_row_ids] == 2):
move_to_target_id = available_nearby[same_row_ids][np.where(
horiz_rel_pos[same_row_ids] == 2)[0][0]]
move_escape_id = available_nearby[same_row_ids][np.where(
horiz_rel_pos[same_row_ids] == -1)[0][0]]
consider_attack = True
elif np.any(vert_rel_pos[same_col_ids] < 0) and np.any(
vert_rel_pos[same_col_ids] > 0):
if np.any(vert_rel_pos[same_col_ids] == 1) and np.any(
vert_rel_pos[same_col_ids] == -2):
move_to_target_id = available_nearby[same_col_ids][np.where(
vert_rel_pos[same_col_ids] == -2)[0][0]]
move_escape_id = available_nearby[same_col_ids][np.where(
vert_rel_pos[same_col_ids] == 1)[0][0]]
consider_attack = True
elif np.any(vert_rel_pos[same_col_ids] == -1) and np.any(
vert_rel_pos[same_col_ids] == 2):
move_to_target_id = available_nearby[same_col_ids][np.where(
vert_rel_pos[same_col_ids] == 2)[0][0]]
move_escape_id = available_nearby[same_col_ids][np.where(
vert_rel_pos[same_col_ids] == -1)[0][0]]
consider_attack = True
if consider_attack:
opponent_id = np.where(
stacked_ships[:, target_row, target_col])[0][0]
escape_dir_scores = np.zeros(2)
for escape_id, escape_dir in enumerate(escape_directions):
move_row, move_col = move_ship_row_col(
target_row, target_col, RELATIVE_DIR_TO_DIRECTION_MAPPING[
escape_dir], grid_size)
opponent_influence = player_influence_maps[opponent_id][
move_row, move_col]
my_influence = player_influence_maps[0][move_row, move_col]
escape_dir_scores[escape_id] = opponent_influence-my_influence
likely_opponent_move = RELATIVE_DIR_TO_DIRECTION_MAPPING[
escape_directions[np.argmax(escape_dir_scores)]]
# print(observation['step'], target_row, target_col)
attacker_escape_row = hunting_ships_pos[0][move_escape_id]
attacker_escape_col = hunting_ships_pos[1][move_escape_id]
attacker_to_target_row = hunting_ships_pos[0][move_to_target_id]
attacker_to_target_col = hunting_ships_pos[1][move_to_target_id]
move_escape_row, move_escape_col = move_ship_row_col(
attacker_escape_row, attacker_escape_col, likely_opponent_move,
grid_size)
to_target_dir = get_dir_from_target(
attacker_to_target_row, attacker_to_target_col,
target_row, target_col, grid_size)[0]
move_to_target_row, move_to_target_col = move_ship_row_col(
attacker_to_target_row, attacker_to_target_col,
to_target_dir, grid_size)
ship_escape_k = my_ship_pos_to_k[
attacker_escape_row*grid_size+attacker_escape_col]
ship_to_target_k = my_ship_pos_to_k[
attacker_to_target_row*grid_size+attacker_to_target_col]
if likely_opponent_move in all_ship_scores[ship_escape_k][6] and(
to_target_dir in all_ship_scores[ship_to_target_k][6]) and(
not override_move_squares_taken[
move_escape_row, move_escape_col]) and not (
override_move_squares_taken[
move_to_target_row, move_to_target_col]):
all_ship_scores[ship_escape_k][0][
move_escape_row, move_escape_col] = 2e5
all_ship_scores[ship_to_target_k][0][
move_to_target_row, move_to_target_col] = 2e5
override_move_squares_taken[
move_escape_row, move_escape_col] = 1
override_move_squares_taken[
move_to_target_row, move_to_target_col] = 1
hunting_ships_available[move_escape_id] = 0
hunting_ships_available[move_to_target_id] = 0
# Intercept ships that are pushed in one direction to avoid chasing forever
for target_row, target_col, escape_dir in (
one_step_opponent_positions_directions):
# Try to move perpendicular to the escaping ship if I can catch it in
# time
attacker_rows = hunting_ships_pos[0]
attacker_cols = hunting_ships_pos[1]
north_dist = np.where(
target_row >= attacker_rows, target_row-attacker_rows,
target_row-attacker_rows+grid_size)
west_dist = np.where(
target_col >= attacker_cols, target_col-attacker_cols,
target_col-attacker_cols+grid_size)
if escape_dir in [NORTH, SOUTH]:
perpendicular_distances = np.minimum(west_dist, grid_size-west_dist)
if escape_dir == SOUTH:
direction_distances = grid_size-north_dist
else:
direction_distances = north_dist
else:
perpendicular_distances = np.minimum(north_dist, grid_size-north_dist)
if escape_dir == EAST:
direction_distances = grid_size-west_dist
else:
direction_distances = west_dist
potential_nearby_attackers = np.where(hunting_ships_available & (
direction_distances >= perpendicular_distances))[0]
if potential_nearby_attackers.size > 0:
potential_crossing_min_steps = np.ceil((
direction_distances[potential_nearby_attackers]+(
perpendicular_distances[potential_nearby_attackers]))/2)
min_crossing_distance = potential_crossing_min_steps.min().astype(np.int)
# FUTURE WORK: discard if there is a base on the escape track
if min_crossing_distance <= 6:
attacker_id = potential_nearby_attackers[
np.argmin(potential_crossing_min_steps)]
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
if escape_dir == NORTH:
intersect_row = (target_row-min_crossing_distance) % grid_size
intersect_col = target_col
elif escape_dir == SOUTH:
intersect_row = (target_row+min_crossing_distance) % grid_size
intersect_col = target_col
elif escape_dir == EAST:
intersect_row = target_row
intersect_col = (target_col+min_crossing_distance) % grid_size
elif escape_dir == WEST:
intersect_row = target_row
intersect_col = (target_col-min_crossing_distance) % grid_size
ship_k = my_ship_pos_to_k[
attacker_row*grid_size+attacker_col]
intersect_bonus_mask = get_mask_between_exclude_ends(
attacker_row, attacker_col, intersect_row, intersect_col,
grid_size)
intersect_bonus = 1e5*intersect_bonus_mask
if intersect_bonus_mask.sum() > 1:
# Prefer to move to low halite squares in order to avoid conflicts
# with collect ships
intersect_bonus[intersect_bonus_mask] -= 10*(np.minimum(
1000, 10*obs_halite[intersect_bonus_mask])+obs_halite[
intersect_bonus_mask]/10)
# Give a small penalty to same rows or columns in order to allow more
# move options downstream
intersect_bonus[row] -= 1
intersect_bonus[:, col] -= 1
all_ship_scores[ship_k][0][:] += intersect_bonus
# override_move_squares_taken[move_row, move_col] = 1
# import pdb; pdb.set_trace()
hunting_ships_available[attacker_id] = 0
# Try to cut off the preferred escape direction for opponent ships that
# only have a stand still valid action
for j in range(num_potential_targets):
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
target_escape_directions = opponent_ships_sensible_actions[
target_row, target_col]
if (0, 0) in target_escape_directions and len(
target_escape_directions) == 1:
potential_nearby_attackers = np.where(hunting_ships_available & (
all_target_distances[:, j] <= 2))[0]
num_potential_attackers = potential_nearby_attackers.size
if num_potential_attackers > 0:
can_cover_dirs = {d: [] for d in NOT_NONE_DIRECTIONS}
for attacker_id in potential_nearby_attackers:
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
for d in NOT_NONE_DIRECTIONS:
if d in all_ship_scores[ship_k][6]:
move_attacker_row, move_attacker_col = move_ship_row_col(
attacker_row, attacker_col, d, grid_size)
if DISTANCES[target_row, target_col][
move_attacker_row, move_attacker_col] == 1:
covered_dir = get_dir_from_target(
target_row, target_col, move_attacker_row,
move_attacker_col, grid_size)[0]
can_cover_dirs[covered_dir].append(attacker_id)
cover_dir_scores = np.zeros(len(NOT_NONE_DIRECTIONS))
opponent_id = np.where(
stacked_ships[:, target_row, target_col])[0][0]
for dir_id, d in enumerate(NOT_NONE_DIRECTIONS):
move_row, move_col = move_ship_row_col(
target_row, target_col, d, grid_size)
opponent_influence = player_influence_maps[opponent_id][
move_row, move_col]
not_opponent_influence = player_influence_maps[
:, move_row, move_col].sum() - opponent_influence
cover_dir_scores[dir_id] = (
opponent_influence-not_opponent_influence)
# Greedily cover escape directions by the order of preference for the
# target ship
cover_dir_argsort = np.argsort(-cover_dir_scores)
for dir_id in cover_dir_argsort:
d = NOT_NONE_DIRECTIONS[dir_id]
covered_row, covered_col = move_ship_row_col(
target_row, target_col, d, grid_size)
considered_attacker_ids = can_cover_dirs[d]
# if considered_attacker_ids and override_move_squares_taken[
# covered_row, covered_col]:
# import pdb; pdb.set_trace()
if considered_attacker_ids and not override_move_squares_taken[
covered_row, covered_col]:
# Prefer attackers that can not cover other escape squares (those on the
# same row or col at a distance of 2), so the more flexible diagonal
# attackers remain available
num_considered_attackers = len(considered_attacker_ids)
considered_attack_scores = np.zeros(num_considered_attackers)
for cons_attack_id in range(num_considered_attackers):
attacker_id = considered_attacker_ids[cons_attack_id]
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
considered_attack_scores[cons_attack_id] = int(
(attacker_row != target_row) and (
attacker_col != target_col))
selected_attacker_id = considered_attacker_ids[
np.argmin(considered_attack_scores)]
hunting_ships_available[selected_attacker_id] = 0
attacker_row = hunting_ships_pos[0][selected_attacker_id]
attacker_col = hunting_ships_pos[1][selected_attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
# import pdb; pdb.set_trace()
all_ship_scores[ship_k][0][covered_row, covered_col] = 2e5
override_move_squares_taken[covered_row, covered_col] = 1
for d_ in NOT_NONE_DIRECTIONS:
if selected_attacker_id in can_cover_dirs[d_]:
can_cover_dirs[d_].remove(selected_attacker_id)
# FUTURE WORK: Try to cut off the preferred escape direction for opponent
# ships that only have a single non stand still valid action
# Assign the remaining standard ships
if num_to_assign_phase_2 > 0 and hunting_ships_available.sum() > 0:
available_hunting_ids = np.where(hunting_ships_available)[0]
num_remaining_available_ships = available_hunting_ids.size
best_standard_scores = np.zeros(num_remaining_available_ships)
pos_keys = []
for i in range(num_remaining_available_ships):
row = hunting_ships_pos[0][available_hunting_ids[i]]
col = hunting_ships_pos[1][available_hunting_ids[i]]
pos_key = my_ship_pos_to_k[row*grid_size+col]
best_standard_scores[i] = all_ship_scores[pos_key][0].max() - 1e6*(
halite_ships[row, col] == 0)
pos_keys.append(pos_key)
# Assign the remaining collect ships
# Assign the available ships with the highest collect score for collecting
# (preferably non zero halite ships)
best_standard_ids = np.argsort(
-best_standard_scores)[:num_to_assign_phase_2]
for standard_id in best_standard_ids:
standard_row = hunting_ships_pos[0][available_hunting_ids[standard_id]]
standard_col = hunting_ships_pos[1][available_hunting_ids[standard_id]]
# print("Newly assigned standard ship", standard_row, standard_col)
standard_key = pos_keys[standard_id]
assert not standard_key in standard_ships
standard_ships.append(standard_key)
hunting_ships_available[available_hunting_ids[standard_id]] = False
# Coordinate the remaining hunting actions based on the potential target
# distances, balanced against staying close to the center of mass of my
# halite-carrying ships and my bases
# FUTURE WORK: make this work
# my_vulnerable_score = (
# observation['rewards_bases_ships'][0][2] & (halite_ships > 0)) + 3*(
# observation['rewards_bases_ships'][0][1])
# Average the vulnerable map over time so that the center of mass is more
# stable
# my_time_averaged_vulnerable_score = 0.8*history[
# 'my_time_averaged_vulnerable_score'] + 0.2*my_vulnerable_score
# history['my_time_averaged_vulnerable_score'] = (
# my_time_averaged_vulnerable_score)
# smoothed_vulnerable_score = smooth2d(my_time_averaged_vulnerable_score)
# center_of_vulnerable_mass = np.unravel_index(
# smoothed_vulnerable_score.argmax(), smoothed_vulnerable_score.shape)
# print(observation['step'], center_of_vulnerable_mass)
# FUTURE WORK: don't break ranks when hoarding
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
unavailable_hunt_positions = (stacked_bases[1:].sum(0) > 0) | (
override_move_squares_taken)
for i in range(num_hunting_ships):
if hunting_ships_available[i]:
row = hunting_ships_pos[0][i]
col = hunting_ships_pos[1][i]
ship_k = my_ship_pos_to_k[row*grid_size+col]
potential_target_distances_ship = all_target_distances[i]
potential_target_distances_ship_weighted = (
potential_target_distances_ship)
# potential_target_distances_ship_weighted = (
# potential_target_distances_ship) + DISTANCES[
# center_of_vulnerable_mass][potential_targets_pos]/3
if preferred_victim is not None and (
potential_target_distances_ship.min() > 2):
preferred_victim_ship_ids = player_ids[
potential_targets_pos] == preferred_victim
potential_target_distances_ship_weighted[
preferred_victim_ship_ids] -= 10
selected_target_id = np.argmin(
potential_target_distances_ship_weighted)
target_distance = potential_target_distances_ship[selected_target_id]
if target_distance < 3 or obs_halite[row, col] < 100 or (
safe_collect_margin[row, col] <= 0):
target_row = potential_targets_rows[selected_target_id]
target_col = potential_targets_cols[selected_target_id]
hunting_bonus = 1e5*get_mask_between_exclude_ends(
row, col, target_row, target_col, grid_size)
if target_distance == 1 and not unavailable_hunt_positions[
target_row, target_col]:
hunting_bonus[target_row, target_col] = 1e5
elif target_distance == 1:
# Move in one of the (at most 2) likely next opponent action directions
# if such a direction is still available.
# This means I have another hunter/boxer at distance one which has
# already claimed the target square
sensible_opponent_dirs = opponent_ships_sensible_actions[
(target_row, target_col)]
for d in NOT_NONE_DIRECTIONS:
if RELATIVE_DIR_MAPPING[d] in sensible_opponent_dirs:
move_row, move_col = move_ship_row_col(
row, col, d, grid_size)
if not unavailable_hunt_positions[move_row, move_col]:
hunting_bonus[move_row, move_col] = 1e5
# Prefer to move in the same direction as the target when I am
# tracking the target closely
opponent_ship_k = ship_pos_to_key[
target_row*grid_size+target_col]
if opponent_ship_k in prev_step_opponent_ship_moves and (
target_distance <= 2):
target_prev_move = prev_step_opponent_ship_moves[opponent_ship_k]
bonus_rel_dir = RELATIVE_DIR_MAPPING[target_prev_move]
bonus_rows = np.mod(row + bonus_rel_dir[0]*(
1+np.arange(half_distance_mask_dim)), grid_size)
bonus_cols = np.mod(col + bonus_rel_dir[1]*(
1+np.arange(half_distance_mask_dim)), grid_size)
hunting_bonus[(bonus_rows, bonus_cols)] *= 1.5
if (hunting_bonus > 0).sum() > 1:
# Prefer to move to low halite squares in order to avoid conflicts
# with collect ships
hunting_bonus[hunting_bonus > 0] -= 10*(np.minimum(
1000, 10*obs_halite[hunting_bonus > 0]) + obs_halite[
hunting_bonus > 0]/10)
# Give a small penalty to same rows or columns in order to allow
# more move options downstream
hunting_bonus[row] -= 1
hunting_bonus[:, col] -= 1
else:
# Override the pack hunt and collect at the current square
hunting_bonus = np.zeros((grid_size, grid_size))
if ship_k in history['temporary_hoarding_collect_ships']:
print(observation['step'], row, col, ship_k,
"INVESTIGATE: ship in hoarding collect but also hoarding??")
else:
history['temporary_hoarding_collect_ships'].append(ship_k)
# print(observation['step'], row, col, ship_k,
# "Temporarily collecting")
hunting_bonus[row, col] = 11e4 # Consider
# import pdb; pdb.set_trace()
# x=1
all_ship_scores[ship_k][0][:] += hunting_bonus
# print(standard_ships, available_pack_hunt_ships.sum(),
# stacked_ships[0].sum())
# print(observation['step'], history['temporary_hoarding_collect_ships'])
history['hunting_season_standard_ships'] = standard_ships
history['hunting_season_started'] = True
history['prev_step_hoarded_one_step_opponent_keys'] = (
hoarded_one_step_opponent_keys)
# if observation['step'] == 192:
# import pdb; pdb.set_trace()
return all_ship_scores, history, override_move_squares_taken
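# Illustrative sketch, not part of the original agent: the interception logic
# above estimates the minimum number of steps a hunter needs to cross the
# track of a ship that is being pushed in a single direction as
# ceil((distance along the escape axis + perpendicular distance) / 2), and it
# only considers hunters that are not trailing the target. The _demo_* name
# below is my own; it merely restates that arithmetic with toy numbers.
def _demo_min_crossing_steps():
  import numpy as np
  # Distances from three candidate hunters to the target, measured along the
  # escape axis and perpendicular to it (all satisfy direction >= perpendicular)
  direction_distances = np.array([4, 6, 5])
  perpendicular_distances = np.array([2, 1, 5])
  steps = np.ceil((direction_distances + perpendicular_distances) / 2)
  return steps  # array([3., 4., 5.]); the agent only commits when min <= 6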
def get_no_zero_halite_neighbors(halite):
no_zero_halite_neighbors = np.ones_like(halite, dtype=np.bool)
for d in NOT_NONE_DIRECTIONS:
if d == NORTH:
shifted = np.concatenate([halite[None, -1], halite[:-1]])
elif d == SOUTH:
shifted = np.concatenate([halite[1:], halite[None, 0]])
elif d == EAST:
shifted = np.concatenate([halite[:, 1:], halite[:, 0, None]], 1)
elif d == WEST:
shifted = np.concatenate([halite[:, -1, None], halite[:, :-1]], 1)
no_zero_halite_neighbors &= (shifted > 0)
return no_zero_halite_neighbors
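# Illustrative sketch, not part of the original agent: the helper above marks
# the squares whose four toroidal neighbours all hold halite. The same result
# can be obtained with np.roll; the tiny grid below has a single zero square
# at (0, 0), so the mask is False exactly on the four neighbours of (0, 0).
def _demo_no_zero_halite_neighbors():
  import numpy as np
  halite = np.array([
      [0., 5., 5.],
      [5., 5., 5.],
      [5., 5., 5.]])
  mask = np.ones_like(halite, dtype=bool)
  # Rolling by +/-1 along each axis visits the north/south/west/east
  # neighbours with wrap-around, mirroring the concatenations above.
  for axis, shift in ((0, 1), (0, -1), (1, 1), (1, -1)):
    mask &= np.roll(halite, shift, axis=axis) > 0
  return mask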
def get_my_guaranteed_safe_collect_squares(
opponent_ships, grid_size, my_bases, obs_halite, collect_rate,
halite_ships, observation, halite_on_board_mult=1e-6):
opp_ship_locations = np.where(opponent_ships)
nearest_opponent_stacked_distances = [
99*np.ones((grid_size, grid_size))]
for i in range(opponent_ships.sum()):
opponent_row = opp_ship_locations[0][i]
opponent_col = opp_ship_locations[1][i]
opponent_ship_halite = max(0, halite_ships[opponent_row, opponent_col])
opponent_distances = DISTANCES[opponent_row, opponent_col]
nearest_opponent_stacked_distances.append(
opponent_distances + halite_on_board_mult*opponent_ship_halite)
nearest_opponent_distances = np.stack(
nearest_opponent_stacked_distances).min(0)
my_base_locations = np.where(my_bases)
my_nearest_base_distances = [DISTANCES[
my_base_locations[0][i], my_base_locations[1][i]] for i in range(
my_bases.sum())]
safe_to_collect = np.zeros((grid_size, grid_size), dtype=np.bool)
safe_to_collect_margin = -1*np.ones((grid_size, grid_size), dtype=np.int)
safe_to_return_halites = -1/halite_on_board_mult*np.ones(
(grid_size, grid_size), dtype=np.int)
safe_to_return_base_halites = []
for i in range(my_bases.sum()):
considered_base = my_base_locations[0][i], my_base_locations[1][i]
margin = np.floor((
nearest_opponent_distances[considered_base]-1) - (
my_nearest_base_distances[i] + halite_on_board_mult*(
np.maximum(0, halite_ships)+(
collect_rate*obs_halite).astype(np.int))+1e-12)).astype(np.int)
safe_base_reach = (my_nearest_base_distances[i] + halite_on_board_mult*(
np.maximum(0, halite_ships)+(
collect_rate*obs_halite).astype(np.int))) < (
nearest_opponent_distances[considered_base]-1)
safe_to_collect |= safe_base_reach
safe_to_collect_margin[safe_base_reach] = np.maximum(
safe_to_collect_margin[safe_base_reach], margin[safe_base_reach]+1)
base_safe_return_thresholds = 1/halite_on_board_mult*(
nearest_opponent_distances[considered_base] - (
my_nearest_base_distances[i]))
safe_to_return_halites = np.maximum(
safe_to_return_halites, base_safe_return_thresholds)
safe_to_return_base_halites.append(
(base_safe_return_thresholds, considered_base))
# if observation['step'] == 78:
# import pdb; pdb.set_trace()
# nearest_opponent_stacked_distances_old = [DISTANCES[
# opp_ship_locations[0][i], opp_ship_locations[1][i]] for i in range(
# opponent_ships.sum())] + [99*np.ones((grid_size, grid_size))]
# nearest_opponent_distances_old = np.stack(
# nearest_opponent_stacked_distances_old).min(0)
# my_nearest_base_distances_old = np.stack(my_nearest_base_distances + [
# 99*np.ones((grid_size, grid_size))]).min(0)
# safe_to_collect_old = my_nearest_base_distances_old <= (
# nearest_opponent_distances_old-2)
return (safe_to_collect, safe_to_collect_margin, safe_to_return_halites,
safe_to_return_base_halites)
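# Illustrative sketch, not part of the original agent: stripped of the 1e-6
# halite tie-breakers, the safety test above says a square is safe to collect
# on when I can reach the base strictly sooner than the nearest opponent,
# with a one step buffer. The toy positions and the _demo_* name below are my
# own and only restate that comparison for a single square.
def _demo_safe_collect_single_square():
  grid_size = 21
  def torus_dist(r1, c1, r2, c2):
    dr = abs(r1 - r2)
    dc = abs(c1 - c2)
    return min(dr, grid_size - dr) + min(dc, grid_size - dc)
  base = (10, 10)
  square = (12, 10)     # square I consider collecting on
  opponent = (10, 15)   # nearest opponent ship
  my_dist = torus_dist(*square, *base)       # 2
  opp_dist = torus_dist(*opponent, *base)    # 5
  return my_dist < opp_dist - 1              # True: safe to collect here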
def get_ignored_convert_positions(
likely_convert_opponent_positions, main_base_distances, stacked_ships,
abs_rel_opponent_scores, observation, my_base_distances, opponent_bases,
boxed_in_attack_squares):
ignore_convert_positions = []
for (row, col) in likely_convert_opponent_positions:
main_base_distance = main_base_distances[row, col]
opponent_id = np.where(stacked_ships[:, row, col])[0][0]
if (abs_rel_opponent_scores[opponent_id-1] == 0) and (
main_base_distance >= 9-(observation['relative_step']*6)) and (
my_base_distances[:, row, col].min() >= 5-(
observation['relative_step']*3)):
ignore_convert_positions.append((row, col))
opponent_bases[row, col] = True
boxed_in_attack_squares[ROW_COL_MAX_DISTANCE_MASKS[row, col, 1]] = 0
# if observation['step'] == 84:
# import pdb; pdb.set_trace()
return ignore_convert_positions, opponent_bases, boxed_in_attack_squares
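# Illustrative sketch, not part of the original agent: the distance cutoffs
# used above shrink linearly over the game, so an opponent convert is only
# left alone early on if it happens far from my main base and from all of my
# bases. The _demo_* helper below is my own and just evaluates the two
# cutoffs at a given relative step.
def _demo_ignore_convert_thresholds(relative_step):
  main_base_cutoff = 9 - relative_step * 6
  any_base_cutoff = 5 - relative_step * 3
  return main_base_cutoff, any_base_cutoff
# _demo_ignore_convert_thresholds(0.0) -> (9.0, 5.0)
# _demo_ignore_convert_thresholds(0.5) -> (6.0, 3.5)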
def get_avoid_attack_squares(
boxed_in_attack_squares, approximate_score_diff, currently_winning,
abs_rel_opponent_scores, my_zero_halite_ships, opponent_ships,
influence_map, influence_map_unweighted, my_base_distances,
boxed_in_opponent_ids, observation):
grid_size = opponent_ships.shape[0]
avoid_attack_squares_zero_halite = np.zeros(
(grid_size, grid_size), dtype=np.bool)
# Decide what opponent to attack regardless of the risk of ship loss
# Policy: I am a close second or I am winning and attacking the second
always_attack_opponent_id = None
best_opponent_id = 1+np.argmin(approximate_score_diff)
if np.all(currently_winning) or (
(~currently_winning).sum() == 1 and abs_rel_opponent_scores[
best_opponent_id-1] > 0):
always_attack_opponent_id = best_opponent_id
if np.any(boxed_in_attack_squares):
# Count nearby zero halite and opponent ships
all_boxed_squares = np.where(boxed_in_attack_squares)
for i in range(all_boxed_squares[0].size):
boxed_row = all_boxed_squares[0][i]
boxed_col = all_boxed_squares[1][i]
num_my_nearby_zero_halite = my_zero_halite_ships[
ROW_COL_MAX_DISTANCE_MASKS[boxed_row, boxed_col, 3]].sum()
num_opponent_nearby = opponent_ships[
ROW_COL_MAX_DISTANCE_MASKS[boxed_row, boxed_col, 5]].sum()
if ((influence_map[boxed_row, boxed_col] < 0.5) and (
influence_map_unweighted[boxed_row, boxed_col] < -2) and (
num_my_nearby_zero_halite == 1) and (
num_opponent_nearby > 4) and (
my_base_distances[:, boxed_row, boxed_col].min() >= 5)) and (
always_attack_opponent_id is None or (
boxed_in_opponent_ids[boxed_row, boxed_col] != (
always_attack_opponent_id))):
# Flag the square as bad if I don't have a likely escape path
can_escape = False
avoid_attack_escape_distance = 4
for d in NOT_NONE_DIRECTIONS:
if d == NORTH:
considered_row = (boxed_row - avoid_attack_escape_distance) % (
grid_size)
considered_col = boxed_col
elif d == SOUTH:
considered_row = (boxed_row + avoid_attack_escape_distance) % (
grid_size)
considered_col = boxed_col
elif d == EAST:
considered_row = boxed_row
considered_col = (boxed_col + avoid_attack_escape_distance) % (
grid_size)
elif d == WEST:
considered_row = boxed_row
considered_col = (boxed_col - avoid_attack_escape_distance) % (
grid_size)
if influence_map[considered_row, considered_col] > 0.5:
can_escape = True
break
if not can_escape:
avoid_attack_squares_zero_halite[boxed_row, boxed_col] = 1
# if np.any(avoid_attack_squares_zero_halite):
# print(observation['step'], np.where(avoid_attack_squares_zero_halite))
# import pdb; pdb.set_trace()
# x=1
return avoid_attack_squares_zero_halite, always_attack_opponent_id
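# Illustrative sketch, not part of the original agent: the escape probe above
# looks four steps away from the candidate attack square in each cardinal
# direction (with wrap-around) and treats the square as escapable when my
# influence dominates in at least one probe. The _demo_* helper below is my
# own standalone restatement of that check.
def _demo_can_escape(influence_map, boxed_row, boxed_col, escape_distance=4):
  grid_size = influence_map.shape[0]
  probes = [((boxed_row - escape_distance) % grid_size, boxed_col),   # north
            ((boxed_row + escape_distance) % grid_size, boxed_col),   # south
            (boxed_row, (boxed_col + escape_distance) % grid_size),   # east
            (boxed_row, (boxed_col - escape_distance) % grid_size)]   # west
  return any(influence_map[r, c] > 0.5 for r, c in probes)
# e.g. _demo_can_escape(np.zeros((21, 21)), 5, 5) -> False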
def override_initial_collect(
config, all_ship_scores, obs_halite, halite_ships, stacked_ships,
stacked_bases, player_influence_maps, player_obs, observation,
history):
# ORPHANED LOGIC, this was not going anywhere
# Initial collect override logic. Ships should initially aim for halite
# squares at the boundaries of their influence sphere - that way opponents
# don't get to mine it and I can then later focus on halite near my base
grid_size = stacked_ships.shape[1]
# Stack the collect scores for all my ships
ship_rows = []
ship_cols = []
ship_keys = []
for ship_k in player_obs[2]:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_rows.append(row)
ship_cols.append(col)
ship_keys.append(ship_k)
my_num_ships = len(ship_rows)
if my_num_ships > 0:
my_ship_positions = (np.array(ship_rows), np.array(ship_cols))
opponent_ship_positions = np.where(stacked_ships[1:].sum(0) > 0)
my_ship_halites = halite_ships[my_ship_positions]
# Obtain the nearest distances for my and opponent ships
my_nearest_ship_distances = 99*np.ones((grid_size, grid_size))
for ship_id in range(my_num_ships):
# FUTURE WORK: Should I exclude my returning to base ships? Probably not.
row = my_ship_positions[0][ship_id]
col = my_ship_positions[1][ship_id]
my_nearest_ship_distances = np.minimum(
my_nearest_ship_distances, DISTANCES[row, col])
my_nearest_base_distances = 99*np.ones((grid_size, grid_size))
my_num_bases = stacked_bases[0].sum()
my_base_positions = np.where(stacked_bases[0])
for base_id in range(my_num_bases):
row = my_base_positions[0][base_id]
col = my_base_positions[1][base_id]
my_nearest_base_distances = np.minimum(
my_nearest_base_distances, DISTANCES[row, col])
opponent_nearest_ship_distances = 99*np.ones((grid_size, grid_size))
opponent_ships = stacked_ships[1:].sum(0) > 0
num_opponent_ships = opponent_ships.sum()
for ship_id in range(num_opponent_ships):
row = opponent_ship_positions[0][ship_id]
col = opponent_ship_positions[1][ship_id]
opponent_nearest_ship_distances = np.minimum(
opponent_nearest_ship_distances, DISTANCES[row, col])
opponent_nearest_base_distances = 99*np.ones((grid_size, grid_size))
opponent_bases = stacked_bases[1:].sum(0) > 0
num_opponent_bases = opponent_bases.sum()
opponent_base_positions = np.where(opponent_bases)
for base_id in range(num_opponent_bases):
row = opponent_base_positions[0][base_id]
col = opponent_base_positions[1][base_id]
opponent_nearest_base_distances = np.minimum(
opponent_nearest_base_distances, DISTANCES[row, col])
nearest_ship_distance_difference = (
my_nearest_ship_distances - opponent_nearest_ship_distances)
if observation['step'] == 0:
original_position_multiplier = 1.5**(3-np.abs(np.maximum(
-3, nearest_ship_distance_difference)))
history['original_position_multiplier'] = original_position_multiplier
else:
original_position_multiplier = history['original_position_multiplier']
# Use the opponent influence to determine the value of gather squares -
# squares where my nearest ship and the nearest opponent ship are about
# equally far away have the highest value since these will likely be where
# the competition happens
smooth_multiplier = smooth2d(obs_halite)
smooth_multiplier /= smooth_multiplier.mean()
collect_values = np.copy(obs_halite) * smooth_multiplier
collect_values *= original_position_multiplier
smooth_collect_values = smooth2d(collect_values, 5)
# Compute ship specific collect scores for all considered collect values
# This factors in the distance to each square and the halite on board
# FUTURE WORK: factor in halite on board
all_ship_collect_scores = np.zeros((my_num_ships, grid_size, grid_size))
my_assigned_squares = np.zeros((grid_size, grid_size), dtype=np.bool)
ships_assigned = np.zeros(my_num_ships, dtype=np.bool)
assigned_id = 0
initial_collect_zero_halite_targets = history[
'initial_collect_zero_halite_targets']
for ship_id, ship_k in enumerate(ship_keys):
row = my_ship_positions[0][ship_id]
col = my_ship_positions[1][ship_id]
valid_considered_mask = np.ones((grid_size, grid_size), dtype=np.bool)
bad_directions = list(set(MOVE_DIRECTIONS) - set(all_ship_scores[ship_k][6]))
for d in bad_directions:
if d is None:
valid_considered_mask[row, col] = 0
else:
valid_considered_mask[HALF_PLANES_CATCH[row, col][d]] = 0
dm = DISTANCE_MASKS[(row, col)]
ship_collect_scores = dm*collect_values*valid_considered_mask
ship_collect_scores[row, col] *= int(my_num_bases > 0)*(
config['relative_stand_still_collect_boost'])
all_ship_collect_scores[ship_id] = ship_collect_scores
if my_ship_halites[ship_id] > 0 and not None in bad_directions and (
ship_collect_scores[row, col] == ship_collect_scores.max()):
# import pdb; pdb.set_trace()
my_assigned_squares[row, col] = True
all_ship_scores[ship_k][0][row, col] = 1e5 - assigned_id
ships_assigned[ship_id] = True
assigned_id += 1
elif my_ship_halites[ship_id] == 0 and ship_k in (
initial_collect_zero_halite_targets):
# Prefer consistent target selection - only reevaluate after reaching the
# target
all_ship_collect_scores[ship_id][initial_collect_zero_halite_targets[
ship_k]] *= 2
# if observation['step'] == 13:
# import pdb; pdb.set_trace()
lowest_collect_score = all_ship_collect_scores.min()
# original_ship_collect_scores = np.copy(all_ship_collect_scores)
all_ship_collect_scores[:, my_assigned_squares] = lowest_collect_score
all_ship_collect_scores[ships_assigned] = lowest_collect_score
# First assign the zero halite ships - Ideally, they should spread out and
# target high value halite squares at the boundary of the influence sphere
num_zero_halite_ships = (my_ship_halites == 0).sum()
zero_halite_ids = np.where(my_ship_halites == 0)
zero_halite_collect_scores = all_ship_collect_scores[zero_halite_ids]
zero_halite_targets = {}
for _ in range(num_zero_halite_ships):
(best_ship_id, best_row, best_col) = np.unravel_index(
zero_halite_collect_scores.argmax(), zero_halite_collect_scores.shape)
# import pdb; pdb.set_trace()
ship_k = ship_keys[best_ship_id]
my_assigned_squares[best_row, best_col] = True
# Create a mask between the current and target positions, where we
# encourage the ships to prefer squares with a higher smooth collect value
row = my_ship_positions[0][best_ship_id]
col = my_ship_positions[1][best_ship_id]
collect_ship_score_mask = get_mask_between_exclude_ends(
row, col, best_row, best_col, grid_size)
collect_ship_score_mask[best_row, best_col] = 1
collect_ship_scores = (1e5-assigned_id)*collect_ship_score_mask-(
10*obs_halite)
to_target_dirs = get_dir_from_target(
row, col, best_row, best_col, grid_size)
if len(to_target_dirs) == 2:
first_move_pos = move_ship_row_col(
row, col, to_target_dirs[0], grid_size)
second_move_pos = move_ship_row_col(
row, col, to_target_dirs[1], grid_size)
if smooth_collect_values[first_move_pos] > smooth_collect_values[
second_move_pos]:
avoid_dir = to_target_dirs[1]
else:
avoid_dir = to_target_dirs[0]
collect_ship_scores[HALF_PLANES_CATCH[row, col][avoid_dir]] -= 1
# import pdb; pdb.set_trace()
all_ship_scores[ship_k][0][:] = collect_ship_scores
zero_halite_targets[ship_k] = (best_row, best_col)
assigned_id += 1
ships_assigned[best_ship_id] = True
all_ship_collect_scores[:, my_assigned_squares]
zero_halite_collect_scores[:, best_row, best_col] = lowest_collect_score
zero_halite_collect_scores[best_ship_id] = lowest_collect_score
history['initial_collect_zero_halite_targets'] = zero_halite_targets
print(observation['step'], zero_halite_targets)
return all_ship_scores, history
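# Illustrative sketch, not part of the original (orphaned) logic above: the
# step 0 boost 1.5**(3 - abs(max(-3, diff))) peaks on squares where my
# nearest ship and the nearest opponent ship are equally far away (diff of 0)
# and decays towards 1 deeper inside my own influence sphere (diff clipped at
# -3). The _demo_* helper below is my own and evaluates it for a few diffs.
def _demo_original_position_multiplier():
  import numpy as np
  diff = np.array([-5, -3, -1, 0, 1, 3])
  return 1.5 ** (3 - np.abs(np.maximum(-3, diff)))
  # -> [1.0, 1.0, 2.25, 3.375, 2.25, 1.0]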
def get_ship_scores(config, observation, player_obs, env_config, np_rng,
ignore_bad_attack_directions, history,
env_obs_ids, env_observation, verbose):
ship_scores_start_time = time.time()
convert_cost = env_config.convertCost
spawn_cost = env_config.spawnCost
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
all_my_bases = copy.copy(stacked_bases[0])
my_bases = stacked_bases[0]
# Exclude bases that are persistently camped by opponents
num_my_bases_with_excluded = my_bases.sum()
base_locations_with_excluded = np.where(my_bases)
excluded_base_distances = []
for base_pos in history['my_base_not_attacked_positions']:
# Note: stacking ensures we are working on a copy of the original base
# observation!
my_bases[base_pos] = 0
excluded_base_distances.append(DISTANCES[base_pos])
obs_halite = np.maximum(0, observation['halite'])
# Clip obs_halite to zero when gathering it doesn't add to the score
# code: delta_halite = int(cell.halite * configuration.collect_rate)
collect_rate = env_config.collectRate
obs_halite[obs_halite < 1/collect_rate] = 0
obs_halite_sum = obs_halite.sum()
my_ship_count = len(player_obs[2])
num_my_bases = my_bases.sum()
first_base = my_ship_count == 1 and num_my_bases == 0 and observation[
'step'] <= 10
max_ships = config['max_initial_ships']
early_game_return_boost_step = config['early_game_return_boost_step']
step = observation['step']
early_game_not_max_ships = (my_ship_count < max_ships) and (
step < early_game_return_boost_step)
early_game_return_boost = (early_game_return_boost_step-step)/(
early_game_return_boost_step)*config[
'early_game_return_base_additional_multiplier']*early_game_not_max_ships
steps_remaining = env_config.episodeSteps-1-observation['step']
# Override the maximum number of conversions on the last episode turn
last_episode_turn = observation['relative_step'] == 1
grid_size = obs_halite.shape[0]
half_dim_grid_mask = np.ones((grid_size, grid_size))*half_distance_mask_dim
# smoothed_friendly_ship_halite = smooth2d(
# observation['rewards_bases_ships'][0][3])
smoothed_halite = smooth2d(obs_halite)
can_deposit_halite = num_my_bases > 0
stacked_ships = np.stack(
[rbs[2] for rbs in observation['rewards_bases_ships']])
my_ships = stacked_ships[0]
opponent_ships = stacked_ships[1:].sum(0) > 0
all_ship_count = opponent_ships.sum() + my_ship_count
my_ship_fraction = my_ship_count/(1e-9+all_ship_count)
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[stacked_ships.sum(0) == 0] = -1e-9
my_zero_halite_ships = my_ships & (halite_ships == 0)
last_ship_standing_no_collect = observation[
'relative_step'] > 1/4 and (
stacked_ships[0] & (halite_ships == 0)).sum() == 1
opponent_bases = stacked_bases[1:].sum(0)
player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(stacked_ships.shape[0]):
player_ids[stacked_ships[i]] = i
camping_ships_strategy = history['camping_ships_strategy']
# Get the distance to the nearest base for all squares
all_bases = stacked_bases.sum(0) > 0
base_locations = np.where(all_bases)
num_bases = all_bases.sum()
all_base_distances = [DISTANCES[
base_locations[0][i], base_locations[1][i]] for i in range(num_bases)] + [
99*np.ones((grid_size, grid_size))]
nearest_base_distances = np.stack(all_base_distances).min(0)
if num_my_bases_with_excluded > 0:
all_base_distances_with_excluded = np.stack([DISTANCES[
base_locations_with_excluded[0][i],
base_locations_with_excluded[1][i]] for i in range(
num_my_bases_with_excluded)])
nearest_base_distances_with_my_excluded = (
all_base_distances_with_excluded.min(0))
else:
all_base_distances_with_excluded = np.zeros((0, grid_size, grid_size))
nearest_base_distances_with_my_excluded = 99*np.ones(
(grid_size, grid_size), dtype=np.int)
# Flag to indicate I should not occupy/flood my base with early ships
my_halite = observation['rewards_bases_ships'][0][0]
avoid_base_early_game = my_halite >= spawn_cost and (
observation['step'] < 20) and num_my_bases == 1 and (
my_halite % spawn_cost) == 0 and my_ship_count < 9
# if observation['step'] in [160, 242]:
# import pdb; pdb.set_trace()
# Distance to nearest base mask - gathering closer to my base is better
(base_nearest_distance_scores, my_base_distances,
my_nearest_base_distances) = get_nearest_base_distances(
grid_size, history['my_base_not_attacked_positions'], observation)
# Get opponent ship actions that avoid collisions with less halite ships
(opponent_ships_sensible_actions, opponent_ships_sensible_actions_no_risk,
boxed_in_attack_squares, boxed_in_opponent_ids,
boxed_in_zero_halite_opponents, likely_convert_opponent_positions,
possible_convert_opponent_positions) = get_valid_opponent_ship_actions(
config, observation['rewards_bases_ships'], halite_ships, grid_size,
history, nearest_base_distances_with_my_excluded, observation,
env_config)
# Get the weighted base mask
(weighted_base_mask, main_base_distances, non_abandoned_base_distances,
ship_diff_smoothed) = get_weighted_base_mask(
stacked_bases, stacked_ships, observation, history)
# Get the influence map
(influence_map, influence_map_unweighted, player_influence_maps,
priority_scores, ship_priority_weights,
escape_influence_probs) = get_influence_map(
config, stacked_bases, stacked_ships, halite_ships, observation,
player_obs)
# Scale the opponent bases as a function of attack desirability
(opponent_bases_scaled, opponent_ships_scaled, abs_rel_opponent_scores,
currently_winning, approximate_score_diff, history,
ballistic_attack_base_targets) = scale_attack_scores_bases_ships(
config, observation, player_obs, spawn_cost, non_abandoned_base_distances,
weighted_base_mask, steps_remaining, obs_halite, halite_ships, history,
smoothed_halite, player_influence_maps,
nearest_base_distances_with_my_excluded, player_ids)
ignore_bad_attack_directions = ignore_bad_attack_directions or len(
ballistic_attack_base_targets) > 0
# Decide what converting ships to let convert peacefully
(ignore_convert_positions, opponent_bases,
boxed_in_attack_squares) = get_ignored_convert_positions(
likely_convert_opponent_positions, main_base_distances, stacked_ships,
abs_rel_opponent_scores, observation, my_base_distances, opponent_bases,
boxed_in_attack_squares)
# Decide what boxed in escape squares to avoid - if I use a lonely zero
# halite ship to destroy an opponent's ship, I am likely to lose my ship in
# one of the subsequent turns
(avoid_attack_squares_zero_halite,
always_attack_opponent_id) = get_avoid_attack_squares(
boxed_in_attack_squares, approximate_score_diff, currently_winning,
abs_rel_opponent_scores, my_zero_halite_ships, opponent_ships,
influence_map, influence_map_unweighted, my_base_distances,
boxed_in_opponent_ids, observation)
# Get the squares that have no zero halite neighbors - this makes it hard
# to successfully camp out next to the base
no_zero_halite_neighbors = get_no_zero_halite_neighbors(
observation['halite'])
# Only conditionally attack the bases where I have a camper that is active
my_prev_step_base_attacker_ships = history[
'my_prev_step_base_attacker_ships']
camp_attack_mask = np.ones((grid_size, grid_size), dtype=np.bool)
for ship_k in camping_ships_strategy:
base_location = camping_ships_strategy[ship_k][5]
consider_base_attack = camping_ships_strategy[ship_k][4]
camp_attack_mask[base_location] = consider_base_attack
# Attack opponent ships that camp out next to my base
attack_opponent_campers = history['attack_opponent_campers']
# Don't worry about collecting if I have a base at distance <= d and the
# nearest opponent is at a distance of at least d+2
(safe_to_collect, safe_to_collect_margin, safe_to_return_halites,
safe_to_return_base_halites) = get_my_guaranteed_safe_collect_squares(
opponent_ships, grid_size, all_my_bases, obs_halite, collect_rate,
halite_ships, observation)
# Early on, the collect boost is high as the distance from the nearest base
# grows. This effect rapidly decays once the hunting season starts
stand_still_collect_boost = config['relative_stand_still_collect_boost']
# print(observation['step'], my_ship_count, (stacked_ships[0] & (
# halite_ships == 0)).sum())
n_step_avoid_min_die_prob_cutoff = config[
'n_step_avoid_min_die_prob_cutoff']
if history['num_destroyed_ships'] == 0:
low_risk_limit = 0.1
early_game_risk_limit = max(
low_risk_limit, config['max_risk_n_step_risky'] - (
config['max_risk_n_step_risky']-low_risk_limit)*observation[
'step']/config['max_steps_n_step_risky'])
n_step_avoid_min_die_prob_cutoff = max(
early_game_risk_limit, n_step_avoid_min_die_prob_cutoff)
all_ship_scores = {}
for i, ship_k in enumerate(player_obs[2]):
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
dm = DISTANCE_MASKS[(row, col)]
ship_halite = player_obs[2][ship_k][1]
opponent_less_halite_ships = np.logical_and(
opponent_ships, halite_ships <= ship_halite)
opponent_smoother_less_halite_ships = smooth2d(
opponent_less_halite_ships, smooth_kernel_dim=5)
# Scores 1: collecting halite at row, col
# Combine the smoothed halite and the raw obs_halite, weighted by a
# distance mask specific to the current row and column
ship_influence_priority_multipliers = (
1+config['influence_weights_additional_multiplier']*(
ship_priority_weights[ship_k])**config[
'influence_weights_exponent']) ** priority_scores
collect_grid_scores = dm*(
smoothed_halite * config['collect_smoothed_multiplier'] +
obs_halite * config['collect_actual_multiplier']) * (
config['collect_less_halite_ships_multiplier_base'] ** (
opponent_smoother_less_halite_ships)) * (
base_nearest_distance_scores ** config[
'collect_base_nearest_distance_exponent'])*(
ship_influence_priority_multipliers)
if observation['step'] < 50:
# import pdb; pdb.set_trace()
collect_grid_scores *= (history['original_position_multiplier']**(
observation['step']/50))
base_distance = nearest_base_distances_with_my_excluded[row, col]
collect_grid_scores[row, col] *= int(num_my_bases_with_excluded > 0)*(
stand_still_collect_boost * (1+config[
'initial_collect_boost_away_from_base'] * max(0, base_distance-5)/7)**(
(1-observation['relative_step'])**14))
if ship_k in history['initial_not_collect_near_base_ships']:
collect_grid_scores[history['initial_not_collect_near_base_mask']] = 0
# if observation['step'] == 233:
# import pdb; pdb.set_trace()
if last_ship_standing_no_collect and ship_halite == 0:
collect_grid_scores[row, col] = -1e13
# if observation['step'] >= 14 and row == 2 and col in [9]:
# import pdb; pdb.set_trace()
# Override the collect score to 0 to avoid blocking the base early on in
# the game: All squares right next to the initial base are set to 0
if avoid_base_early_game:
collect_grid_scores, early_next_base_dir, drop_None_valid = (
set_scores_single_nearby_zero(
collect_grid_scores, my_bases, grid_size, row, col))
else:
early_next_base_dir = None
drop_None_valid = False
# At the end of a game, disincentivize all collect squares that are too far
# away from the nearest base to be able to return before the game is over
if steps_remaining < grid_size and nearest_base_distances.min() == 0:
trajectory_lengths = DISTANCES[row, col] + my_nearest_base_distances
collect_grid_scores[trajectory_lengths > (steps_remaining-1-int(
steps_remaining > grid_size//2))] = 0
# Scores 2: returning to any of my bases - delay the return when it is
# safe to collect
safe_collect_ship_margin = safe_to_collect_margin[row, col]
if ship_k in history['returning_to_base_ships'] or (
safe_collect_ship_margin <= 0) or num_my_bases < 2:
delay_return_divisor = 1
else:
# delay_return_divisor = 1
delay_return_divisor = 2**(safe_collect_ship_margin/2)
# Always use the maximum value for a base return if I can safely move there
# (regardless of my influence in that area)
weighted_base_mask_ship_return = np.copy(weighted_base_mask)
for base_safe_return_halite, base_location in safe_to_return_base_halites:
if ship_halite < base_safe_return_halite[row, col]:
weighted_base_mask_ship_return[base_location] = max(
1.0, weighted_base_mask_ship_return[base_location])
base_return_grid_multiplier = dm*min(ship_halite, 2*convert_cost)/(
delay_return_divisor)*(config['return_base_multiplier'] * (
config['return_base_less_halite_ships_multiplier_base'] ** (
opponent_smoother_less_halite_ships)) + early_game_return_boost)*(
weighted_base_mask_ship_return)
# Further incentivize moving onto a base after a return has started when I
# am close to a base since that means I can count on some of the best
# collect score for future steps
if ship_k in history['returning_to_base_ships'] and observation[
'relative_step'] > config['start_hunting_season_relative_step']:
base_return_grid_multiplier += (dm**2)*collect_grid_scores.max()/1.5
chase_details = history['chase_counter'][0].get(ship_k, None)
if chase_details is not None:
# Keep the relative order using the minimum in case the return to base
# pull is big
base_return_grid_multiplier = np.minimum(
base_return_grid_multiplier+5e4, base_return_grid_multiplier*(config[
'chase_return_base_exponential_bonus']**chase_details[1]))
# Force returning to a base when the episode is almost over and I
# have halite on board
if ship_halite > 0 and steps_remaining < grid_size:
base_return_grid_multiplier, end_game_base_return = (
force_return_base_end_episode(
my_bases, base_return_grid_multiplier, main_base_distances, row, col,
steps_remaining, opponent_less_halite_ships, weighted_base_mask,
safe_to_collect))
else:
end_game_base_return = False
# Override the return base score to 0 to avoid blocking the base early on
# in the game.
if avoid_base_early_game:
base_return_grid_multiplier = override_early_return_base_scores(
base_return_grid_multiplier, my_bases, row, col, my_ship_count)
# if observation['step'] == 247 and row == 15 and col == 4:
# import pdb; pdb.set_trace()
# Scores 3: establish a new base
first_base_or_can_spawn = my_ship_count == 1 and num_my_bases == 0 and (
observation['step'] <= 10 or (player_obs[0]+ship_halite) >= (
2*spawn_cost))
establish_base_scores = dm**(config['establish_base_dm_exponent'])*(
smoothed_halite-obs_halite) * (config[
'establish_base_smoothed_multiplier'] + first_base*config[
'establish_first_base_smoothed_multiplier_correction'])*(
1-((my_bases*dm).max()))*(1-my_bases) * (
config['establish_base_less_halite_ships_multiplier_base'] ** (
opponent_smoother_less_halite_ships)) - (
convert_cost*can_deposit_halite) + min(
ship_halite, convert_cost)*(
config['establish_base_deposit_multiplier']) + first_base*(
config['first_base_no_4_way_camping_spot_bonus']*(
no_zero_halite_neighbors)) - 1e5*int(not (
first_base_or_can_spawn))
# if observation['step'] == 391 and ship_k == '58-1':
# import pdb; pdb.set_trace()
# Scores 4: attack an opponent base at row, col
attack_step_multiplier = min(5, max(1, 1/(
2*(1-observation['relative_step']+1e-9))))
if ship_k in my_prev_step_base_attacker_ships:
# Encourage the base attack of a ship to be persistent
attack_step_multiplier *= 5
attack_base_scores = dm*np.minimum(15e5, camp_attack_mask*(
attack_step_multiplier)*config['attack_base_multiplier']*(
opponent_bases_scaled)*(config[
'attack_base_less_halite_ships_multiplier_base'] ** (
opponent_smoother_less_halite_ships))) - (config[
'attack_base_halite_sum_multiplier'] * obs_halite_sum**0.8 / (
all_ship_count))*int(my_ship_fraction < 0.5) - 1e12*(
ship_halite > 0)
# Keep the preference order in ballistic mode without abandoning rescue or
# base defense ships
attack_base_scores = np.minimum(15e5, attack_base_scores) + 1e-10*(
attack_base_scores) * (attack_base_scores > 15e5)
# Update the scores as a function of nearby opponent ships to avoid
# collisions with opposing ships that carry less halite and encourage
# collisions with opponent ships that carry more halite
# Also incorporate the camping score override behavior here
camping_override_strategy = camping_ships_strategy.get(ship_k, ())
attack_campers_override_strategy = attack_opponent_campers.get(ship_k, ())
(collect_grid_scores, base_return_grid_multiplier, establish_base_scores,
attack_base_scores, preferred_directions, valid_directions,
agent_surrounded, two_step_bad_directions, n_step_bad_directions,
one_step_valid_directions, n_step_bad_directions_die_probs,
original_n_step_bad_directions,
original_n_step_bad_directions_die_probs) = update_scores_opponent_ships(
config, collect_grid_scores, base_return_grid_multiplier,
establish_base_scores, attack_base_scores, opponent_ships,
opponent_bases, halite_ships, row, col, grid_size, spawn_cost,
drop_None_valid, obs_halite, collect_rate, np_rng,
opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, ignore_bad_attack_directions,
observation, ship_k, all_my_bases, my_ships, steps_remaining, history,
escape_influence_probs, player_ids, env_obs_ids, env_observation,
main_base_distances, nearest_base_distances, end_game_base_return,
camping_override_strategy, attack_campers_override_strategy,
boxed_in_attack_squares, safe_to_collect,
boxed_in_zero_halite_opponents, ignore_convert_positions,
avoid_attack_squares_zero_halite, n_step_avoid_min_die_prob_cutoff,
safe_to_return_halites, safe_to_return_base_halites,
my_nearest_base_distances)
# if observation['step'] == 169 and ship_k == '65-2':
# import pdb; pdb.set_trace()
# Update the scores as a function of blocking opponent bases and my early
# game initial base
(collect_grid_scores, base_return_grid_multiplier, establish_base_scores,
attack_base_scores, valid_directions, one_step_valid_directions,
opponent_base_directions) = update_scores_blockers(
collect_grid_scores, base_return_grid_multiplier, establish_base_scores,
attack_base_scores, row, col, grid_size, opponent_bases,
half_dim_grid_mask, valid_directions, one_step_valid_directions,
early_next_base_dir, update_attack_base=False)
if last_episode_turn:
# Convert all ships with more halite than the convert cost on the last
# episode step
last_episode_step_convert = ship_halite >= convert_cost
if last_episode_step_convert and num_my_bases_with_excluded > 0:
# Don't convert if I can safely move to a base next to my square.
min_base_distance = all_base_distances_with_excluded[:, row, col].min()
if min_base_distance == 1:
if opponent_less_halite_ships.sum() == 0:
last_episode_step_convert = False
else:
for base_id in range(num_my_bases_with_excluded):
base_row = base_locations_with_excluded[0][base_id]
base_col = base_locations_with_excluded[1][base_id]
if all_base_distances_with_excluded[base_id, row, col] == 1:
if DISTANCES[base_row, base_col][
opponent_less_halite_ships].min() > 1:
last_episode_step_convert = False
break
if last_episode_step_convert:
establish_base_scores[row, col] = 1e12
base_locations_with_excluded = (
np.append(base_locations_with_excluded[0], row),
np.append(base_locations_with_excluded[1], col))
all_base_distances_with_excluded = np.concatenate(
[all_base_distances_with_excluded,
np.expand_dims(DISTANCES[row, col], 0)])
num_my_bases_with_excluded += 1
elif ship_halite > 0:
base_return_grid_multiplier[DISTANCES[row, col] == 1] += 1e5
end_game_base_return = True
else:
last_episode_step_convert = False
all_ship_scores[ship_k] = (
collect_grid_scores, base_return_grid_multiplier, establish_base_scores,
attack_base_scores, preferred_directions, agent_surrounded,
valid_directions, two_step_bad_directions, n_step_bad_directions,
one_step_valid_directions, opponent_base_directions, 0,
end_game_base_return, last_episode_step_convert,
n_step_bad_directions_die_probs, opponent_smoother_less_halite_ships,
ship_influence_priority_multipliers, original_n_step_bad_directions,
original_n_step_bad_directions_die_probs)
# if observation['relative_step'] < config[
# 'initial_collect_override_relative_step']:
# all_ship_scores, history = override_initial_collect(
# config, all_ship_scores, obs_halite, halite_ships, stacked_ships,
# stacked_bases, player_influence_maps, player_obs, observation,
# history)
ship_scores_duration = time.time() - ship_scores_start_time
return (all_ship_scores, opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, weighted_base_mask,
opponent_ships_scaled, main_base_distances, ship_scores_duration,
halite_ships, player_influence_maps, boxed_in_zero_halite_opponents,
ignore_convert_positions, ship_diff_smoothed,
ballistic_attack_base_targets, safe_to_return_halites,
safe_to_collect_margin, always_attack_opponent_id,
likely_convert_opponent_positions,
possible_convert_opponent_positions, my_base_distances,
nearest_base_distances, history)
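# Illustrative sketch, not part of the original agent: the clipping in
# get_ship_scores zeroes out squares whose halite cannot yield anything when
# gathered, because delta_halite = int(cell.halite * collect_rate) truncates
# to 0 below 1/collect_rate. The 0.25 collect rate below is an assumed
# default; the agent reads the real value from env_config.collectRate.
def _demo_clip_ungatherable_halite():
  import numpy as np
  collect_rate = 0.25                          # assumed default rate
  obs_halite = np.array([0.0, 2.0, 3.9, 4.0, 500.0])
  clipped = obs_halite.copy()
  clipped[clipped < 1 / collect_rate] = 0      # int(3.9 * 0.25) == 0
  return clipped                               # [0., 0., 0., 4., 500.]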
def get_mask_between_exclude_ends(r1, c1, r2, c2, grid_size):
rel_pos = get_relative_position(r1, c1, r2, c2, grid_size)
start_row = r2 if rel_pos[0] < 0 else r1
rows = np.mod(
np.arange(start_row, start_row+
|
np.abs(rel_pos[0])
|
numpy.abs
|
import numpy as np
import scipy.io as sio
import os
from PIL import Image
class BatchDatset:
imgs = []
max_batch = 0
batch_size = 0
cur_imgs = []
cur_labels = []
cur_batch = 0 # index of batch generated
cur_ind = 0 # index of current image in imgs
img_width = 600
img_height = 800
def __init__(self, imgs_path, batch_size=2):
self.imgs = sio.loadmat(imgs_path)['trainlist'][0]
#self.labels = sio.loadmat(labels_path)['test_list'][0]
self.batch_size = batch_size
#self.max_batch = len(self.imgs) * 9 / batch_size
self.cur_imgs, self.cur_labels = self.get_variations(self.imgs[0])
def next_batch(self):
while len(self.cur_imgs) < self.batch_size: # if not enough, get the next image
self.cur_ind += 1
#print('appending', self.cur_ind)
if self.cur_ind >= len(self.imgs):
#print('leaving', self.cur_ind)
break
cur_name = self.imgs[self.cur_ind]
tmp_imgs, tmp_labels = self.get_variations(cur_name)
self.cur_imgs += tmp_imgs
self.cur_labels += tmp_labels
if len(self.cur_imgs) >= self.batch_size:
#print('getting', self.cur_ind)
rimat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.float)
ramat = np.zeros((self.batch_size, self.img_height, self.img_width, 1), dtype=np.int)
self.cur_batch += 1 # output a new batch
for i in range(self.batch_size):
rimat[i] = self.cur_imgs.pop(0)
ramat[i, :, :, 0] = self.cur_labels.pop(0)
#print('batch:', self.cur_batch, 'at img:', self.imgs[self.cur_ind], 'generate image shape', rimat.shape, 'and label shape', ramat.shape)
return rimat, ramat
return [], []
def get_variations(self, img_name):
imgs = []
labels = []
stp = str(img_name)
if img_name < 10:
stp = '0000' + stp
elif img_name < 100:
stp = '000' + stp
elif img_name < 1000:
stp = '00' + stp
else:
stp = '0' + stp
img_path = 'data/portraitFCN_data/' + stp + '.mat'
alpha_path = 'data/images_mask/' + stp + '_mask.mat'
if os.path.exists(img_path) and os.path.exists(alpha_path):
imat = sio.loadmat(img_path)['img']
amat = sio.loadmat(alpha_path)['mask']
nimat = np.array(imat, dtype=np.float)
namat = np.array(amat, dtype=np.int)
imgs.append(nimat)
labels.append(namat)
angs = [-45, -22, 22, 45]
gammas = [0.8, 0.9, 1.1, 1.2]
org_mat = np.zeros(nimat.shape, dtype=np.int)
h, w, _ = nimat.shape
for i in range(h):
for j in range(w):
org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)
org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)
org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)
i_img = Image.fromarray(np.uint8(org_mat))
a_img = Image.fromarray(np.uint8(amat))
for i in range(4):
tmpi_img = i_img.rotate(angs[i])
tmpa_img = a_img.rotate(angs[i])
tmpri_img = np.array(tmpi_img, dtype=np.int)
rimat = np.zeros(tmpri_img.shape, dtype=np.float)
for k in range(h):
for j in range(w):
rimat[k][j][0] = (tmpri_img[k][j][2] * 1.0 - 104.008) / 255
rimat[k][j][1] = (tmpri_img[k][j][1] * 1.0 - 116.669) / 255
rimat[k][j][2] = (tmpri_img[k][j][0] * 1.0 - 122.675) / 255
imgs.append(rimat)
labels.append(np.array(tmpa_img, dtype=np.int))
tmp_nimat = np.array(imat, dtype=np.float)
tmp_nimat[:, :, 0] = tmp_nimat[:, :, 0] * 255 + 104.01
tmp_nimat[:, :, 0] = (pow(tmp_nimat[:, :, 0], gammas[i]) - pow(104.01, gammas[i])) / pow(255, gammas[i])
tmp_nimat[:, :, 1] = tmp_nimat[:, :, 1] * 255 + 116.67
tmp_nimat[:, :, 1] = (pow(tmp_nimat[:, :, 1], gammas[i]) - pow(116.67, gammas[i])) / pow(255, gammas[i])
tmp_nimat[:, :, 2] = tmp_nimat[:, :, 2] * 255 + 122.68
tmp_nimat[:, :, 2] = (pow(tmp_nimat[:, :, 2], gammas[i]) - pow(122.68, gammas[i])) / pow(255, gammas[i])
imgs.append(tmp_nimat)
labels.append(namat)
return imgs, labels
class TestDataset:
imgs = []
max_batch = 0
batch_size = 0
cur_batch = 0 # index of batch generated
cur_ind = -1 # index of current image in imgs
img_width = 600
img_height = 800
def __init__(self, imgs_path, batch_size=2):
self.imgs = sio.loadmat(imgs_path)['testlist'][0]
#self.labels = sio.loadmat(labels_path)['test_list'][0]
self.batch_size = batch_size
#self.max_batch = len(self.imgs) * 9 / batch_size
#self.cur_imgs, self.cur_labels = self.get_images(self.imgs[0])
def next_batch(self):
cur_imgs = []
cur_labels = []
cur_orgs = []
while len(cur_imgs) < self.batch_size: # if not enough, get the next image
self.cur_ind += 1
#print('appending', self.cur_ind)
if self.cur_ind >= len(self.imgs):
#print('leaving', self.cur_ind)
break
cur_name = self.imgs[self.cur_ind]
tmp_img, tmp_label, tmp_org = self.get_images(cur_name)
if tmp_img is not None:
cur_imgs.append(tmp_img)
cur_labels.append(tmp_label)
cur_orgs.append(tmp_org)
if len(cur_imgs) == self.batch_size:
#print('getting', self.cur_ind)
rimat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.float)
org_mat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.int)
ramat = np.zeros((self.batch_size, self.img_height, self.img_width, 1), dtype=np.int)
self.cur_batch += 1 # output a new batch
for i in range(self.batch_size):
rimat[i] = cur_imgs.pop(0)
org_mat[i] = cur_orgs.pop(0)
ramat[i, :, :, 0] = cur_labels.pop(0)
#print('getting', ramat[0, 200:210, 200:220])
#print('batch:', self.cur_batch, 'at img:', self.imgs[self.cur_ind], 'generate image shape', rimat.shape, 'and label shape', ramat.shape)
return rimat, ramat, org_mat
return [], [], []
def get_images(self, img_name):
stp = str(img_name)
if img_name < 10:
stp = '0000' + stp
elif img_name < 100:
stp = '000' + stp
elif img_name < 1000:
stp = '00' + stp
else:
stp = '0' + stp
img_path = 'data/portraitFCN_data/' + stp + '.mat'
alpha_path = 'data/images_mask/' + stp + '_mask.mat'
if os.path.exists(img_path) and os.path.exists(alpha_path):
imat = sio.loadmat(img_path)['img']
amat = sio.loadmat(alpha_path)['mask']
nimat = np.array(imat, dtype=np.float)
namat = np.array(amat, dtype=np.int)
org_mat = np.zeros(nimat.shape, dtype=np.int)
h, w, _ = nimat.shape
for i in range(h):
for j in range(w):
org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)
org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)
org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)
return nimat, namat, org_mat
return None, None, None
if __name__ == '__main__':
data = BatchDatset('data/trainlist.mat')
'''ri, ra = data.next_batch()
while len(ri) != 0:
ri, ra = data.next_batch()
print(np.sum(ra))'''
imgs, labels = data.get_variations(47)
cnt = 0
for img in imgs:
mat =
|
np.zeros(img.shape, dtype=np.int)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""Untitled6.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1W8e4Hs79pu25biKKh9mK6h6-jE2XEGA1
"""
import math
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import datetime
import json
from tqdm import tqdm
#from pytorch_transformers import BertTokenizer, BertModel, AdamW
from transformers import BertTokenizer, BertModel, AdamW
# In[111]:
import os
from recordclass import recordclass
from collections import OrderedDict
import numpy as np
import random
import pickle
import sys
language_hyperparameters = {
'hindi':{
'max_src_len' : 199,
'max_trg_len' : 28,
'max_ent_len' : 68,
'max_trig_len': 22
},
'bengali':{
'max_src_len' : 106,
'max_trg_len' : 15,
'max_ent_len' : 28,
'max_trig_len': 7
}
}
language = sys.argv[1]
def custom_print(*msg):
for i in range(0, len(msg)):
if i == len(msg) - 1:
print(msg[i])
logger.write(str(msg[i]) + '\n')
else:
# print(msg[i], ' ', end='')
print(msg[i],sep=" " ,end="")
logger.write(str(msg[i]))
def get_data(src_lines, trg_lines, pos_lines, datatype):
samples = []
uid = 1
for i in range(0, len(src_lines)): # for each line
src_line = src_lines[i].strip()
trg_line = trg_lines[i].strip()
#pos_line = pos_lines[i].strip()
src_words = src_line.split()
#word_pos_tags = pos_line.split()
#trg_rels = [] # holds relations present in a sentence
trg_events = [] # holds events present in a sentence
trg_args = [] # holds arguments present in a sentence
trg_pointers = [] # holds tuples containing records per relation
parts = trg_line.split('|')
'''
if datatype == 1:
random.shuffle(parts)
'''
# adj_data = json.loads(adj_lines[i])#skip
# adj_mat = get_adj_mat(len(src_words), adj_data['adj_mat'])#skip
tuples_in = []
for part in parts:
elements = part.strip().split(";")
tuples_in.append((int(elements[0]), int(elements[1]), eventnameToIdx[elements[2]], int(
elements[3]), int(elements[4]), argnameToIdx[elements[5]]))
if datatype == 1 or datatype==2:
tuples_in = sorted(
tuples_in, key=lambda element: (element[0], element[3]))
for elements in tuples_in:
#elements = part.strip().split()
# print(elements)
# relation index (corresponding to the relation_name from relation_vocab)
# trg_rels.append(elements[6])
trg_events.append(elements[2]) # event index
trg_args.append(elements[5]) # arg index
# all the records like event-start_index, end_index, entity- start_index, end_index
trg_pointers.append((int(elements[0]), int(
elements[1]), int(elements[3]), int(elements[4])))
# skip if the sentence exceeds max_src_len or the target exceeds max_trg_len (the max number of relation tuples in a sentence)
if (datatype == 1 or datatype==2) and (len(src_words) > max_src_len):
# print(src_line)
# print(trg_line)
continue
# sample = Sample(Id=uid, SrcLen=len(src_words), SrcWords=src_words, PosTags=word_pos_tags, TrgLen=len(trg_rels), TrgRels=trg_rels,
# TrgPointers=trg_pointers, eventTypes=trg_events, argTypes=trg_args) # recordclass("Sample", "Id SrcLen SrcWords TrgLen TrgRels eventTypes argTypes TrgPointers")
sample = Sample(Id=uid, SrcLen=len(src_words), SrcWords=src_words,TrgLen=len(parts),
TrgPointers=trg_pointers, eventTypes=trg_events, argTypes=trg_args)
samples.append(sample)
uid += 1
return samples
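# Hedged illustration (added for clarity, not part of the original script): the
# target lines that get_data() parses are event tuples separated by '|', each tuple
# being "trig_start;trig_end;EVENT;arg_start;arg_end;ROLE". The names and the tiny
# index dicts below are made-up stand-ins for the vocabularies built later by
# get_events()/get_arguments().
_demo_event_idx = {'<PAD>': 0, 'Attack': 1}
_demo_arg_idx = {'<PAD>': 0, 'Place': 1}
_demo_trg_line = '2;2;Attack;5;6;Place | 8;9;Attack;0;1;Place'
_demo_tuples = []
for _part in _demo_trg_line.split('|'):
    _e = _part.strip().split(';')
    _demo_tuples.append((int(_e[0]), int(_e[1]), _demo_event_idx[_e[2]],
                         int(_e[3]), int(_e[4]), _demo_arg_idx[_e[5]]))
# -> [(2, 2, 1, 5, 6, 1), (8, 9, 1, 0, 1, 1)], later sorted by (trig_start, arg_start)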
# In[113]:
def read_data(src_file, trg_file, pos_dev_file, datatype):
reader = open(src_file)
src_lines = reader.readlines()
reader.close()
reader = open(trg_file)
trg_lines = reader.readlines()
reader.close()
# reader = open(pos_dev_file)
# pos_lines = reader.readlines()
# reader.close()
# l = 1000
# src_lines = src_lines[0:min(l, len(src_lines))]
# trg_lines = trg_lines[0:min(l, len(trg_lines))]
# adj_lines = adj_lines[0:min(l, len(adj_lines))]
data = get_data(src_lines, trg_lines, None,
datatype) # call get_data()
return data # list of records, records are of type Sample
# In[114]:
def get_relations(file_name):
nameToIdx = OrderedDict() # dictionary{key=name, value=idx}
idxToName = OrderedDict() # dictionary{key=idx, value=name}
reader = open(file_name)
lines = reader.readlines()
reader.close()
nameToIdx['<PAD>'] = 0
idxToName[0] = '<PAD>'
# nameToIdx['<SOS>'] = 1
# idxToName[1] = '<SOS>'
#nameToIdx['None'] = 1
#idxToName[1] = 'None'
idx = 1
for line in lines:
nameToIdx[line.strip()] = idx
idxToName[idx] = line.strip()
idx += 1
return nameToIdx, idxToName
def get_events(file_name):
nameToIdx = OrderedDict() # dictionary{key=name, value=idx}
idxToName = OrderedDict() # dictionary{key=idx, value=name}
reader = open(file_name)
lines = reader.readlines()
reader.close()
nameToIdx['<PAD>'] = 0
idxToName[0] = '<PAD>'
# nameToIdx['<SOS>'] = 1
# idxToName[1] = '<SOS>'
#nameToIdx['None'] = 1
#idxToName[1] = 'None'
idx = 1
for line in lines:
nameToIdx[line.strip()] = idx
idxToName[idx] = line.strip()
idx += 1
return nameToIdx, idxToName
def get_arguments(file_name):
nameToIdx = OrderedDict() # dictionary{key=name, value=idx}
idxToName = OrderedDict() # dictionary{key=idx, value=name}
reader = open(file_name)
lines = reader.readlines()
reader.close()
nameToIdx['<PAD>'] = 0
idxToName[0] = '<PAD>'
# nameToIdx['<SOS>'] = 1
# idxToName[1] = '<SOS>'
#nameToIdx['None'] = 1
#idxToName[1] = 'None'
idx = 1
for line in lines:
nameToIdx[line.strip()] = idx
idxToName[idx] = line.strip()
idx += 1
return nameToIdx, idxToName
# In[115]:
def is_full_match(triplet, triplets):
for t in triplets:
if t[0] == triplet[0] and t[1] == triplet[1] and t[2] == triplet[2] and t[3] == triplet[3]:
return True
return False
# In[116]:
def is_partial_match(triplet,triplets):
for t in triplets:
if t[0] == triplet[0] or t[1] == triplet[1] or t[2] == triplet[2] or t[3] == triplet[3]:
return True
return False
def get_gt_triples(src_words, rels, pointers, event_list, arg_list):
touples = []
i = 0
for r in pointers:
arg1 = ' '.join(src_words[pointers[i][0]:pointers[i][1] + 1])
arg2 = ' '.join(src_words[pointers[i][2]:pointers[i][3] + 1])
# touplet = (arg1.strip(), eventIdxToName[event_list[i]], arg2.strip(
# ), argIdxToName[arg_list[i]], relIdxToName[r])
touplet = (arg1.strip(), eventIdxToName[event_list[i]], arg2.strip(
), argIdxToName[arg_list[i]])
if not is_full_match(touplet, touples):
touples.append(touplet)
i += 1
'''
for e in event_list:
arg1 = ' '.join(src_words[pointers[i][0]:pointers[i][1] + 1])
arg2 = ' '.join(src_words[pointers[i][2]:pointers[i][3] + 1])
touplet = (arg1.strip(), eventIdxToName[e], arg2.strip(), argIdxToName[arg_list[i]], relIdxToName[rels[i]])
if not is_full_match(touplet, touples):
touples.append(touplet)
i += 1
'''
return touples
# In[117]:
def get_answer_pointers(arg1start_preds, arg1end_preds, arg2start_preds, arg2end_preds, sent_len):
arg1_prob = -1.0
arg1start = -1
arg1end = -1
#FIND MAX LENGTH OF TRIGGER PHRASE AND ENTITY PHRASE
max_ent_len = language_hyperparameters[language]['max_ent_len'] #BENGALI 28 HINDI 68
max_trig_len = language_hyperparameters[language]['max_trig_len'] #BENGALI 7 HINDI 22
for i in range(0, sent_len):
for j in range(i, min(sent_len, i + max_trig_len)):
if arg1start_preds[i] * arg1end_preds[j] > arg1_prob:
arg1_prob = arg1start_preds[i] * arg1end_preds[j]
arg1start = i
arg1end = j
arg2_prob = -1.0
arg2start = -1
arg2end = -1
for i in range(0, arg1start):
for j in range(i, min(arg1start, i + max_ent_len)):
if arg2start_preds[i] * arg2end_preds[j] > arg2_prob:
arg2_prob = arg2start_preds[i] * arg2end_preds[j]
arg2start = i
arg2end = j
for i in range(arg1end + 1, sent_len):
for j in range(i, min(sent_len, i + max_ent_len)):
if arg2start_preds[i] * arg2end_preds[j] > arg2_prob:
arg2_prob = arg2start_preds[i] * arg2end_preds[j]
arg2start = i
arg2end = j
arg2_prob1 = -1.0
arg2start1 = -1
arg2end1 = -1
for i in range(0, sent_len):
for j in range(i, min(sent_len, i + max_ent_len)):
if arg2start_preds[i] * arg2end_preds[j] > arg2_prob1:
arg2_prob1 = arg2start_preds[i] * arg2end_preds[j]
arg2start1 = i
arg2end1 = j
arg1_prob1 = -1.0
arg1start1 = -1
arg1end1 = -1
for i in range(0, arg2start1):
for j in range(i, min(arg2start1, i + max_trig_len)):
if arg1start_preds[i] * arg1end_preds[j] > arg1_prob1:
arg1_prob1 = arg1start_preds[i] * arg1end_preds[j]
arg1start1 = i
arg1end1 = j
for i in range(arg2end1 + 1, sent_len):
for j in range(i, min(sent_len, i + max_trig_len)):
if arg1start_preds[i] * arg1end_preds[j] > arg1_prob1:
arg1_prob1 = arg1start_preds[i] * arg1end_preds[j]
arg1start1 = i
arg1end1 = j
if arg1_prob * arg2_prob > arg1_prob1 * arg2_prob1:
return arg1start, arg1end, arg2start, arg2end
else:
return arg1start1, arg1end1, arg2start1, arg2end1
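# Minimal, self-contained illustration (made-up numbers, not calling the function
# above, which depends on the module-level `language` setting): the core idea of
# get_answer_pointers is to pick, within a bounded window, the (start, end) span
# whose start-probability times end-probability is maximal.
_start_p = [0.1, 0.7, 0.1, 0.1]
_end_p = [0.1, 0.2, 0.6, 0.1]
_best, _span = -1.0, (-1, -1)
for _i in range(len(_start_p)):
    for _j in range(_i, min(len(_start_p), _i + 3)):  # window of at most 3 tokens
        if _start_p[_i] * _end_p[_j] > _best:
            _best, _span = _start_p[_i] * _end_p[_j], (_i, _j)
# -> _span == (1, 2) with a probability product of 0.42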
# In[118]:
def get_pred_triples(rel, arg1s, arg1e, arg2s, arg2e, eTypes, aTypes, src_words):
touples = []
all_touples = []
for i in range(0, len(arg1s)):
s1, e1, s2, e2 = get_answer_pointers(
arg1s[i], arg1e[i], arg2s[i], arg2e[i], len(src_words))
if s1 == 0 or e1 == 0:
break
# r = np.argmax(rel[i][1:]) + 1
# event type can not be <pad> or <None>
ev = np.argmax(eTypes[i][1:]) + 1
at = np.argmax(aTypes[i][1:]) + 1
arg1 = ' '.join(src_words[s1: e1 + 1])
arg2 = ' '.join(src_words[s2: e2 + 1])
arg1 = arg1.strip()
arg2 = arg2.strip()
if arg1 == arg2:
continue
# touplet = (arg1, eventIdxToName[ev], arg2,
# argIdxToName[at], relIdxToName[r])
touplet = (arg1, eventIdxToName[ev], arg2,
argIdxToName[at])
# same (trigger, argument) pair can not have two different role
if (touplet[0], touplet[2]) in [(t[0], t[2]) for t in touples]:
continue
# if (touplet[0], touplet[1], touplet[2]) in [(t[0], t[1], t[2]) for t in touples]:
# continue
all_touples.append(touplet)
if not is_full_match(touplet, touples):
touples.append(touplet)
'''
for i in range(0, len(eTypes)):
r = np.argmax(rel[i][1:]) + 1
if r == relnameToIdx['None']:
break
s1, e1, s2, e2 = get_answer_pointers(arg1s[i], arg1e[i], arg2s[i], arg2e[i], len(src_words))
arg1 = ' '.join(src_words[s1: e1 + 1])
arg2 = ' '.join(src_words[s2: e2 + 1])
arg1 = arg1.strip()
arg2 = arg2.strip()
if arg1 == arg2:
continue
triplet = (arg1, arg2, relIdxToName[r])
all_triples.append(triplet)
if not is_full_match(triplet, triples):
triples.append(triplet)
'''
return touples, all_touples
# In[119]:
def get_F1(data, preds):
gt_pos = 0
pred_pos = 0
total_pred_pos = 0
correct_pos = 0
for i in range(0, len(data)):
# [2,45,67,10],[2,5,13,7],[(1,2,6,7),(7,8,10,10),..],[23,33,1,8]
# gt_triples = get_gt_triples(
# data[i].SrcWords, data[i].TrgRels, data[i].TrgPointers, data[i].eventTypes, data[i].argTypes)
gt_triples = get_gt_triples(
data[i].SrcWords, None, data[i].TrgPointers, data[i].eventTypes, data[i].argTypes)
#NEED INDEX CHANGES, 0-RELATIONS
# pred_triples, all_pred_triples = get_pred_triples(preds[0][i], preds[1][i], preds[2][i], preds[3][i],
# preds[4][i], preds[5][i], preds[6][i], data[i].SrcWords)
pred_triples, all_pred_triples = get_pred_triples(None,preds[0][i], preds[1][i], preds[2][i],
preds[3][i], preds[4][i], preds[5][i], data[i].SrcWords)
total_pred_pos += len(all_pred_triples)#predicted all tuples
gt_pos += len(gt_triples)#actual tuples in data
pred_pos += len(pred_triples)#predicted unique tuples
for gt_triple in gt_triples:
if is_full_match(gt_triple, pred_triples):
correct_pos += 1
print(total_pred_pos)
return pred_pos, gt_pos, correct_pos
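# get_F1() only returns raw counts; a typical way to turn them into
# precision/recall/F1 downstream might look like the sketch below (the helper name
# is illustrative and does not appear elsewhere in this script).
def _prf(pred_pos, gt_pos, correct_pos):
    precision = correct_pos / pred_pos if pred_pos > 0 else 0.0
    recall = correct_pos / gt_pos if gt_pos > 0 else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
    return precision, recall, f1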
def write_test_res(data, actual_sent, actual_data, preds, outfile):
writer = open(outfile, 'w')
for i in range(0, len(data)):
writer.write('Sentence= ' + actual_sent[i])
writer.write('\n')
writer.write('Actual= ' + actual_data[i])
writer.write('\n')
pred_triples, _ = get_pred_triples(
preds[0][i], preds[1][i], preds[2][i], preds[3][i], preds[4][i], preds[5][i], preds[6][i], data[i].SrcWords)
pred_triples_str = []
for pt in pred_triples:
pred_triples_str.append(
pt[0] + ' ; ' + pt[1] + ' ; ' + pt[2] + ' ; ' + pt[3])
writer.write('predicted: ')
writer.write(' | '.join(pred_triples_str) + '\n\n\n')
writer.close()
def load_word_embedding(embed_file, vocab):
'''
vocab: all the unique words present in the doc
embed_file: pretrained word embedding path
'''
#print('vocab length:', len(vocab))
custom_print('vocab length:', len(vocab))
embed_vocab = OrderedDict() # dictionary containing all the words and word_index
embed_matrix = list()
embed_vocab['<PAD>'] = 0
embed_matrix.append(np.zeros(word_embed_dim, dtype=np.float32))
embed_vocab['<UNK>'] = 1
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
word_idx = 2
with open(embed_file, "r") as f:
for line in f:
parts = line.split()
if len(parts) < word_embed_dim + 1:
continue
word = parts[0]
if word in vocab and vocab[word] >= word_min_freq:
vec = [np.float32(val) for val in parts[1:]]
embed_matrix.append(vec)
embed_vocab[word] = word_idx
word_idx += 1
for word in vocab:
if word not in embed_vocab and vocab[word] >= word_min_freq:
embed_matrix.append(
|
np.random.uniform(-0.25, 0.25, word_embed_dim)
|
numpy.random.uniform
|
import torch
from torch.autograd import Variable
import numpy as np
from skimage.io import (imread,
imsave)
from skimage.transform import resize
def gkern(l=5, sig=1.):
"""
Creates gaussian kernel with side length l and a sigma of sig.
Acknowledgement: https://stackoverflow.com/users/6465762/clemisch
"""
ax = np.arange(-l // 2 + 1., l // 2 + 1.)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-(xx**2 + yy**2) / (2. * sig**2))
return kernel / np.sum(kernel)
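# Quick hedged sanity check for gkern(): the returned kernel sums to 1 and peaks at
# the centre of the l x l grid.
_k = gkern(l=5, sig=1.)
assert abs(_k.sum() - 1.0) < 1e-8
assert _k.shape == (5, 5) and _k[2, 2] == _k.max()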
def get_fm_for_xy(x,y):
"""
Return a feature map corresponding to a keypt at
location (x,y).
"""
fm = np.zeros((128,128))
gauss_len = 8
gauss_std = 1 # 2
#x,y = 64, 64
kern = gkern(l=gauss_len, sig=gauss_std)
# The kernel may be bigger than the region
# of the image it is applied to, so crop it
# here if necessary.
xh, xw = fm[y-(gauss_len//2):y+(gauss_len//2),
x-(gauss_len//2):x+(gauss_len//2)].shape
kern = kern[0:xh,0:xw]
fm[y-(gauss_len//2):y+(gauss_len//2),
x-(gauss_len//2):x+(gauss_len//2)] += kern
return fm
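# Hedged usage sketch: stacking one Gaussian feature map per keypoint into a
# (num_kpts, 128, 128) array. The pixel coordinates below are made up; in this
# codebase they would come from the keypoint files, scaled to the 128px image.
_demo_kpts = [(30, 40), (64, 64), (100, 90)]
_heatmaps = np.stack([get_fm_for_xy(_x, _y) for _x, _y in _demo_kpts])
# _heatmaps.shape == (3, 128, 128)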
def read_kpt_file(filename, sep=","):
"""Return np array of keypts"""
kpts = open(filename).read().split("\n")[0:-1]
kpts = [ elem.split(sep) for elem in kpts ]
num_cols = len(kpts[0])
for entry in kpts:
for i in range(num_cols):
entry[i] = float(entry[i])
kpts = np.asarray(kpts)
return kpts
def get_data_from_id(root, mode, id_):
"""
Returns:
- img_downsized: this is the image in 128px res.
- y_keypts: the keypts in range [0, 1]. To plot
these, multiply by 128., and overlay these on
img_downsized.
- z_keypts: the z keypoints normalised.
"""
img = imread("%s/%s_img/%s.jpg" % (root,mode,id_))
keypts = read_kpt_file("%s/%s_lm/%s_lm.csv" % (root,mode,id_))
# We want the img + keypts in 128x128px img so preproc them
# accordingly.
img_downsized = resize(img, (128,128))
y_keypts = np.copy(keypts)[:,0:2]
y_keypts[:,0] = y_keypts[:,0] / float(img.shape[1]) # x's
y_keypts[:,1] = y_keypts[:,1] / float(img.shape[0]) # y's
avg_sz = (img.shape[0]+img.shape[1]) / 2.
z_keypts = keypts[:,2] / avg_sz # what range??
return img_downsized, y_keypts, z_keypts
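# Hedged usage sketch for get_data_from_id(), following its docstring: keypoints are
# returned in [0, 1], so overlaying them on the 128px image means scaling by 128.
# The root/mode/id values are placeholders for whatever layout is actually on disk,
# so this is left commented out.
# import matplotlib.pyplot as plt
# img128, kps01, _ = get_data_from_id("path/to/dataset", "train", "00001")
# plt.imshow(img128)
# plt.scatter(kps01[:, 0] * 128., kps01[:, 1] * 128., s=5, c='r')
# plt.show()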
def construct_A(src_kps, src_z_pred):
K = 66
bs = src_kps.shape[0]
# TODO: make more efficient
A = np.zeros((bs, K*2, 8))
for b in range(bs):
c = 0
for i in range(0, A.shape[1]-1, 2):
A[b, i, 0] = src_kps[b, 0, c] # xi
A[b, i, 1] = src_kps[b, 1, c] # yi
#A[i,2] = z_pred[c] # zi
A[b, i, -2] = 1.
#
A[b, i+1, 3] = src_kps[b, 0, c] # xi
A[b, i+1, 4] = src_kps[b, 1, c] # yi
#A[i+1,6] = z_pred[c] # zi
A[b, i+1, -1] = 1.
c += 1
A = torch.from_numpy(A).float()
if src_z_pred.is_cuda:
A = A.cuda()
for b in range(bs):
c = 0
for i in range(0, A.size(1)-1, 2):
A[b, i, 2] = src_z_pred[b, 0, c] # zi
A[b, i+1, 5] = src_z_pred[b, 0, c] # zi
c += 1
return A
def predict_tgt_kp_pseudoinv(xy_keypt_src,
pred_src_z,
xy_keypt_tgt):
"""
Given src keypts, predicted depths, and tgt keypts,
construct a baseline estimate of the predicted
tgt keypoints through the pseudo-inverse (fixed m)
formulation in the paper.
xy_keypt_src: (bs, 66, 2) in numpy
pred_src_z: (bs, 1, 66) in Torch
xy_keypt_tgt: (bs, 66, 2) in numpy
"""
# TODO
assert xy_keypt_src.shape[0] == 1
assert xy_keypt_tgt.shape[0] == 1
# TODO
A = construct_A(xy_keypt_src.swapaxes(1,2),
pred_src_z)
tgt_kps_f = xy_keypt_tgt.swapaxes(1,2).reshape((1, 2*66), order='F')
xt = torch.from_numpy(tgt_kps_f).float()
X1 = [torch.inverse(mat) for mat in
torch.matmul(A.transpose(2, 1), A)]
X1 = torch.stack(X1)
X2 = torch.bmm(A.transpose(2, 1), xt.unsqueeze(2))
m = torch.bmm(X1, X2) # (bs,8,1)
bs = xy_keypt_src.shape[0]
m_rshp = torch.cat((m[:, 0:6, :].reshape((bs, 2, 3)),
m[:, [6, 7], :].reshape((bs, 2, 1))),
dim=2)
ones = torch.ones((1, 1, 66)).float()
xy_keypt_src_torch = torch.from_numpy(xy_keypt_src).float()
xy_keypt_src_torch = xy_keypt_src_torch.transpose(1,2)
rht = torch.cat((xy_keypt_src_torch,
pred_src_z,
ones), dim=1)
rhs = torch.matmul(m_rshp, rht)
return rhs
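# The batched torch code above solves the normal equations m = (A^T A)^{-1} A^T x_t
# for the 8 affine parameters. As a hedged reference, the same step for a single
# example in plain numpy (random data, illustrative only):
_A = np.random.randn(132, 8)   # 2 rows per keypoint, 66 keypoints, 8 affine params
_xt = np.random.randn(132)     # flattened target keypoints
_m = np.linalg.solve(_A.T @ _A, _A.T @ _xt)
# np.allclose(_m, np.linalg.lstsq(_A, _xt, rcond=None)[0]) holds up to numerics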
def convert_keypts_66_to_68(arr):
kps_68 = np.zeros((68, 2))
kps_68[0:60] = arr[0:60] # kpts 1 to 60 is kypts 1 to 60
kps_68[60] = (arr[60-1]+arr[50-1]) / 2. # kpt 61 is the avg of kpts 60 and 50
kps_68[61] = arr[60] # kpt 62 is keypt 61
kps_68[62] = arr[61] # kpt 63 is keypt 62
kps_68[63] = arr[62] # kpt 64 is keypt 63
kps_68[64] = (arr[54-1] + arr[56-1]) / 2. # kpt 65 is the avg of kpts 54 and 56
kps_68[65] = arr[63] # kpt 66 is keypt 64
kps_68[66] = arr[64] # kpt 67 is keypt 65
kps_68[67] = arr[65] # kpt 68 is keypt 66
return kps_68
def convert_depth_66_to_68(arr):
d_68 = np.zeros((68,))
d_68[0:60] = arr[0:60] # kpts 1 to 60 is kypts 1 to 60
d_68[60] = (arr[60-1]+arr[50-1]) / 2. # kpt 61 is the avg of kpts 60 and 50
d_68[61] = arr[60] # kpt 62 is keypt 61
d_68[62] = arr[61] # kpt 63 is keypt 62
d_68[63] = arr[62] # kpt 64 is keypt 63
d_68[64] = (arr[54-1] + arr[56-1]) / 2. # kpt 65 is the avg of kpts 54 and 56
d_68[65] = arr[63] # kpt 66 is keypt 64
d_68[66] = arr[64] # kpt 67 is keypt 65
d_68[67] = arr[65] # kpt 68 is keypt 66
return d_68
def shift_matrix(shift):
mat = np.eye(4)
mat[0,-1] = shift
mat[1,-1] = shift
return mat
def scale_matrix(scale):
mat = np.eye(4)
mat[0,0] = scale
mat[1,1] = scale
return mat
def rot_matrix_x(theta):
"""
theta: measured in radians
"""
mat = np.zeros((3,3)).astype(np.float32)
mat[0, 0] = 1.
mat[1, 1] =
|
np.cos(theta)
|
numpy.cos
|
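# Hedged reference (added for clarity): rot_matrix_x above is truncated at the
# dataset's fill-in point, so everything past mat[1, 1] below follows the standard
# rotation-about-x formula rather than the author's exact code:
#   | 1      0           0      |
#   | 0  cos(theta)  -sin(theta)|
#   | 0  sin(theta)   cos(theta)|
import numpy as np
def _rot_x_reference(theta):
    mat = np.eye(3, dtype=np.float32)
    mat[1, 1] = np.cos(theta)
    mat[1, 2] = -np.sin(theta)
    mat[2, 1] = np.sin(theta)
    mat[2, 2] = np.cos(theta)
    return mat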
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal random coding (likelihood decoder) definitions.
This is the code to be used to simulate the minimum random coding algorithm. The
algorithm was introduced by Havasi et al. in "Minimal Random Code Learning:
Getting Bits Back from Compressed Model Parameters" -
https://arxiv.org/pdf/1810.00440.pdf.
For brevity, we may refer to it as 'MIRACLE', although technically this refers
to the complete model compression pipeline of Havasi et al.
"""
import numpy as np
def encoder(seed, x, number_candidates, c1, c2, gamma):
"""This is the encoder used by the miracle algorithm.
Args:
seed: The random seed to be used by the encoder.
x: The 1-dimensional input data.
number_candidates: The number of candidates to be sampled.
c1: The larger constant that the privunit density is proportional to.
c2: The smaller constant that the privunit density is proportional to.
gamma: The gamma parameter of privunit.
Returns:
k: The index sampled by the encoder.
z: The set of candidates sampled at the encoder.
pi: The distribution over the candidates for the given input data x.
"""
if x.ndim > 1:
raise ValueError(f"x must be a vector, got shape {x.shape}.")
d = x.shape[0]
rs = np.random.RandomState(seed)
# The proposal distribution is chosen to be uniform on surface of the sphere.
z = rs.normal(0, 1, (d, number_candidates))
z /= np.linalg.norm(z, axis=0)
pi = np.where(np.dot(x, z) >= gamma, c1, c2)
pi /=
|
np.sum(pi)
|
numpy.sum
|
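# Hedged, self-contained sketch of the step the truncated encoder above is building
# towards: normalise the per-candidate weights into a distribution pi and sample the
# transmitted index k from it. Shapes, seed, and the c1/c2/gamma values are made up.
import numpy as np
_rs = np.random.RandomState(0)
_x = _rs.normal(0, 1, 16)
_x /= np.linalg.norm(_x)
_z = _rs.normal(0, 1, (16, 1024))
_z /= np.linalg.norm(_z, axis=0)
_pi = np.where(np.dot(_x, _z) >= 0.3, 2.0, 0.5)
_pi /= np.sum(_pi)
_k = _rs.choice(1024, p=_pi)  # index communicated to the decoder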
import csv
import os
import struct
from types import SimpleNamespace
import numpy as np
from tqdm import tqdm
from .utils import get_files_recursively, patch_tqdm, print_error
LAYER_PARAMETERS = {
0: {
'a_step': 90 / 2 ** 22,
'n_sectors': 7,
'f_size1': 372736,
'f_offset1': 4097,
'f_size2': 352256,
'f_offset2': 4097,
'f_size3': -1,
'f_offset3': 0,
'f_size4': -1,
'f_offset4': 0,
'l_size': 256,
'l_size2': 256,
},
1: {
'a_step': 90 / 2 ** 21,
'n_sectors': 3,
'f_size1': 372736,
'f_offset1': 266241,
'f_size2': 352256,
'f_offset2': 266241,
'f_size3': 110592,
'f_offset3': 4097,
'f_size4': 90112,
'f_offset4': 4097,
'l_size': 128,
'l_size2': 128,
},
2: {
'a_step': 90 / 2 ** 20,
'n_sectors': 1,
'f_size1': 372736,
'f_offset1': 331777,
'f_size2': 352256,
'f_offset2': 331777,
'f_size3': 110592,
'f_offset3': 69633,
'f_size4': 90112,
'f_offset4': 69633,
'l_size': 64,
'l_size2': 64,
},
3: {
'a_step': 90 / 2 ** 19,
'n_sectors': 0,
'f_size1': 372736,
'f_offset1': 348161,
'f_size2': 352256,
'f_offset2': 348161,
'f_size3': 110592,
'f_offset3': 86017,
'f_size4': 90112,
'f_offset4': 86017,
'l_size': 32,
'l_size2': 32,
},
4: {
'a_step': 90 / 2 ** 18,
'n_sectors': 1,
'f_size1': 372736,
'f_offset1': 352257,
'f_size2': -1,
'f_offset2': 0,
'f_size3': 110592,
'f_offset3': 90113,
'f_size4': -1,
'f_offset4': 0,
'l_size': 64,
'l_size2': 16,
},
5: {
'a_step': 90 / 2 ** 17,
'n_sectors': 0,
'f_size1': 372736,
'f_offset1': 368641,
'f_size2': -1,
'f_offset2': 0,
'f_size3': 110592,
'f_offset3': 106497,
'f_size4': -1,
'f_offset4': 0,
'l_size': 32,
'l_size2': 8,
},
}
def run_cli(qdc_folder_path, output_path, layer, validity_codes, quite, x_correction, y_correction, z_correction,
csv_delimiter, csv_skip_headers, csv_yxz, message_queue=None):
# Patch tqdm to duplicate messages up to the passed message queue.
if message_queue:
patch_tqdm(tqdm, message_queue)
try:
# Some arguments validation
output_path_ext = os.path.splitext(output_path)[-1]
if output_path_ext.lower() not in ('.csv', '.grd'):
raise ValueError(_('Output file extension must be *.csv (CSV table) or *.grd (ESRI ASCII grid)'))
layer_parameters = SimpleNamespace(**LAYER_PARAMETERS[layer])
qdc_files = get_files_recursively(qdc_folder_path, '.qdc')
# Calculate boundaries
x_min, y_min = 32000, 32000
x_max, y_max = -32000, -32000
for qdc_file in qdc_files:
qdc_file_size = os.path.getsize(qdc_file)
if qdc_file_size in (layer_parameters.f_size1, layer_parameters.f_size2,
layer_parameters.f_size3, layer_parameters.f_size4):
with open(qdc_file, 'rb') as f_qdc:
f_qdc.seek(164)
val = struct.unpack('<h', f_qdc.read(2))[0]
x_min = min(val, x_min)
x_max = max(val, x_max)
f_qdc.seek(160)
val = struct.unpack('<h', f_qdc.read(2))[0]
y_min = min(val, y_min)
y_max = max(val, y_max)
if x_min == 32000 or y_min == 32000 \
or x_max == -32000 or y_max == -32000:
raise RuntimeError(_('No valid QDC files found!'))
x_size = (x_max - x_min + 1) * layer_parameters.l_size - 1
y_size = (y_max - y_min + 1) * layer_parameters.l_size - 1
arr_depth = np.zeros((x_size + 1, y_size + 1), dtype=np.int16)
# Calculate depth array
for qdc_file in tqdm(qdc_files, desc=_('Calculating depth map'), disable=quite):
qdc_file_size = os.path.getsize(qdc_file)
if qdc_file_size in (layer_parameters.f_size1, layer_parameters.f_size2,
layer_parameters.f_size3, layer_parameters.f_size4):
with open(qdc_file, 'rb') as f_qdc:
f_qdc.seek(164)
val = struct.unpack('<h', f_qdc.read(2))[0]
x_orig = (val - x_min) * layer_parameters.l_size2
f_qdc.seek(160)
val = struct.unpack('<h', f_qdc.read(2))[0]
y_orig = (val - y_min) * layer_parameters.l_size2
if qdc_file_size == layer_parameters.f_size1:
i = layer_parameters.f_offset1
elif qdc_file_size == layer_parameters.f_size2:
i = layer_parameters.f_offset2
elif qdc_file_size == layer_parameters.f_size3:
i = layer_parameters.f_offset3
elif qdc_file_size == layer_parameters.f_size4:
i = layer_parameters.f_offset4
for yy in range(layer_parameters.n_sectors + 1):
for xx in range(layer_parameters.n_sectors + 1):
for y in range(32):
for x in range(32):
x_abs = xx * 32 + x + x_orig
y_abs = yy * 32 + y + y_orig
f_qdc.seek(i + 1)
val_code = struct.unpack('<h', f_qdc.read(2))[0] # Read validity code
if validity_codes: # Write validity codes to array instead of depth
arr_depth[x_abs, y_abs] = val_code
else:
if val_code != 0:
f_qdc.seek(i - 1)
val_depth = struct.unpack('<h', f_qdc.read(2))[0] # Read depth in cm
arr_depth[x_abs, y_abs] = val_depth
i += 4
x_orig = x_min * 90 / 2 ** 14
y_orig = y_min * 90 / 2 ** 14
f_fix = lambda x: np.sign(x) * np.int16(
|
np.abs(x)
|
numpy.abs
|
from spb.defaults import cfg
from spb.backends.base_backend import Plot
from spb.backends.utils import compute_streamtubes
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, Normalize
import mpl_toolkits
from mpl_toolkits.mplot3d.art3d import Line3DCollection, Path3DCollection
import numpy as np
from mergedeep import merge
import itertools
"""
TODO:
1. Besides the axis on the center of the image, there are also a couple of
axis with ticks on the bottom/right sides. Delete them?
"""
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
"""
Disable show(). For use in the tests.
"""
global _show
_show = False
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend(
[intervalx.start, intervalx.start, intervalx.end, intervalx.end, None]
)
ylist.extend(
[intervaly.start, intervaly.end, intervaly.end, intervaly.start, None]
)
else:
# XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
class MatplotlibBackend(Plot):
"""
A backend for plotting SymPy's symbolic expressions using Matplotlib.
Parameters
==========
aspect : (float, float) or str, optional
Set the aspect ratio of the plot. Possible values:
* ``'auto'``: Matplotlib will fit the plot in the visible area.
* ``"equal"``: sets equal spacing on the axes of a 2D plot.
* tuple containing 2 float numbers, from which the aspect ratio is
computed.
axis_center : (float, float) or str or None, optional
Set the location of the intersection between the horizontal and
vertical axis in a 2D plot. It can be:
* ``None``: traditional layout, with the horizontal axis fixed on the
bottom and the vertical axis fixed on the left. This is the default
value.
* a tuple ``(x, y)`` specifying the exact intersection point.
* ``'center'``: center of the current plot area.
* ``'auto'``: the intersection point is automatically computed.
contour_kw : dict, optional
A dictionary of keywords/values which is passed to Matplotlib's
contour/contourf function to customize the appearance.
Refer to [#fn1]_ to learn more about customization.
image_kw : dict, optional
A dictionary of keywords/values which is passed to Matplotlib's
imshow function to customize the appearance.
Refer to [#fn2]_ to learn more about customization.
line_kw : dict, optional
A dictionary of keywords/values which is passed to Matplotlib's plot
functions to customize the appearance of the lines.
To learn more about customization:
* Refer to [#fn3]_ if the plot is using solid colors.
* Refer to [#fn4]_ if the plot is using color maps.
quiver_kw : dict, optional
A dictionary of keywords/values which is passed to Matplotlib's quivers
function to customize the appearance.
Refer to [#fn5]_ to learn more about customization.
surface_kw : dict, optional
A dictionary of keywords/values which is passed to Matplotlib's
surface function to customize the appearance.
Refer to [#fn6]_ to learn more about customization.
stream_kw : dict, optional
A dictionary of keywords/values which is passed to Matplotlib's
streamplot function to customize the appearance.
Refer to [#fn7]_ to learn more about customization.
use_cm : boolean, optional
If True, apply a color map to the mesh/surface or parametric lines.
If False, solid colors will be used instead. Default to True.
References
==========
.. [#fn1] https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.contourf.html
.. [#fn2] https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.imshow.html
.. [#fn3] https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html
.. [#fn4] https://matplotlib.org/stable/api/collections_api.html#matplotlib.collections.LineCollection
.. [#fn5] https://matplotlib.org/stable/api/quiver_api.html#module-matplotlib.quiver
.. [#fn6] https://matplotlib.org/stable/api/_as_gen/mpl_toolkits.mplot3d.axes3d.Axes3D.html#mpl_toolkits.mplot3d.axes3d.Axes3D.plot_surface
.. [#fn7] https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.streamplot.html#matplotlib.axes.Axes.streamplot
See also
========
Plot, PlotlyBackend, BokehBackend, K3DBackend
"""
_library = "matplotlib"
colormaps = [
cm.viridis, cm.autumn, cm.winter, cm.plasma, cm.jet,
cm.gnuplot, cm.brg, cm.coolwarm, cm.cool, cm.summer,
]
cyclic_colormaps = [cm.twilight, cm.hsv]
def __new__(cls, *args, **kwargs):
# Since Plot has its __new__ method, this will prevent infinite
# recursion
return object.__new__(cls)
def __init__(self, *args, **kwargs):
# set global options like title, axis labels, ...
super().__init__(*args, **kwargs)
# add colors if needed
if (len([s for s in self._series if s.is_2Dline]) > 10) and (
self.colorloop == cm.tab10
):
self.colorloop = cm.tab20
if self.axis_center is None:
self.axis_center = cfg["matplotlib"]["axis_center"]
# see self._add_handle for more info about the following dictionary
self._handles = dict()
def _init_cyclers(self):
super()._init_cyclers()
# For flexibility, spb.backends.utils.convert_colormap returns numpy
# ndarrays whenever a plotly/colorcet/k3d color map is given. Here we
# create ListedColormap objects that can be used by Matplotlib
def process_iterator(it, colormaps):
cm = []
for i in range(len(colormaps)):
c = next(it)
cm.append(c if not isinstance(c, np.ndarray) else ListedColormap(c))
return itertools.cycle(cm)
self._cm = process_iterator(self._cm, self.colormaps)
self._cyccm = process_iterator(self._cyccm, self.cyclic_colormaps)
def _create_figure(self):
# the following import is here in order to avoid a circular import error
from spb.defaults import cfg
use_jupyterthemes = cfg["matplotlib"]["use_jupyterthemes"]
mpl_jupytertheme = cfg["matplotlib"]["jupytertheme"]
if (self._get_mode() == 0) and use_jupyterthemes:
# set matplotlib style to match the used Jupyter theme
try:
from jupyterthemes import jtplot
jtplot.style(mpl_jupytertheme)
except:
pass
is_3Dvector = any([s.is_3Dvector for s in self.series])
aspect = self.aspect
if aspect != "auto":
if aspect == "equal" and is_3Dvector:
# plot_vector uses an aspect="equal" by default. In that case
# we would get:
# NotImplementedError: Axes3D currently only supports the aspect
# argument 'auto'. You passed in 1.0.
# This fixes it
aspect = "auto"
elif aspect == "equal":
aspect = 1.0
if any(s.is_3D for s in self.series):
# for 3D plots, aspect must be "auto"
aspect = "auto"
else:
aspect = float(aspect[1]) / aspect[0]
if self._kwargs.get("fig", None) is not None:
# We assume we are generating a PlotGrid object, hence the figure
# and the axes are provided by the user.
self._fig = self._kwargs.pop("fig", None)
self.ax = self._kwargs.pop("ax", None)
else:
self._fig = plt.figure(figsize=self.size)
is_3D = [s.is_3D for s in self.series]
if any(is_3D) and (not all(is_3D)):
raise ValueError("The matplotlib backend can not mix 2D and 3D.")
kwargs = dict(aspect=aspect)
if all(is_3D):
kwargs["projection"] = "3d"
self.ax = self._fig.add_subplot(1, 1, 1, **kwargs)
@property
def fig(self):
"""Returns the objects used to render/display the plots"""
return self._fig, self.ax
@staticmethod
def get_segments(x, y, z=None):
"""
Convert two list of coordinates to a list of segments to be used
with Matplotlib's LineCollection.
Parameters
==========
x: list
List of x-coordinates
y: list
List of y-coordinates
z: list
List of z-coordinates for a 3D line.
"""
if z is not None:
dim = 3
points = (x, y, z)
else:
dim = 2
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
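# Hedged usage note (kept as a comment since we are inside the class body): for N
# input points, get_segments returns an (N-1, 2, dim) array of consecutive point
# pairs, which is the format LineCollection/Line3DCollection expect, e.g.
#   x = np.linspace(0, 1, 50); y = x**2
#   segs = MatplotlibBackend.get_segments(x, y)   # shape (49, 2, 2)
#   lc = LineCollection(segs, array=y[:-1], cmap=cm.viridis)
#   ax.add_collection(lc)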
def _add_colorbar(self, c, label, override=False):
"""Add a colorbar for the specificied collection
Keyword Aurguments
==================
override : boolean
For parametric plots the colorbar acts like a legend. Hence,
when legend=False we don't display the colorbar. However,
for contour plots the colorbar is essential to understand it.
Hence, to show it we set override=True.
Default to False.
"""
# design choice: instead of showing a legend entry (which
# would require working with proxy artists and custom
# classes in order to create a gradient line), just show a
# colorbar with the name of the expression on the side.
if (self.legend and self._use_cm) or override:
# TODO: colorbar position? used space?
cb = self._fig.colorbar(c, ax=self.ax)
cb.set_label(label, rotation=90)
return True
return False
def _add_handle(self, i, h, kw=None, *args):
"""self._handle is a dictionary where:
key: integer corresponding to the i-th series.
value: a list of two or more elements:
1. handle of the object created by Matplotlib commands
2. optionally, keyword arguments used to create the handle.
Some objects can't be updated, hence we need to reconstruct
it from scratch at every update.
3. anything else needed to reconstruct the object.
This dictionary will be used with iplot
"""
self._handles[i] = [h if not isinstance(h, (list, tuple)) else h[0], kw, *args]
def _process_series(self, series):
# XXX Workaround for matplotlib issue
# https://github.com/matplotlib/matplotlib/issues/17130
xlims, ylims, zlims = [], [], []
self.ax.cla()
self._init_cyclers()
for i, s in enumerate(series):
if s.is_2Dline:
line_kw = self._kwargs.get("line_kw", dict())
if s.is_parametric and self._use_cm:
x, y, param = s.get_data()
x, y, _ = self._detect_poles(x, y)
colormap = (
next(self._cyccm)
if self._use_cyclic_cm(param, s.is_complex)
else next(self._cm)
)
lkw = dict(array=param, cmap=colormap)
kw = merge({}, lkw, line_kw)
segments = self.get_segments(x, y)
c = LineCollection(segments, **kw)
self.ax.add_collection(c)
is_cb_added = self._add_colorbar(c, s.label)
self._add_handle(i, c, kw, is_cb_added, self._fig.axes[-1])
else:
if s.is_parametric:
x, y, param = s.get_data()
else:
x, y = s.get_data()
x, y, _ = self._detect_poles(x, y)
lkw = dict(label=s.label, color=next(self._cl))
if s.is_point:
lkw["marker"] = "o"
lkw["linestyle"] = "None"
l = self.ax.plot(x, y, **merge({}, lkw, line_kw))
self._add_handle(i, l)
elif s.is_contour and (not s.is_complex):
x, y, z = s.get_data()
ckw = dict(cmap=next(self._cm))
contour_kw = self._kwargs.get("contour_kw", dict())
kw = merge({}, ckw, contour_kw)
c = self.ax.contourf(x, y, z, **kw)
self._add_colorbar(c, s.label, True)
self._add_handle(i, c, kw, self._fig.axes[-1])
elif s.is_contour and s.is_complex:
# this is specifically tailored to create cplot-like contour
# lines for magnitude/argument of a complex function.
x, y, z, r = s.get_data()
ckw = dict(colors="#a0a0a050", linestyles="solid")
contour_kw = self._kwargs.get("contour_kw", dict())
kw = merge({}, ckw, contour_kw)
levels = contour_kw.pop("levels", None)
if levels is None:
if s.abs:
ckw["levels"] = s.abs_levels
kw = merge({}, ckw, contour_kw)
c = self.ax.contour(x, y, z, **kw)
else:
levels = s.arg_levels
if len(levels) > 0:
c = self.ax.contour(
x, y, s.angle_func(r), levels=levels, **kw
)
for level, allseg in zip(levels, c.allsegs):
for segment in allseg:
xx, yy = segment.T
zz = xx + 1j * yy
angle = s.angle_func(s.function(zz))
# cut off segments close to the branch cut
is_near_branch_cut = np.logical_or(
*[
np.abs(angle - bc) < np.abs(angle - level)
for bc in s.angle_range
]
)
segment[is_near_branch_cut] = np.nan
self._add_handle(i, c, kw, self._fig.axes[-1])
elif s.is_3Dline:
x, y, z, param = s.get_data()
lkw = dict()
line_kw = self._kwargs.get("line_kw", dict())
if len(x) > 1:
if self._use_cm:
segments = self.get_segments(x, y, z)
lkw["cmap"] = next(self._cm)
lkw["array"] = param
c = Line3DCollection(segments, **merge({}, lkw, line_kw))
self.ax.add_collection(c)
self._add_colorbar(c, s.label)
self._add_handle(i, c)
else:
lkw["label"] = s.label
l = self.ax.plot(x, y, z, **merge({}, lkw, line_kw))
self._add_handle(i, l)
else:
# 3D points
lkw["label"] = s.label
lkw["color"] = next(self._cl)
l = self.ax.scatter(x, y, z, **merge({}, lkw, line_kw))
self._add_handle(i, l)
xlims.append((np.amin(x), np.amax(x)))
ylims.append((
|
np.amin(y)
|
numpy.amin
|
"""Urban Flood Risk Mitigation model."""
import logging
import os
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import pygeoprocessing
import taskgraph
import numpy
import scipy
import rtree
import shapely.wkb
import shapely.prepared
from . import validation
from . import utils
LOGGER = logging.getLogger(__name__)
ARGS_SPEC = {
"model_name": "Urban Flood Risk Mitigation",
"module": __name__,
"userguide_html": "urban_flood_risk_mitigation.html",
"args_with_spatial_overlap": {
"spatial_keys": ["aoi_watersheds_path", "lulc_path",
"built_infrastructure_vector_path",
"soils_hydrological_group_raster_path"],
"different_projections_ok": True,
},
"args": {
"workspace_dir": validation.WORKSPACE_SPEC,
"results_suffix": validation.SUFFIX_SPEC,
"n_workers": validation.N_WORKERS_SPEC,
"aoi_watersheds_path": {
"type": "vector",
"required": True,
"about": (
"Path to a vector of (sub)watersheds or sewersheds used to "
"indicate spatial area of interest."),
"name": "Watershed Vector"
},
"rainfall_depth": {
"validation_options": {
"expression": "value > 0",
},
"type": "number",
"required": True,
"about": "Depth of rainfall in mm.",
"name": "Depth of rainfall in mm"
},
"lulc_path": {
"type": "raster",
"validation_options": {
"projected": True,
},
"required": True,
"about": "Path to a landcover raster",
"name": "Landcover Raster"
},
"soils_hydrological_group_raster_path": {
"type": "raster",
"required": True,
"validation_options": {
"projected": True,
},
"about": (
"Raster with values equal to 1, 2, 3, 4, corresponding to "
"soil hydrologic group A, B, C, or D, respectively (used to "
"derive the CN number)"),
"name": "Soils Hydrological Group Raster"
},
"curve_number_table_path": {
"validation_options": {
"required_fields": ["lucode", "CN_A", "CN_B", "CN_C", "CN_D"],
},
"type": "csv",
"required": True,
"about": (
"Path to a CSV table that to map landcover codes to curve "
"numbers and contains at least the headers 'lucode', "
"'CN_A', 'CN_B', 'CN_C', 'CN_D'"),
"name": "Biophysical Table"
},
"built_infrastructure_vector_path": {
"validation_options": {
"required_fields": ["type"],
},
"type": "vector",
"required": False,
"about": (
"Path to a vector with built infrastructure footprints. "
"Attribute table contains a column 'Type' with integers "
"(e.g. 1=residential, 2=office, etc.)."),
"name": "Built Infrastructure Vector"
},
"infrastructure_damage_loss_table_path": {
"validation_options": {
"required_fields": ["type", "damage"],
},
"type": "csv",
"required": "built_infrastructure_vector_path",
"about": (
"Path to a a CSV table with columns 'Type' and 'Damage' with "
"values of built infrastructure type from the 'Type' field "
"in the 'Built Infrastructure Vector' and potential damage "
"loss (in $/m^2). Required if the built infrastructure vector "
"is provided."),
"name": "Built Infrastructure Damage Loss Table"
}
}
}
def execute(args):
"""Urban Flood Risk Mitigation model.
The model computes the peak flow attenuation for each pixel, delineates
areas benefiting from this service, then calculates the monetary value of
potential avoided damage to built infrastructure.
Args:
args['workspace_dir'] (string): a path to the directory that will
write output and other temporary files during calculation.
args['results_suffix'] (string): appended to any output file name.
args['aoi_watersheds_path'] (string): path to a shapefile of
(sub)watersheds or sewersheds used to indicate spatial area of
interest.
args['rainfall_depth'] (float): depth of rainfall in mm.
args['lulc_path'] (string): path to a landcover raster.
args['soils_hydrological_group_raster_path'] (string): Raster with
values equal to 1, 2, 3, 4, corresponding to soil hydrologic group
A, B, C, or D, respectively (used to derive the CN number).
args['curve_number_table_path'] (string): path to a CSV table that
contains at least the headers 'lucode', 'CN_A', 'CN_B', 'CN_C',
'CN_D'.
args['built_infrastructure_vector_path'] (string): (optional) path to
a vector with built infrastructure footprints. Attribute table
contains a column 'Type' with integers (e.g. 1=residential,
2=office, etc.).
args['infrastructure_damage_loss_table_path'] (string): (optional)
path to a CSV table with columns 'Type' and 'Damage' with values
of built infrastructure type from the 'Type' field in
``args['built_infrastructure_vector_path']`` and potential damage
loss (in $/m^2).
args['n_workers'] (int): (optional) if present, indicates how many
worker processes should be used in parallel processing. -1
indicates single process mode, 0 is single process but
non-blocking mode, and >= 1 is number of processes.
Returns:
None.
"""
invalid_parameters = validate(args)
if invalid_parameters:
raise ValueError("Invalid parameters passed: %s" % invalid_parameters)
file_suffix = utils.make_suffix_string(args, 'results_suffix')
temporary_working_dir = os.path.join(
args['workspace_dir'], 'temp_working_dir_not_for_humans')
intermediate_dir = os.path.join(
args['workspace_dir'], 'intermediate_files')
utils.make_directories([
args['workspace_dir'], intermediate_dir, temporary_working_dir])
try:
n_workers = int(args['n_workers'])
except (KeyError, ValueError, TypeError):
# KeyError when n_workers is not present in args
# ValueError when n_workers is an empty string.
# TypeError when n_workers is None.
n_workers = -1 # Synchronous mode.
task_graph = taskgraph.TaskGraph(temporary_working_dir, n_workers)
# Align LULC with soils
aligned_lulc_path = os.path.join(
temporary_working_dir, 'aligned_lulc%s.tif' % file_suffix)
aligned_soils_path = os.path.join(
temporary_working_dir,
'aligned_soils_hydrological_group%s.tif' % file_suffix)
lulc_raster_info = pygeoprocessing.get_raster_info(
args['lulc_path'])
target_pixel_size = lulc_raster_info['pixel_size']
pixel_area = abs(target_pixel_size[0] * target_pixel_size[1])
target_sr_wkt = lulc_raster_info['projection']
soil_raster_info = pygeoprocessing.get_raster_info(
args['soils_hydrological_group_raster_path'])
align_raster_stack_task = task_graph.add_task(
func=pygeoprocessing.align_and_resize_raster_stack,
args=(
[args['lulc_path'], args['soils_hydrological_group_raster_path']],
[aligned_lulc_path, aligned_soils_path],
['mode', 'mode'],
target_pixel_size, 'intersection'),
kwargs={
'target_sr_wkt': target_sr_wkt,
'base_vector_path_list': [args['aoi_watersheds_path']],
'raster_align_index': 0},
target_path_list=[aligned_lulc_path, aligned_soils_path],
task_name='align raster stack')
# Load CN table
cn_table = utils.build_lookup_from_csv(
args['curve_number_table_path'], 'lucode')
# make cn_table into a 2d array where first dim is lucode, second is
# 0..3 to correspond to CN_A..CN_D
data = []
row_ind = []
col_ind = []
for lucode in cn_table:
data.extend([
cn_table[lucode]['cn_%s' % soil_id]
for soil_id in ['a', 'b', 'c', 'd']])
row_ind.extend([int(lucode)] * 4)
col_ind = [0, 1, 2, 3] * (len(row_ind) // 4)
lucode_to_cn_table = scipy.sparse.csr_matrix((data, (row_ind, col_ind)))
cn_nodata = -1
lucode_nodata = lulc_raster_info['nodata'][0]
soil_type_nodata = soil_raster_info['nodata'][0]
cn_raster_path = os.path.join(
temporary_working_dir, 'cn_raster%s.tif' % file_suffix)
align_raster_stack_task.join()
cn_raster_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(
[(aligned_lulc_path, 1), (aligned_soils_path, 1),
(lucode_nodata, 'raw'), (soil_type_nodata, 'raw'),
(cn_nodata, 'raw'), (lucode_to_cn_table, 'raw')], _lu_to_cn_op,
cn_raster_path, gdal.GDT_Float32, cn_nodata),
target_path_list=[cn_raster_path],
dependent_task_list=[align_raster_stack_task],
task_name='create Curve Number raster')
# Generate S_max
s_max_nodata = -9999
s_max_raster_path = os.path.join(
temporary_working_dir, 's_max%s.tif' % file_suffix)
s_max_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(
[(cn_raster_path, 1), (cn_nodata, 'raw'), (s_max_nodata, 'raw')],
_s_max_op, s_max_raster_path, gdal.GDT_Float32, s_max_nodata),
target_path_list=[s_max_raster_path],
dependent_task_list=[cn_raster_task],
task_name='create S_max')
# Generate Qpi
q_pi_nodata = -9999.
q_pi_raster_path = os.path.join(
args['workspace_dir'], 'Q_mm%s.tif' % file_suffix)
q_pi_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(
[(float(args['rainfall_depth']), 'raw'), (s_max_raster_path, 1),
(s_max_nodata, 'raw'), (q_pi_nodata, 'raw')], _q_pi_op,
q_pi_raster_path, gdal.GDT_Float32, q_pi_nodata),
target_path_list=[q_pi_raster_path],
dependent_task_list=[s_max_task],
task_name='create Q_mm.tif')
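# The _s_max_op and _q_pi_op callbacks are defined elsewhere in this module. As a
# hedged reference, the standard SCS curve-number relationships they are based on
# are (all depths in mm, CN dimensionless):
#   S_max = 25400 / CN - 254
#   Q = (P - 0.2 * S_max)**2 / (P + 0.8 * S_max)   if P > 0.2 * S_max, else 0
# Worked example: CN = 80, P = 50 mm -> S_max = 63.5 mm,
#   Q = (50 - 12.7)**2 / (50 + 50.8) ~= 13.8 mm of runoff.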
# Generate Runoff Retention
runoff_retention_nodata = -9999.
runoff_retention_raster_path = os.path.join(
args['workspace_dir'], 'Runoff_retention%s.tif' % file_suffix)
runoff_retention_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([
(q_pi_raster_path, 1), (float(args['rainfall_depth']), 'raw'),
(q_pi_nodata, 'raw'), (runoff_retention_nodata, 'raw')],
_runoff_retention_op, runoff_retention_raster_path,
gdal.GDT_Float32, runoff_retention_nodata),
target_path_list=[runoff_retention_raster_path],
dependent_task_list=[q_pi_task],
task_name='generate runoff retention')
# calculate runoff retention volume
runoff_retention_vol_raster_path = os.path.join(
args['workspace_dir'], 'Runoff_retention_m3%s.tif' % file_suffix)
runoff_retention_vol_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=([
(runoff_retention_raster_path, 1),
(runoff_retention_nodata, 'raw'),
(float(args['rainfall_depth']), 'raw'),
(abs(target_pixel_size[0]*target_pixel_size[1]), 'raw'),
(runoff_retention_nodata, 'raw')], _runoff_retention_vol_op,
runoff_retention_vol_raster_path, gdal.GDT_Float32,
runoff_retention_nodata),
target_path_list=[runoff_retention_vol_raster_path],
dependent_task_list=[runoff_retention_task],
task_name='calculate runoff retention vol')
# calculate flood vol raster
flood_vol_raster_path = os.path.join(
intermediate_dir, 'Q_m3%s.tif' % file_suffix)
flood_vol_nodata = -1
flood_vol_task = task_graph.add_task(
func=pygeoprocessing.raster_calculator,
args=(
[(q_pi_raster_path, 1), (q_pi_nodata, 'raw'),
(pixel_area, 'raw'), (flood_vol_nodata, 'raw')],
_flood_vol_op, flood_vol_raster_path, gdal.GDT_Float32,
flood_vol_nodata),
target_path_list=[flood_vol_raster_path],
dependent_task_list=[q_pi_task],
task_name='calculate service built raster')
reprojected_aoi_path = os.path.join(
intermediate_dir, 'reprojected_aoi.gpkg')
reprojected_aoi_task = task_graph.add_task(
func=pygeoprocessing.reproject_vector,
args=(
args['aoi_watersheds_path'],
target_sr_wkt,
reprojected_aoi_path),
kwargs={'driver_name': 'GPKG'},
target_path_list=[reprojected_aoi_path],
task_name='reproject aoi/watersheds')
# Determine flood_volume over the watershed
flood_volume_in_aoi_task = task_graph.add_task(
func=pygeoprocessing.zonal_statistics,
args=(
(flood_vol_raster_path, 1),
reprojected_aoi_path),
dependent_task_list=[flood_vol_task, reprojected_aoi_task],
task_name='zonal_statistics over the flood_volume raster')
runoff_retention_stats_task = task_graph.add_task(
func=pygeoprocessing.zonal_statistics,
args=(
(runoff_retention_raster_path, 1),
reprojected_aoi_path),
dependent_task_list=[runoff_retention_task],
task_name='zonal_statistics over runoff_retention raster')
runoff_retention_volume_stats_task = task_graph.add_task(
func=pygeoprocessing.zonal_statistics,
args=(
(runoff_retention_vol_raster_path, 1),
reprojected_aoi_path),
dependent_task_list=[runoff_retention_vol_task],
task_name='zonal_statistics over runoff_retention_volume raster')
damage_per_aoi_stats = None
flood_volume_stats = flood_volume_in_aoi_task.get()
summary_tasks = [
flood_volume_in_aoi_task,
runoff_retention_stats_task,
runoff_retention_volume_stats_task]
if 'built_infrastructure_vector_path' in args and (
args['built_infrastructure_vector_path'] not in ('', None)):
# Reproject the built infrastructure vector to the target SRS.
reprojected_structures_path = os.path.join(
intermediate_dir, 'structures_reprojected.gpkg')
reproject_built_infrastructure_task = task_graph.add_task(
func=pygeoprocessing.reproject_vector,
args=(args['built_infrastructure_vector_path'],
target_sr_wkt,
reprojected_structures_path),
kwargs={'driver_name': 'GPKG'},
target_path_list=[reprojected_structures_path],
task_name='reproject built infrastructure to target SRS')
# determine the total damage to all infrastructure in the watershed/AOI
damage_to_infrastructure_in_aoi_task = task_graph.add_task(
func=_calculate_damage_to_infrastructure_in_aoi,
args=(reprojected_aoi_path,
reprojected_structures_path,
args['infrastructure_damage_loss_table_path']),
dependent_task_list=[
reprojected_aoi_task,
reproject_built_infrastructure_task],
task_name='calculate damage to infrastructure in aoi')
damage_per_aoi_stats = damage_to_infrastructure_in_aoi_task.get()
# It isn't strictly necessary for us to append this task to
# ``summary_tasks`` here, since the ``.get()`` calls below will block
# until those tasks complete. I'm adding these tasks here anyway
# "just in case".
summary_tasks.append(damage_to_infrastructure_in_aoi_task)
summary_vector_path = os.path.join(
args['workspace_dir'], 'flood_risk_service%s.shp' % file_suffix)
_ = task_graph.add_task(
func=_write_summary_vector,
args=(reprojected_aoi_path,
summary_vector_path),
kwargs={
'runoff_ret_stats': runoff_retention_stats_task.get(),
'runoff_ret_vol_stats': runoff_retention_volume_stats_task.get(),
'damage_per_aoi_stats': damage_per_aoi_stats,
'flood_volume_stats': flood_volume_stats,
},
target_path_list=[summary_vector_path],
task_name='write summary stats to flood_risk_service.shp',
dependent_task_list=summary_tasks)
task_graph.close()
task_graph.join()
def _write_summary_vector(
source_aoi_vector_path, target_vector_path, runoff_ret_stats,
runoff_ret_vol_stats, flood_volume_stats, damage_per_aoi_stats=None):
"""Write a vector with summary statistics.
This vector will always contain three fields::
* ``'flood_vol'``: The volume of flood (runoff), in m3, per watershed.
* ``'rnf_rt_idx'``: Average of runoff retention values per watershed
* ``'rnf_rt_m3'``: Sum of runoff retention volumes, in m3,
per watershed.
If ``damage_per_aoi_stats`` is provided, then these additional columns will
be written to the vector::
* ``'aff.bld'``: Potential damage to built infrastructure in $,
per watershed.
* ``'serv.blt'``: Spatial indicator of the importance of the runoff
retention service
Args:
source_aoi_vector_path (str): The path to a GDAL vector that exists on
disk.
target_vector_path (str): The path to a vector that will be
created. If a file already exists at this path, it will be deleted
before the new file is created. This filepath must end with the
extension ``.shp``, as the file created will be an ESRI Shapefile.
        runoff_ret_stats (dict): A dict of summary statistics of the runoff
            retention raster, mapping feature IDs from
            ``source_aoi_vector_path`` to dicts with ``'count'`` and
            ``'sum'`` keys.
        runoff_ret_vol_stats (dict): A dict of summary statistics of the
            runoff retention volume raster, mapping feature IDs from
            ``source_aoi_vector_path`` to dicts with ``'count'`` and
            ``'sum'`` keys.
        flood_volume_stats (dict): A dict mapping feature IDs from
            ``source_aoi_vector_path`` to dicts with ``'count'`` and
            ``'sum'`` keys representing the flood volume over each AOI
            feature.
damage_per_aoi_stats (dict): A dict mapping feature IDs from
``source_aoi_vector_path`` to float values representing the total
damage to built infrastructure in that watershed.
Returns:
``None``
"""
source_aoi_vector = gdal.OpenEx(source_aoi_vector_path, gdal.OF_VECTOR)
source_aoi_layer = source_aoi_vector.GetLayer()
source_geom_type = source_aoi_layer.GetGeomType()
source_srs_wkt = pygeoprocessing.get_vector_info(
source_aoi_vector_path)['projection']
source_srs = osr.SpatialReference()
source_srs.ImportFromWkt(source_srs_wkt)
esri_driver = gdal.GetDriverByName('ESRI Shapefile')
target_watershed_vector = esri_driver.Create(
target_vector_path, 0, 0, 0, gdal.GDT_Unknown)
layer_name = str(os.path.splitext(os.path.basename(
target_vector_path))[0])
LOGGER.debug("creating layer %s", layer_name)
target_watershed_layer = target_watershed_vector.CreateLayer(
str(layer_name), source_srs, source_geom_type)
target_fields = ['rnf_rt_idx', 'rnf_rt_m3', 'flood_vol']
if not damage_per_aoi_stats:
damage_per_aoi_stats = {}
else:
target_fields += ['aff.bld', 'serv.blt']
for field_name in target_fields:
field_def = ogr.FieldDefn(field_name, ogr.OFTReal)
field_def.SetWidth(36)
field_def.SetPrecision(11)
target_watershed_layer.CreateField(field_def)
target_layer_defn = target_watershed_layer.GetLayerDefn()
for base_feature in source_aoi_layer:
feature_id = base_feature.GetFID()
target_feature = ogr.Feature(target_layer_defn)
base_geom_ref = base_feature.GetGeometryRef()
target_feature.SetGeometry(base_geom_ref.Clone())
base_geom_ref = None
if feature_id in runoff_ret_stats:
pixel_count = runoff_ret_stats[feature_id]['count']
if pixel_count > 0:
mean_value = (
runoff_ret_stats[feature_id]['sum'] / float(pixel_count))
target_feature.SetField('rnf_rt_idx', float(mean_value))
if feature_id in runoff_ret_vol_stats:
target_feature.SetField(
'rnf_rt_m3', float(
runoff_ret_vol_stats[feature_id]['sum']))
if feature_id in damage_per_aoi_stats:
pixel_count = runoff_ret_vol_stats[feature_id]['count']
if pixel_count > 0:
damage_sum = damage_per_aoi_stats[feature_id]
target_feature.SetField('aff.bld', damage_sum)
                # The service.built ('serv.blt') metric: the damage to built
                # infrastructure weighted by the runoff retention volume in
                # this watershed.
target_feature.SetField(
'serv.blt', (
damage_sum * runoff_ret_vol_stats[feature_id]['sum']))
if feature_id in flood_volume_stats:
target_feature.SetField(
'flood_vol', float(flood_volume_stats[feature_id]['sum']))
target_watershed_layer.CreateFeature(target_feature)
target_watershed_layer.SyncToDisk()
target_watershed_layer = None
target_watershed_vector = None
def _calculate_damage_to_infrastructure_in_aoi(
aoi_vector_path, structures_vector_path, structures_damage_table):
"""Determine the damage to infrastructure in each AOI feature.
Args:
aoi_vector_path (str): Path to a GDAL vector of AOI or watershed
polygons. Must be in the same projection as
``structures_vector_path``.
structures_vector_path (str): Path to a GDAL vector of built
infrastructure polygons. Must be in the same projection as
``aoi_vector_path``. Must have a ``Type`` column matching a type
in the ``structures_damage_table`` table.
structures_damage_table (str): Path to a CSV containing information
about the damage to each type of structure. This table must have
the ``Type`` and ``Damage`` columns.
Returns:
A ``dict`` mapping the FID of geometries in ``aoi_vector_path`` with
the ``float`` total damage to infrastructure in that AOI/watershed.
"""
infrastructure_vector = gdal.OpenEx(structures_vector_path, gdal.OF_VECTOR)
infrastructure_layer = infrastructure_vector.GetLayer()
damage_type_map = utils.build_lookup_from_csv(
structures_damage_table, 'type', to_lower=True)
infrastructure_layer_defn = infrastructure_layer.GetLayerDefn()
type_index = -1
for field_defn in infrastructure_layer.schema:
field_name = field_defn.GetName()
if field_name.lower() == 'type':
type_index = infrastructure_layer_defn.GetFieldIndex(field_name)
break
if type_index == -1:
raise ValueError(
"Could not find field 'Type' in %s" % structures_vector_path)
structures_index = rtree.index.Index(interleaved=True)
for infrastructure_feature in infrastructure_layer:
infrastructure_geometry = infrastructure_feature.GetGeometryRef()
# We've had a case on the forums where a user provided an
# infrastructure vector with either invalid or missing geometries. This
# allows us to handle these in the model run itself.
if not infrastructure_geometry:
LOGGER.debug(
'Infrastructure feature %s has no geometry; skipping.',
infrastructure_feature.GetFID())
continue
shapely_geometry = shapely.wkb.loads(
infrastructure_geometry.ExportToWkb())
structures_index.insert(
infrastructure_feature.GetFID(), shapely_geometry.bounds)
aoi_vector = gdal.OpenEx(aoi_vector_path, gdal.OF_VECTOR)
aoi_layer = aoi_vector.GetLayer()
aoi_damage = {}
for aoi_feature in aoi_layer:
aoi_geometry = aoi_feature.GetGeometryRef()
aoi_geometry_shapely = shapely.wkb.loads(aoi_geometry.ExportToWkb())
aoi_geometry_prep = shapely.prepared.prep(aoi_geometry_shapely)
total_damage = 0.0
for infrastructure_fid in structures_index.intersection(
aoi_geometry_shapely.bounds):
infrastructure_feature = infrastructure_layer.GetFeature(
infrastructure_fid)
infrastructure_geometry = shapely.wkb.loads(
infrastructure_feature.GetGeometryRef().ExportToWkb())
if aoi_geometry_prep.intersects(infrastructure_geometry):
intersection_geometry = aoi_geometry_shapely.intersection(
infrastructure_geometry)
damage_type = infrastructure_feature.GetField(type_index)
damage = damage_type_map[damage_type]['damage']
total_damage += intersection_geometry.area * damage
aoi_damage[aoi_feature.GetFID()] = total_damage
return aoi_damage
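# Illustrative sketch (added note, not from the original source): the damage
# table read by ``utils.build_lookup_from_csv`` above is keyed on a lowercased
# ``type`` column and must provide a ``damage`` value (currency per unit area)
# per structure type.  A hypothetical CSV could look like:
#
#     Type,Damage
#     residential,120.5
#     commercial,250.0
#     industrial,310.75
#
# so that ``damage_type_map['residential']['damage']`` evaluates to 120.5.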
def _flood_vol_op(
q_pi_array, q_pi_nodata, pixel_area, target_nodata):
"""Calculate vol of flood water.
Parmeters:
rainfall_depth (float): depth of rainfall in mm.
q_pi_array (numpy.ndarray): quick flow array.
q_pi_nodata (float): nodata for q_pi.
pixel_area (float): area of pixel in m^2.
target_nodata (float): output nodata value.
Returns:
numpy array of flood volume per pixel in m^3.
"""
result = numpy.empty(q_pi_array.shape, dtype=numpy.float32)
result[:] = target_nodata
valid_mask = q_pi_array != q_pi_nodata
# 0.001 converts mm (quickflow) to m (pixel area units)
result[valid_mask] = (
q_pi_array[valid_mask] * pixel_area * 0.001)
return result
def _runoff_retention_vol_op(
runoff_retention_array, runoff_retention_nodata, p_value,
cell_area, target_nodata):
"""Calculate peak flow retention as a vol.
Args:
runoff_retention_array (numpy.ndarray): proportion of pixel retention.
runoff_retention_nodata (float): nodata value for corresponding array.
p_value (float): precipitation depth in mm.
cell_area (float): area of cell in m^2.
target_nodata (float): target nodata to write.
Returns:
        ndarray of runoff retention volume per pixel, in m^3
        (runoff_retention * p_value * cell_area * 1e-3).
"""
result = numpy.empty(runoff_retention_array.shape, dtype=numpy.float32)
result[:] = target_nodata
valid_mask = runoff_retention_array != runoff_retention_nodata
# the 1e-3 converts the mm of p_value to meters.
result[valid_mask] = (
runoff_retention_array[valid_mask] * p_value * cell_area * 1e-3)
return result
def _runoff_retention_op(q_pi_array, p_value, q_pi_nodata, result_nodata):
"""Calculate peak flow retention.
Args:
q_pi_array (numpy.ndarray): quick flow array.
        p_value (float): precipitation depth in mm.
        q_pi_nodata (float): nodata for q_pi.
        result_nodata (float): output nodata value.
Returns:
1.0 - q_pi/p
"""
result = numpy.empty_like(q_pi_array)
result[:] = result_nodata
valid_mask = numpy.ones(q_pi_array.shape, dtype=numpy.bool)
if q_pi_nodata is not None:
valid_mask[:] = ~numpy.isclose(q_pi_array, q_pi_nodata)
result[valid_mask] = 1.0 - (q_pi_array[valid_mask] / p_value)
return result
def _q_pi_op(p_value, s_max_array, s_max_nodata, result_nodata):
"""Calculate peak flow Q (mm) with the Curve Number method.
Args:
p_value (float): precipitation in mm.
s_max_array (numpy.ndarray): max S value per pixel.
s_max_nodata (float): nodata value for s_max_array.
result_nodata (float): return value nodata.
Returns:
ndarray of peak flow.
"""
lam = 0.2 # this value of lambda is hard-coded in the design doc.
result = numpy.empty_like(s_max_array)
result[:] = result_nodata
zero_mask = (p_value <= lam * s_max_array)
non_nodata_mask = numpy.ones(s_max_array.shape, dtype=numpy.bool)
if s_max_nodata is not None:
non_nodata_mask[:] = ~numpy.isclose(s_max_array, s_max_nodata)
# valid if not nodata and not going to be set to 0.
valid_mask = non_nodata_mask & ~zero_mask
result[valid_mask] = (
p_value - lam * s_max_array[valid_mask])**2.0 / (
p_value + (1 - lam) * s_max_array[valid_mask])
# any non-nodata result that should be zero is set so.
result[zero_mask & non_nodata_mask] = 0.0
return result
def _s_max_op(cn_array, cn_nodata, result_nodata):
"""Calculate S_max from the curve number.
Args:
cn_array (numpy.ndarray): curve number array.
cn_nodata (float): nodata value for cn_array.
result_nodata (float): output nodata value.
    Returns:
        ndarray of S_max calculated from the curve number.
"""
result = numpy.empty_like(cn_array)
result[:] = result_nodata
zero_mask = cn_array == 0
valid_mask = ~zero_mask
if cn_nodata is not None:
valid_mask[:] &= ~numpy.isclose(cn_array, cn_nodata)
result[valid_mask] = 25400.0 / cn_array[valid_mask] - 254.0
result[zero_mask] = 0.0
return result
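# Worked example (added for clarity, not part of the original model code): for
# a curve number of 80 and a 100 mm design storm, the raster ops above give,
# per pixel:
#
#     s_max = 25400.0 / 80 - 254.0                                  # = 63.5 mm
#     lam = 0.2
#     q_pi = (100 - lam * 63.5) ** 2 / (100 + (1 - lam) * 63.5)     # ~= 50.5 mm
#     runoff_retention = 1.0 - q_pi / 100                           # ~= 0.49
#
# i.e. roughly half of the storm depth is retained on that pixel.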
def _lu_to_cn_op(
lucode_array, soil_type_array, lucode_nodata, soil_type_nodata,
cn_nodata, lucode_to_cn_table):
"""Map combination landcover soil type map to curve number raster.
Args:
lucode_array (numpy.ndarray): array of landcover codes.
soil_type_array (numpy.ndarray): array of soil type values.
lucode_nodata (float): nodata value for corresponding array.
soil_type_nodata (float): nodata value for corresponding array.
cn_nodata (float): nodata value for return value array.
        lucode_to_cn_table (scipy.sparse.csr.csr_matrix): sparse lookup table
            of curve numbers indexed by landcover code and soil type.
Returns:
        ndarray of curve numbers, found by looking up each landcover code and
        soil type combination in the table.
"""
result = numpy.empty_like(lucode_array, dtype=numpy.float32)
result[:] = cn_nodata
valid_mask = numpy.ones(lucode_array.shape, dtype=numpy.bool)
if lucode_nodata is not None:
valid_mask[:] &= ~numpy.isclose(lucode_array, lucode_nodata)
if soil_type_nodata is not None:
valid_mask[:] &= ~
|
numpy.isclose(soil_type_array, soil_type_nodata)
|
numpy.isclose
|
#!/usr/bin/env python
# coding: utf-8
# Exploring the data set
# First, we will examine the data set we will use to train the classifier. The training data is contained in the file facies_vectors.csv. The data set consists of five wireline log measurements, two indicator variables, and facies labels at half-foot intervals. In machine learning terminology, each log measurement is a feature vector that maps a set of "features" (the log measurements) to a class (the facies type). We will use the pandas library to load the data into a dataframe, which provides a convenient data structure to work with well log data.
# In[8]:
# get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
from pandas import set_option
## set the font family so that Chinese (non-ASCII) labels render correctly
mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['font.sans-serif'] = 'SimHei'
mpl.rcParams['axes.unicode_minus'] = False
set_option("display.max_rows", 10)
## to convert an ndarray X into a pandas DataFrame, just use pd.DataFrame(X)
filename ='facies_vectors.csv' #training_data, validation_data, test_data = pickle.load(filename, encoding='bytes')
training_data = pd.read_csv(filename)
training_data
# In[7]:
training_data.describe()
# Facies classification using machine learning
#
# This notebook demonstrates how to train a machine learning algorithm to predict facies from well log data. The data set we will use comes from a class exercise on neural networks and fuzzy systems from the University of Kansas. The exercise is based on a consortium project that used machine learning techniques to create a reservoir model of the largest gas field in North America, the Hugoton and Panoma fields. More information about the source of the data is available there.
#
# The data set we will use is log data from nine wells that have been labeled with facies types based on observation of core. We will use this log data to train a support vector machine to classify facies types. A support vector machine (SVM) is a supervised learning model that can be trained on data to perform classification and regression tasks. The SVM algorithm uses the training data to fit an optimal hyperplane between the different classes (facies, in our case). We will use the SVM implementation in scikit-learn.
#
# First, we will explore the data set. We will load the training data from the nine wells and take a look at what we have to work with. We will plot the data from a couple of the wells and create crossplots to look at the variation within the data.
# Next, we will condition the data set. We will remove entries that contain incomplete data. The data will be scaled to have zero mean and unit variance. We will also split the data into training and test sets.
# Then we will be ready to build the SVM classifier. We will demonstrate how to use a cross-validation set for model parameter selection.
#
# Finally, once we have built and tuned the classifier, we can apply the trained model to classify facies in wells that do not have labels. We will apply the classifier to two wells, but in principle you could apply the classifier to any number of wells that have the same log data.
# Remove a single well to use as a blind test later.
# In[8]:
blind = training_data[training_data['Well Name'] == 'NEWBY']
training_data = training_data[training_data['Well Name'] != 'NEWBY']
# The data come from a gas reservoir overseas. The P gas field is mainly a carbonate gas reservoir located in southwestern XX, covering 2700 square miles. The data set comes from nine wells (4149 examples), consisting of a set of seven predictor variables and a facies (class) for each example vector; the validation (test) data (830 examples from two wells) have the same seven predictor variables in the feature vector. The facies are based on examination of cores from the nine wells, logged at half-foot intervals. The predictor variables include five wireline log measurements and two geologic constraining variables derived from geologic knowledge. These are essentially continuous variables sampled at a half-foot rate.
# The seven predictor variables are:
# The five wireline log curves:
# 1. Gamma ray (GR)
# 2. Resistivity (ILD_log10)
# 3. Photoelectric effect (PE)
# 4. Neutron-density porosity difference
# 5. Average neutron-density porosity (DeltaPHI and PHIND)
# Note that some wells do not have PE.
# Two geologic constraining variables: a nonmarine-marine indicator (NM_M) and relative position (RELPOS)
#
# The nine discrete facies (rock classes) are:
# 1. Nonmarine sandstone
# 2. Nonmarine coarse siltstone
# 3. Nonmarine fine siltstone
# 4. Marine siltstone and shale
# 5. Mudstone (limestone)
# 6. Wackestone (limestone)
# 7. Dolomite
# 8. Packstone-grainstone (limestone)
# 9. Phylloid-algal bafflestone (limestone)
#
# These facies are not discrete; they grade into one another. Some have adjacent, neighboring facies, and mislabeling can be expected to occur within these neighboring facies. The table below lists the facies, their abbreviated labels, and their approximate neighbors.
#
# Let's clean up this data set. The "Well Name" and "Formation" columns can be converted to a categorical data type.
#
# Facies |Label| Adjacent Facies
# :---: | :---: |:--:
# 1 |SS| 2
# 2 |CSiS| 1,3
# 3 |FSiS| 2
# 4 |SiSh| 5
# 5 |MS| 4,6
# 6 |WS| 5,7
# 7 |D| 6,8
# 8 |PS| 6,7,9
# 9 |BS| 7,8
#
# In[38]:
training_data['Well Name'] = training_data['Well Name'].astype('category')
training_data['Formation'] = training_data['Formation'].astype('category')  # use astype to convert dataframe column dtypes
# In[39]:
training_data['Well Name'].unique()
# These are the names of the 10 training wells in the P gas reservoir. Data has been recruited into the pseudo-well 'Recruit F9' to better represent facies 9, the phylloid-algal bafflestone. Before plotting the well data, let's define a colormap so the facies are represented by consistent colors in all the plots that follow. We also create abbreviated facies labels and add them to the `facies_vectors` dataframe.
# In[ ]:
# In[49]:
# 1=sandstone 2=c_siltstone 3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00',
'#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
#facies_color_map is a dictionary that maps facies labels
#to their respective colors
facies_color_map = {}
for ind, label in enumerate(facies_labels):
facies_color_map[label] = facies_colors[ind]
def label_facies(row, labels):
return labels[ row['Facies'] -1]
training_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1)
training_data.describe()
# This is a quick view of the statistical distribution of the input variables. Looking at the count values, most variables have 4149 valid values, except for PE, which has 3232. In this tutorial we will drop the feature vectors that do not have a valid PE entry.
# In[50]:
PE_mask = training_data['PE'].notnull().values
training_data = training_data[PE_mask]
# Let's look at the data from individual wells in the more familiar form of a log plot. We will create plots for the five well log variables, as well as a log of the facies labels.
# In[51]:
def make_facies_log_plot(logs, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[5])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-1):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
plt.show()
# Placing the log-plotting code in a function makes it easy to plot logs from multiple wells, and it can be reused later to view the results when we apply the facies classification model to other wells. The function was written to take a list of colors and facies labels as arguments.
#
# We then show the log plots for the wells 'SHRIMPLIN' and 'SHANKLE'.
# In[52]:
make_facies_log_plot(
training_data[training_data['Well Name'] == 'SHRIMPLIN'],
facies_colors)
make_facies_log_plot(
training_data[training_data['Well Name'] == 'SHANKLE'],
facies_colors)
# In addition to individual wells, we can also look at how the entire training set represents each facies. Let's plot a histogram of the number of training examples for each facies class.
# In[53]:
#count the number of unique entries for each facies, sort them by
#facies number (instead of by number of entries)
facies_counts = training_data['Facies'].value_counts().sort_index()
#use facies labels to index each count
facies_counts.index = facies_labels
facies_counts.plot(kind='bar',color=facies_colors,
title='Distribution of Training Data by Facies')
facies_counts
# This shows the distribution of examples by facies for the 3232 training examples in the training set. Dolomite (facies 7) has the fewest, with 141 examples. There are also only 185 examples of bafflestone. Depending on the performance of the classifier we are going to train, we may consider getting more examples of these facies.
#
# Crossplots are a familiar tool in the geosciences to visualize how two properties vary with rock type. This data set contains five log variables, and a scatter matrix helps to quickly visualize the variation between all of the variables in the data set. We can use the very useful seaborn library to quickly create a nice-looking scatter matrix. Each pane in the plot shows the relationship between two of the variables on the x and y axes, with each point colored according to its facies. The same colormap is used to represent the nine facies.
# In[55]:
#save plot display settings to change back to when done plotting with seaborn
inline_rc = dict(mpl.rcParams)
import seaborn as sns
sns.set()
sns.pairplot(training_data.drop(['Well Name','Facies','Formation','Depth','NM_M','RELPOS'],axis=1),
hue='FaciesLabels', palette=facies_color_map,
hue_order=list(reversed(facies_labels)))
#switch back to default matplotlib plot style
mpl.rcParams.update(inline_rc)
# ## Conditioning the data set
#
# Now we extract just the feature variables we need to perform the classification. The predictor variables are the five wireline values and the two geologic constraining variables. We also get a vector of the facies labels that corresponds to each feature vector.
# In[64]:
correct_facies_labels = training_data['Facies'].values
feature_vectors = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
feature_vectors.describe()
# Scikit-learn includes a [preprocessing](http://scikit-learn.org/stable/modules/preprocessing.html) module that can "standardize" the data (give each variable zero mean and unit variance, also called *whitening*). Many machine learning algorithms assume features will be standard normally distributed, i.e. Gaussian with zero mean and unit variance. The factors used to standardize the training set must be applied to any subsequent feature set that will be input to the classifier. The StandardScaler class can be fit to the training data and later used to standardize any subsequent data.
# In[57]:
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(feature_vectors)
scaled_features = scaler.transform(feature_vectors)
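# Note (added): the same fitted scaler must be reused for any data later given
# to the classifier, e.g. scaler.transform(some_new_feature_vectors) for a
# hypothetical array some_new_feature_vectors; this is exactly what is done for
# the blind well further below.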
# Scikit-learn also includes a handy function to randomly split the training data into training and test sets. The test set contains a small subset of feature vectors that are not used to train the network. Because we know the true facies labels for these examples, we can compare the classifier's results to the actual facies and determine the accuracy of the model. Let's use 20% of the data for the test set.
# In[58]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
scaled_features, correct_facies_labels, test_size=0.2, random_state=42)
# Training the SVM classifier
# Now we use the cleaned and conditioned training set to create a facies classifier. As mentioned above, we will use a type of machine learning model known as a support vector machine. The SVM is a map of the feature vectors as points in a multidimensional space, mapped so that examples from different facies are divided by a clear gap that is as wide as possible.
# The SVM implementation in scikit-learn takes a number of important parameters. First, we create a classifier using the default settings.
# In[59]:
from sklearn import svm
clf = svm.SVC()
# Now we can train the classifier using the training set we created above.
# In[60]:
clf.fit(X_train,y_train)
# Now that the model has been trained on our data, we can use it to predict the facies of the feature vectors in the test set. Because we know the true facies labels of the vectors in the test set, we can use the results to evaluate the accuracy of the classifier.
# In[61]:
predicted_labels = clf.predict(X_test)
# We need some metrics to evaluate how well the classifier is doing. A [confusion matrix](http://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) is a table that can be used to describe the performance of a classification model. [Scikit-learn](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html) allows us to easily create a confusion matrix by providing the actual and predicted facies labels.
#
# The confusion matrix is simply a 2-D array. The entry `C[i][j]` of the confusion matrix is equal to the number of observations predicted to have facies `j`, but known to have facies `i`.
#
# To simplify reading the confusion matrix, a function has been written to display the matrix along with facies labels and various error metrics. See the file `classification_utilities.py` in this repo for the `display_cm()` function.
# In[62]:
from sklearn.metrics import confusion_matrix
from classification_utilities import display_cm, display_adj_cm
conf = confusion_matrix(y_test, predicted_labels)
display_cm(conf, facies_labels, hide_zeros=True)
# The rows of the confusion matrix correspond to the actual facies labels. The columns correspond to the labels assigned by the classifier. For example, consider the first row. Of the feature vectors in the test set that actually have the label 'SS', 23 were correctly identified as 'SS', 21 were classified as 'CSiS', and 2 were classified as 'FSiS'.
#
# The entries along the diagonal are the facies that have been correctly classified. Below we define two functions that will give an overall measure of how the algorithm is performing. Accuracy is defined as the number of correct classifications divided by the total number of classifications.
# In[20]:
def accuracy(conf):
total_correct = 0.
nb_classes = conf.shape[0]
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
acc = total_correct/sum(sum(conf))
return acc
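# Equivalent one-liner (added as a sketch, assuming conf is a square numpy
# array): the accuracy above is simply np.trace(conf) / conf.sum(), i.e. the
# diagonal (correct predictions) divided by all predictions.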
# As noted above, the boundaries between the facies classes are not all sharp, and some of them blend into one another. The error within these 'adjacent facies' can also be calculated. We define an array to represent the facies adjacent to each other. For facies label `i`, `adjacent_facies[i]` is an array of the adjacent facies labels.
# In[21]:
adjacent_facies = np.array([[1], [0,2], [1], [4], [3,5], [4,6,7], [5,7], [5,6,8], [6,7]])
def accuracy_adjacent(conf, adjacent_facies):
nb_classes = conf.shape[0]
total_correct = 0.
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
for j in adjacent_facies[i]:
total_correct += conf[i][j]
return total_correct / sum(sum(conf))
# In[22]:
print ('Facies classification accuracy = %f' % accuracy(conf))
print ('Adjacent facies classification accuracy = %f' % accuracy_adjacent(conf, adjacent_facies))
# ## Model parameter selection
#
# So far the classifier has been built with the default parameters. However, we may be able to get improved classification results with an optimal choice of parameters.
#
# We will consider two parameters. The parameter `C` is a regularization factor that tells the classifier how much we want to avoid misclassifying training examples. A large value of C will try to correctly classify more examples from the training set, but if `C` is too large it may "overfit" the data and fail to generalize when classifying new data. If `C` is too small, the model will not be good at fitting outliers and will have a large error on the training set.
#
# The SVM learning algorithm uses a kernel function to compute the distance between feature vectors. Many kernel functions exist, but in this case we are using the radial basis function (`rbf`) kernel, the default. The `gamma` parameter describes the size of the radial basis functions, which is how far apart two vectors in feature space can be and still be considered close.
#
# We will train a series of classifiers with different values of `C` and `gamma`. Two nested loops are used to train a classifier for every possible combination of values in the specified ranges. The classification accuracy is recorded for each combination of parameter values. The results are shown in a series of plots, so the parameter values that give the best classification accuracy on the test set can be selected.
#
# This process is also known as "cross-validation". Often a separate "cross-validation" data set is created in addition to the training and test sets to do model selection. For this tutorial we will use the test set to choose the model parameters.
# In[23]:
#model selection takes a few minutes, change this variable
#to true to run the parameter loop
do_model_selection = True
if do_model_selection:
C_range = np.array([.01, 1, 5, 10, 20, 50, 100, 1000, 5000, 10000])
gamma_range = np.array([0.0001, 0.001, 0.01, 0.1, 1, 10])
fig, axes = plt.subplots(3, 2,
sharex='col', sharey='row',figsize=(10,10))
plot_number = 0
for outer_ind, gamma_value in enumerate(gamma_range):
row = plot_number / 2
column = plot_number % 2
cv_errors = np.zeros(C_range.shape)
train_errors = np.zeros(C_range.shape)
for index, c_value in enumerate(C_range):
clf = svm.SVC(C = c_value, gamma = gamma_value)
clf.fit(X_train,y_train)
train_conf = confusion_matrix(y_train, clf.predict(X_train))
cv_conf = confusion_matrix(y_test, clf.predict(X_test))
cv_errors[index] = accuracy(cv_conf)
train_errors[index] = accuracy(train_conf)
row=int(row)
column=int(column)
ax = axes[row, column]
ax.set_title('Gamma = %g'%gamma_value)
ax.semilogx(C_range, cv_errors, label='CV error')
ax.semilogx(C_range, train_errors, label='Train error')
plot_number += 1
ax.set_ylim([0.2,1])
ax.legend(bbox_to_anchor=(1.05, 0), loc='lower left', borderaxespad=0.)
fig.text(0.5, 0.03, 'C value', ha='center',
fontsize=14)
fig.text(0.04, 0.5, 'Classification Accuracy', va='center',
rotation='vertical', fontsize=14)
# The best accuracy on the cross-validation error curves is achieved for `gamma = 1` and `C = 10`. We can now create and train an optimized classifier based on these parameters:
# In[24]:
clf = svm.SVC(C = 10, gamma = 1)
clf.fit(X_train,y_train)
cv_conf = confusion_matrix(y_test, clf.predict(X_test))
print ('Optimized facies classification accuracy = %.2f' % accuracy(cv_conf))
print ('Optimized adjacent facies classification accuracy = %.2f' % accuracy_adjacent(cv_conf, adjacent_facies))
# [Precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall) are metrics that give more insight into how the classifier performs for individual facies. Precision is the probability that, given a classification result for a sample, the sample actually belongs to that class. Recall is the probability that a sample will be correctly classified for a given class.
#
# Precision and recall can be computed easily using the confusion matrix. The code to do so has been added to the `display_cm()` function:
# In[25]:
display_cm(cv_conf, facies_labels,
display_metrics=True, hide_zeros=True)
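# Alternative (added note, not part of the original tutorial): scikit-learn can
# report per-class precision, recall and F1 directly, e.g.
#     from sklearn.metrics import classification_report
#     print(classification_report(y_test, clf.predict(X_test)))
# which should agree with the display_cm() metrics above up to rounding.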
# To interpret these results, consider facies 'SS'. In our test set, if a sample was labeled 'SS', the probability that the label is correct is 0.8 (precision). If we know a sample has facies 'SS', the probability that it will be correctly labeled by the classifier is 0.78 (recall). It is desirable to have high values for both precision and recall, but often when an algorithm is tuned to increase one, the other decreases. The [F1 score](https://en.wikipedia.org/wiki/Precision_and_recall#F-measure) combines the two to give a single measure of the relevancy of the classifier results.
#
# These results help guide intuition about how to improve the classifier. For example, a sample with facies 'MS', or mudstone, is only classified correctly 57% of the time (recall). Perhaps this could be improved by introducing more training samples. Sample quality could also play a role. Facies 'BS', or bafflestone, has the best F1 score with relatively few training examples, but those examples were carefully selected from other wells to provide training examples to identify this facies.
#
# We can also consider the classification metrics when we count misclassification within adjacent facies as correct:
#
# In[26]:
display_adj_cm(cv_conf, facies_labels, adjacent_facies,
display_metrics=True, hide_zeros=True)
# Considering adjacent facies, the F1 scores for all facies types are above 0.9, except when classifying 'SiSh', or marine siltstone and shale. The classifier often misclassifies this facies (recall of 0.66), most often as wackestone.
#
# These results are comparable to those reported in Dubois et al. (2007).
# ## Applying the classification model to blind data
#
# We held one well back from training and stored it in a dataframe called `blind`:
# In[27]:
blind
# The label vector is just the `Facies` column:
# In[28]:
y_blind = blind['Facies'].values
# We can form the feature matrix by dropping some of the columns and making a new dataframe:
# In[29]:
well_features = blind.drop(['Facies', 'Formation', 'Well Name', 'Depth'], axis=1)
# Now we can transform it with the scaler we made before:
# In[30]:
X_blind = scaler.transform(well_features)
# Now it's a simple matter to make a prediction and store it back in the dataframe:
# In[31]:
y_pred = clf.predict(X_blind)
blind['Prediction'] = y_pred
# Let's see how we did with the confusion matrix:
# In[32]:
cv_conf = confusion_matrix(y_blind, y_pred)
print ('Optimized facies classification accuracy = %.2f' % accuracy(cv_conf))
print ('Optimized adjacent facies classification accuracy = %.2f' % accuracy_adjacent(cv_conf, adjacent_facies))
# We managed 0.75 on the test data, but that data came from the same wells as the training data. This more realistic test does not perform as well... but it does remarkably well on the adjacent facies predictions.
# In[33]:
display_adj_cm(cv_conf, facies_labels, adjacent_facies, display_metrics=True, hide_zeros=True)
# In[34]:
def compare_facies_plot(logs, compadre, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
cluster2 = np.repeat(
|
np.expand_dims(logs[compadre].values,1)
|
numpy.expand_dims
|
from funcy import print_durations
import numpy as np
import pandas as pd
from io import StringIO
from collections import deque
# Puzzle: https://adventofcode.com/2021/day/19
# - an assortment of beacons and scanners float motionless in the water
# - each scanner is capable of detecting all beacons in a cube centered on the scanner
# - scanners do not know their own position
# - scanner regions overlap
# - at least 12 beacons within each overlap between scanner pair
# - each scanner could be in any of 24 different orientations:
# facing positive or negative x, y, or z, four directions "up" from that facing
#
# task: how many beacons are there
# input: relative 3D positions of beacons detected by each scanner
#
# solution:
# (I) find scanner pairs with overlap
# - for all beacons in a scan as reference beacon, hash all beacons in scan
# - compare with all other beacons hashes in all other scans
# - >= 12 matching hashes? -> overlap, found pair
# (II) find transforms between scanners
# - scanner 0 is reference at (0, 0, 0), direction xyz
# - for any matching overlap, take 4 points, compute transform
# (III) transform all beacons into the reference frame, return the number of unique beacons
#
# possible performance improvement:
# - match only from latest scanner or queue onto all unknown candidates
# instead of matching all scans with all others
# - remove pandas
#
# alternative solution (not implemented):
# as we are constrained to a grid, a much simpler solution could be:
# for all pairs of scans (26 * 25)
# for all possible directions (24)
# - take the difference between the beacon points
# - at least twelve share the same offset? -> found direction and translation
def read_scans(file):
scans = []
with open(file, "r") as f:
for scan_chunk in f.read().split("\n\n"):
beacons = np.loadtxt(StringIO(scan_chunk), skiprows=1, dtype=np.int16, delimiter=",")
# use homogeneous coordinates
# to be able to find transforms with translations later
ones = np.ones((beacons.shape[0], 1), dtype=np.int16)
beacons_homogeneous = np.hstack((beacons, ones))
scans.append(beacons_homogeneous)
return scans
def beacon_hash(beacon_list, reference_beacon):
    # hash function: Manhattan distance to the reference beacon
# all beacons lie on a grid, and this distance is transform invariant
# as all transforms are axis-aligned (only facing down/up/90 deg...)
return np.abs(beacon_list - reference_beacon).sum(axis=1)
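# Why this works (added note): every scanner orientation here is an axis-aligned
# rotation, i.e. it only permutes and/or negates coordinates, and translation
# cancels when differencing two beacons.  So if another scanner sees beacons
# b1, b2 as R @ b1 + t and R @ b2 + t, then the Manhattan distance
#     |(R @ b1 + t) - (R @ b2 + t)|_1 == |R @ (b1 - b2)|_1 == |b1 - b2|_1,
# which is why matching hash sets identifies overlapping scans.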
def build_lookup(scan):
lookup_beacons = []
lookup_sets = []
for ref_beacon in scan:
bh = beacon_hash(scan, ref_beacon)
lookup_beacons.append(bh)
lookup_sets.append(set(bh))
return lookup_beacons, lookup_sets
def find_transform(source_transform, source_scan, target_scan, source_lookups, target_lookups):
matching_beacon_hashes = list(set(source_lookups) & set(target_lookups))
source_matches = []
target_matches = []
for match in matching_beacon_hashes:
source_match_id =
|
np.nonzero(source_lookups==match)
|
numpy.nonzero
|
import io
import logging
import numpy as np
import os
import pickle
import typing
from tqdm import tqdm
from typing import Any, Dict, List, Optional, Text, Tuple
from rasa_nlu.classifiers import INTENT_RANKING_LENGTH
from rasa_nlu.components import Component
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
import tensorflow as tf
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.training_data import TrainingData
from rasa_nlu.model import Metadata
from rasa_nlu.training_data import Message
try:
import tensorflow as tf
except ImportError:
tf = None
class EmbeddingIntentClassifier(Component):
"""Intent classifier using supervised embeddings.
The embedding intent classifier embeds user inputs
and intent labels into the same space.
Supervised embeddings are trained by maximizing similarity between them.
It also provides rankings of the labels that did not "win".
The embedding intent classifier needs to be preceded by
a featurizer in the pipeline.
This featurizer creates the features used for the embeddings.
It is recommended to use ``CountVectorsFeaturizer`` that
can be optionally preceded by ``SpacyNLP`` and ``SpacyTokenizer``.
Based on the starspace idea from: https://arxiv.org/abs/1709.03856.
However, in this implementation the `mu` parameter is treated differently
and additional hidden layers are added together with dropout.
"""
provides = ["intent", "intent_ranking"]
requires = ["text_features"]
defaults = {
# nn architecture
# sizes of hidden layers before the embedding layer for input words
# the number of hidden layers is thus equal to the length of this list
"hidden_layers_sizes_a": [256, 128],
# sizes of hidden layers before the embedding layer for intent labels
# the number of hidden layers is thus equal to the length of this list
"hidden_layers_sizes_b": [],
# training parameters
# initial and final batch sizes - batch size will be
# linearly increased for each epoch
"batch_size": [64, 256],
# number of epochs
"epochs": 300,
# embedding parameters
# dimension size of embedding vectors
"embed_dim": 20,
# how similar the algorithm should try
# to make embedding vectors for correct intent labels
"mu_pos": 0.8, # should be 0.0 < ... < 1.0 for 'cosine'
# maximum negative similarity for incorrect intent labels
"mu_neg": -0.4, # should be -1.0 < ... < 1.0 for 'cosine'
# the type of the similarity
"similarity_type": 'cosine', # string 'cosine' or 'inner'
# the number of incorrect intents, the algorithm will minimize
# their similarity to the input words during training
"num_neg": 20,
# flag: if true, only minimize the maximum similarity for
# incorrect intent labels
"use_max_sim_neg": True,
# set random seed to any int to get reproducible results
# try to change to another int if you are not getting good results
"random_seed": None,
# regularization parameters
# the scale of L2 regularization
"C2": 0.002,
# the scale of how critical the algorithm should be of minimizing the
# maximum similarity between embeddings of different intent labels
"C_emb": 0.8,
# dropout rate for rnn
"droprate": 0.2,
# flag: if true, the algorithm will split the intent labels into tokens
# and use bag-of-words representations for them
"intent_tokenization_flag": False,
# delimiter string to split the intent labels
"intent_split_symbol": '_',
# visualization of accuracy
# how often to calculate training accuracy
"evaluate_every_num_epochs": 10, # small values may hurt performance
# how many examples to use for calculation of training accuracy
"evaluate_on_num_examples": 1000 # large values may hurt performance
}
def __init__(self,
component_config: Optional[Dict[Text, Any]] = None,
inv_intent_dict: Optional[Dict[int, Text]] = None,
encoded_all_intents: Optional[np.ndarray] = None,
session: Optional['tf.Session'] = None,
graph: Optional['tf.Graph'] = None,
message_placeholder: Optional['tf.Tensor'] = None,
intent_placeholder: Optional['tf.Tensor'] = None,
similarity_op: Optional['tf.Tensor'] = None,
word_embed: Optional['tf.Tensor'] = None,
intent_embed: Optional['tf.Tensor'] = None
) -> None:
"""Declare instant variables with default values"""
self._check_tensorflow()
super(EmbeddingIntentClassifier, self).__init__(component_config)
self._load_params()
# transform numbers to intents
self.inv_intent_dict = inv_intent_dict
# encode all intents with numbers
self.encoded_all_intents = encoded_all_intents
# tf related instances
self.session = session
self.graph = graph
self.a_in = message_placeholder
self.b_in = intent_placeholder
self.sim_op = similarity_op
# persisted embeddings
self.word_embed = word_embed
self.intent_embed = intent_embed
# init helpers
def _load_nn_architecture_params(self, config: Dict[Text, Any]) -> None:
self.hidden_layer_sizes = {'a': config['hidden_layers_sizes_a'],
'b': config['hidden_layers_sizes_b']}
self.batch_size = config['batch_size']
self.epochs = config['epochs']
def _load_embedding_params(self, config: Dict[Text, Any]) -> None:
self.embed_dim = config['embed_dim']
self.mu_pos = config['mu_pos']
self.mu_neg = config['mu_neg']
self.similarity_type = config['similarity_type']
self.num_neg = config['num_neg']
self.use_max_sim_neg = config['use_max_sim_neg']
self.random_seed = self.component_config['random_seed']
def _load_regularization_params(self, config: Dict[Text, Any]) -> None:
self.C2 = config['C2']
self.C_emb = config['C_emb']
self.droprate = config['droprate']
def _load_flag_if_tokenize_intents(self, config: Dict[Text, Any]) -> None:
self.intent_tokenization_flag = config['intent_tokenization_flag']
self.intent_split_symbol = config['intent_split_symbol']
if self.intent_tokenization_flag and not self.intent_split_symbol:
logger.warning("intent_split_symbol was not specified, "
"so intent tokenization will be ignored")
self.intent_tokenization_flag = False
def _load_visual_params(self, config: Dict[Text, Any]) -> None:
self.evaluate_every_num_epochs = config['evaluate_every_num_epochs']
if self.evaluate_every_num_epochs < 1:
self.evaluate_every_num_epochs = self.epochs
self.evaluate_on_num_examples = config['evaluate_on_num_examples']
def _load_params(self) -> None:
self._load_nn_architecture_params(self.component_config)
self._load_embedding_params(self.component_config)
self._load_regularization_params(self.component_config)
self._load_flag_if_tokenize_intents(self.component_config)
self._load_visual_params(self.component_config)
# package safety checks
@classmethod
def required_packages(cls) -> List[Text]:
return ["tensorflow"]
@staticmethod
def _check_tensorflow():
if tf is None:
raise ImportError(
'Failed to import `tensorflow`. '
'Please install `tensorflow`. '
'For example with `pip install tensorflow`.')
# training data helpers:
@staticmethod
def _create_intent_dict(training_data: 'TrainingData') -> Dict[Text, int]:
"""Create intent dictionary"""
distinct_intents = set([example.get("intent")
for example in training_data.intent_examples])
return {intent: idx
for idx, intent in enumerate(sorted(distinct_intents))}
@staticmethod
def _create_intent_token_dict(intents: List[Text],
intent_split_symbol: Text) -> Dict[Text, int]:
"""Create intent token dictionary"""
distinct_tokens = set([token
for intent in intents
for token in intent.split(intent_split_symbol)])
return {token: idx
for idx, token in enumerate(sorted(distinct_tokens))}
def _create_encoded_intents(self,
intent_dict: Dict[Text, int]) -> np.ndarray:
"""Create matrix with intents encoded in rows as bag of words.
If intent_tokenization_flag is off, returns identity matrix.
"""
if self.intent_tokenization_flag:
intent_token_dict = self._create_intent_token_dict(
list(intent_dict.keys()), self.intent_split_symbol)
encoded_all_intents = np.zeros((len(intent_dict),
len(intent_token_dict)))
for key, idx in intent_dict.items():
for t in key.split(self.intent_split_symbol):
encoded_all_intents[idx, intent_token_dict[t]] = 1
return encoded_all_intents
else:
return np.eye(len(intent_dict))
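    # Illustrative example (added comment): with intent_tokenization_flag=True,
    # intent_split_symbol='_' and intent_dict {'ask_weather': 0, 'greet': 1},
    # the token dict is {'ask': 0, 'greet': 1, 'weather': 2}, so the matrix is
    #     [[1, 0, 1],    # ask_weather -> tokens 'ask' and 'weather'
    #      [0, 1, 0]]    # greet       -> token 'greet'
    # With the flag off, the intents are simply one-hot encoded (identity matrix).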
# noinspection PyPep8Naming
def _create_all_Y(self, size: int) -> np.ndarray:
"""Stack encoded_all_intents on top of each other
to create candidates for training examples and
to calculate training accuracy
"""
return np.stack([self.encoded_all_intents] * size)
# noinspection PyPep8Naming
def _prepare_data_for_training(
self,
training_data: 'TrainingData',
intent_dict: Dict[Text, int]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Prepare data for training"""
X = np.stack([e.get("text_features")
for e in training_data.intent_examples])
intents_for_X = np.array([intent_dict[e.get("intent")]
for e in training_data.intent_examples])
Y = np.stack([self.encoded_all_intents[intent_idx]
for intent_idx in intents_for_X])
return X, Y, intents_for_X
# tf helpers:
def _create_tf_embed_nn(self, x_in: 'tf.Tensor', is_training: 'tf.Tensor',
layer_sizes: List[int], name: Text) -> 'tf.Tensor':
"""Create nn with hidden layers and name"""
reg = tf.contrib.layers.l2_regularizer(self.C2)
x = x_in
for i, layer_size in enumerate(layer_sizes):
x = tf.layers.dense(inputs=x,
units=layer_size,
activation=tf.nn.relu,
kernel_regularizer=reg,
name='hidden_layer_{}_{}'.format(name, i))
x = tf.layers.dropout(x, rate=self.droprate, training=is_training)
x = tf.layers.dense(inputs=x,
units=self.embed_dim,
kernel_regularizer=reg,
name='embed_layer_{}'.format(name))
return x
def _create_tf_embed(self,
a_in: 'tf.Tensor',
b_in: 'tf.Tensor',
is_training: 'tf.Tensor'
) -> Tuple['tf.Tensor', 'tf.Tensor']:
"""Create tf graph for training"""
emb_a = self._create_tf_embed_nn(a_in, is_training,
self.hidden_layer_sizes['a'],
name='a')
emb_b = self._create_tf_embed_nn(b_in, is_training,
self.hidden_layer_sizes['b'],
name='b')
return emb_a, emb_b
def _tf_sim(self,
a: 'tf.Tensor',
b: 'tf.Tensor') -> Tuple['tf.Tensor', 'tf.Tensor']:
"""Define similarity
in two cases:
sim: between embedded words and embedded intent labels
sim_emb: between individual embedded intent labels only
"""
if self.similarity_type == 'cosine':
# normalize embedding vectors for cosine similarity
a = tf.nn.l2_normalize(a, -1)
b = tf.nn.l2_normalize(b, -1)
if self.similarity_type in {'cosine', 'inner'}:
sim = tf.reduce_sum(tf.expand_dims(a, 1) * b, -1)
sim_emb = tf.reduce_sum(b[:, 0:1, :] * b[:, 1:, :], -1)
return sim, sim_emb
else:
raise ValueError("Wrong similarity type {}, "
"should be 'cosine' or 'inner'"
"".format(self.similarity_type))
def _tf_loss(self, sim: 'tf.Tensor', sim_emb: 'tf.Tensor') -> 'tf.Tensor':
"""Define loss"""
# loss for maximizing similarity with correct action
loss = tf.maximum(0., self.mu_pos - sim[:, 0])
if self.use_max_sim_neg:
# minimize only maximum similarity over incorrect actions
max_sim_neg = tf.reduce_max(sim[:, 1:], -1)
loss += tf.maximum(0., self.mu_neg + max_sim_neg)
else:
# minimize all similarities with incorrect actions
max_margin = tf.maximum(0., self.mu_neg + sim[:, 1:])
loss += tf.reduce_sum(max_margin, -1)
# penalize max similarity between intent embeddings
max_sim_emb = tf.maximum(0., tf.reduce_max(sim_emb, -1))
loss += max_sim_emb * self.C_emb
# average the loss over the batch and add regularization losses
loss = (tf.reduce_mean(loss) + tf.losses.get_regularization_loss())
return loss
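    # Numeric sketch of the loss terms above (added comment, example values
    # only): with mu_pos=0.8 and mu_neg=-0.4, a correct-intent similarity of
    # 0.6 contributes max(0, 0.8 - 0.6) = 0.2, while a maximum incorrect-intent
    # similarity of 0.1 contributes max(0, -0.4 + 0.1) = 0.0; only incorrect
    # intents with similarity greater than -mu_neg (0.4 here) add to the loss.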
# training helpers:
def _create_batch_b(self, batch_pos_b: np.ndarray,
intent_ids: np.ndarray) -> np.ndarray:
"""Create batch of intents.
Where the first is correct intent
and the rest are wrong intents sampled randomly
"""
batch_pos_b = batch_pos_b[:, np.newaxis, :]
# sample negatives
batch_neg_b = np.zeros((batch_pos_b.shape[0], self.num_neg,
batch_pos_b.shape[-1]))
for b in range(batch_pos_b.shape[0]):
# create negative indexes out of possible ones
# except for correct index of b
negative_indexes = [i for i in
range(self.encoded_all_intents.shape[0])
if i != intent_ids[b]]
negs = np.random.choice(negative_indexes, size=self.num_neg)
batch_neg_b[b] = self.encoded_all_intents[negs]
return
|
np.concatenate([batch_pos_b, batch_neg_b], 1)
|
numpy.concatenate
|
import numpy as np
import nibabel as nib
from nilearn import image
from nilearn import datasets
from nilearn import masking
from nibabel import processing
from sklearn.model_selection import train_test_split
from ATLAS import ATLAS
import matplotlib.pyplot as plt
class DATA():
def __init__(self):
self.Fetch_OASIS()
self.ROIs_3D_gm = None
self.ROIs_3D_wm = None
def Fetch_OASIS(self,balanced=1):
dataset_files = datasets.fetch_oasis_vbm()
## Load datasets
age = dataset_files.ext_vars['age'].astype(float)
age =
|
np.array(age)
|
numpy.array
|
# # Short Assignment 2: Image Restoration
# ## SCC0251.2020.1 - Image Processing
# ### Prof. Dr. <NAME>
# ### 10284952 - <NAME>
# https://github.com/vitorgt/SCC0251
# Imports
import numpy as np
import imageio
# import matplotlib.pyplot as plt
r = imageio.imread(str(input()).rstrip()).astype(np.uint8)
k = int(input())
sigma = float(input())
gamma = float(input())
maxr = np.max(r)
# Normalize function
def scale(image, c=0, d=255):
a = np.min(image)
b = np.max(image)
return (image-a)*((d-c)/(b-a))+c
# Given function for gaussian filter
def gaussian_filter(k=3, sigma=1.0):
arx = np.arange((-k//2) + 1.0, (k//2) + 1.0)
x, y = np.meshgrid(arx, arx)
f = np.exp(-(1/2) * (np.square(x) + np.square(y))/np.square(sigma))
return f/np.sum(f)
# Function to apply filters on Fourier domain to images
def fft_filter(img, flt):
# padding
pad = (img.shape[0]//2)-flt.shape[0]//2
fltpad =
|
np.pad(flt, (pad, pad-1), "constant", constant_values=0)
|
numpy.pad
|
# encoding: utf-8
# Author: <NAME>
# Created: 2021/11/18
# from .pc_utils import *
from .PCTransforms import PointCloudNormalizer, PointCloudSubsampler, ShiftPoints
from .crop_pc import crop_pc_3d, crop_pc_2d, crop_pc_2d_index
from .pc_io import save_pc_to_ply, load_pc, load_las_as_numpy
if __name__ == '__main__':
# test module
    import numpy as np
    import torch
    import open3d as o3d  # needed below for mesh loading and point sampling
# test load pc
folder1 = "/scratch2/bingxin/IPA/Data/ZUR1/Point_Clouds/"
# folder2 = "/scratch2/bingxin/IPA/Data/ZUR1/Point_Clouds_npy/"
# for file in os.listdir(folder1):
# full_path = os.path.join(folder1, file)
# points = load_pc(full_path)
# test crop pc
mesh_path = "/scratch2/bingxin/IPA/Data/ZUR1/Ground_Truth_3D/merged_dach_wand_terrain.obj"
mesh = o3d.io.read_triangle_mesh(mesh_path)
pcd = mesh.sample_points_uniformly(number_of_points=10000000)
pts = np.asarray(pcd.points)
p1 =
|
np.array([463328., 5248140.])
|
numpy.array
|
"""Calculate graph edge bearings."""
import warnings
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from . import projection
# scipy is an optional dependency for entropy calculation
try:
import scipy
except ImportError: # pragma: no cover
scipy = None
def calculate_bearing(lat1, lng1, lat2, lng2):
"""
Calculate the compass bearing(s) between pairs of lat-lng points.
Vectorized function to calculate (initial) bearings between two points'
coordinates or between arrays of points' coordinates. Expects coordinates
in decimal degrees. Bearing represents angle in degrees (clockwise)
between north and the geodesic line from point 1 to point 2.
Parameters
----------
lat1 : float or numpy.array of float
first point's latitude coordinate
lng1 : float or numpy.array of float
first point's longitude coordinate
lat2 : float or numpy.array of float
second point's latitude coordinate
lng2 : float or numpy.array of float
second point's longitude coordinate
Returns
-------
bearing : float or numpy.array of float
the bearing(s) in decimal degrees
"""
# get the latitudes and the difference in longitudes, in radians
lat1 = np.radians(lat1)
lat2 = np.radians(lat2)
d_lng = np.radians(lng2 - lng1)
# calculate initial bearing from -180 degrees to +180 degrees
y = np.sin(d_lng) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(d_lng)
initial_bearing = np.degrees(np.arctan2(y, x))
# normalize to 0-360 degrees to get compass bearing
return initial_bearing % 360
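# Quick sanity check (added comment): calculate_bearing(0, 0, 0, 1) returns
# 90.0 (due east) and calculate_bearing(0, 0, 1, 0) returns 0.0 (due north);
# the function also broadcasts over numpy arrays of coordinates.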
def get_bearing(origin_point, destination_point):
"""
Do not use, deprecated.
Parameters
----------
origin_point : tuple
deprecated, do not use
destination_point : tuple
deprecated, do not use
Returns
-------
bearing : float
deprecated, do not use
"""
msg = (
"The `get_bearing` function has been deprecated and will be removed in a "
"future release. Use the vectorized `bearing.calculate_bearing` instead."
)
warnings.warn(msg)
return calculate_bearing(
origin_point[0], origin_point[1], destination_point[0], destination_point[1]
)
def add_edge_bearings(G, precision=1):
"""
Add compass `bearing` attributes to all graph edges.
Vectorized function to calculate (initial) bearing from origin node to
destination node for each edge in a directed, unprojected graph then add
these bearings as new edge attributes. Bearing represents angle in degrees
    (clockwise) between north and the geodesic line from the origin node
to the destination node. Ignores self-loop edges as their bearings are
undefined.
Parameters
----------
G : networkx.MultiDiGraph
unprojected graph
precision : int
decimal precision to round bearing
Returns
-------
G : networkx.MultiDiGraph
graph with edge bearing attributes
"""
if projection.is_projected(G.graph["crs"]): # pragma: no cover
raise ValueError("graph must be unprojected to add edge bearings")
# extract edge IDs and corresponding coordinates from their nodes
uvk = [(u, v, k) for u, v, k in G.edges if u != v]
x = G.nodes(data="x")
y = G.nodes(data="y")
coords = np.array([(y[u], x[u], y[v], x[v]) for u, v, k in uvk])
# calculate bearings then set as edge attributes
bearings = calculate_bearing(coords[:, 0], coords[:, 1], coords[:, 2], coords[:, 3])
values = zip(uvk, bearings.round(precision))
nx.set_edge_attributes(G, dict(values), name="bearing")
return G
def orientation_entropy(Gu, num_bins=36, min_length=0, weight=None):
"""
Calculate undirected graph's orientation entropy.
Orientation entropy is the entropy of its edges' bidirectional bearings
across evenly spaced bins. Ignores self-loop edges as their bearings are
undefined.
Parameters
----------
Gu : networkx.MultiGraph
undirected, unprojected graph with `bearing` attributes on each edge
num_bins : int
number of bins; for example, if `num_bins=36` is provided, then each
bin will represent 10° around the compass
min_length : float
ignore edges with `length` attributes less than `min_length`; useful
to ignore the noise of many very short edges
weight : string
if not None, weight edges' bearings by this (non-null) edge attribute.
for example, if "length" is provided, this will return 1 bearing
observation per meter per street, which could result in a very large
`bearings` array.
Returns
-------
entropy : float
the graph's orientation entropy
"""
# check if we were able to import scipy
if scipy is None: # pragma: no cover
raise ImportError("scipy must be installed to calculate entropy")
bin_counts, _ = _bearings_distribution(Gu, num_bins, min_length, weight)
return scipy.stats.entropy(bin_counts)
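# Added note: scipy.stats.entropy normalizes the bin counts and uses the
# natural logarithm, so a perfectly uniform bearing distribution over
# num_bins=36 bins gives the maximum value ln(36) ~= 3.58, while a single
# dominant bin gives 0.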
def _extract_edge_bearings(Gu, min_length=0, weight=None):
"""
Extract undirected graph's bidirectional edge bearings.
For example, if an edge has a bearing of 90° then we will record bearings
of both 90° and 270° for this edge.
Parameters
----------
Gu : networkx.MultiGraph
undirected, unprojected graph with `bearing` attributes on each edge
min_length : float
ignore edges with `length` attributes less than `min_length`; useful
to ignore the noise of many very short edges
weight : string
if not None, weight edges' bearings by this (non-null) edge attribute.
for example, if "length" is provided, this will return 1 bearing
observation per meter per street, which could result in a very large
`bearings` array.
Returns
-------
bearings : numpy.array
the graph's bidirectional edge bearings
"""
if nx.is_directed(Gu) or projection.is_projected(Gu.graph["crs"]): # pragma: no cover
raise ValueError("graph must be undirected and unprojected to analyze edge bearings")
bearings = list()
for u, v, data in Gu.edges(data=True):
# ignore self-loops and any edges below min_length
if u != v and data["length"] >= min_length:
if weight:
# weight edges' bearings by some edge attribute value
bearings.extend([data["bearing"]] * int(data[weight]))
else:
# don't weight bearings, just take one value per edge
bearings.append(data["bearing"])
# drop any nulls, calculate reverse bearings, concatenate and return
bearings = np.array(bearings)
bearings = bearings[~np.isnan(bearings)]
bearings_r = (bearings - 180) % 360
return
|
np.concatenate([bearings, bearings_r])
|
numpy.concatenate
|
import numpy as np
import pandas as pd
import warnings
from scipy.stats import expon, uniform
from scipy.optimize import root_scalar
try:
from scipy.integrate import solve_ivp
except ImportError:
print("Warning, solve_ivp could not be imported. Use f_is_stepwise_constant = True")
def solve_ivp(*args, **kwargs):
raise NotImplementedError
def interpolate_F_inverse(tau, t_grid, F_grid):
if F_grid[-1] < tau:
return np.inf
## Left bound
# Largest i such that F_i <= tau
i = np.max(np.nonzero(F_grid <= tau))
# Smallest j such that F_j == F_i
if tau == F_grid[i]:
j = np.min(np.nonzero(F_grid == tau))
return t_grid[j]
## Right bound
k = i + 1
return np.interp(tau, [F_grid[i], F_grid[k]], [t_grid[i], t_grid[k]])
def F_sister_bias(t, N, beta, tG):
output = 0
output += (t // tG) * (1 - beta) * tG
remainder = np.mod(t, tG)
if remainder < beta * tG:
output += remainder * (N - 1) * (1 - beta) / N
else:
output += (N - 1) / N * (1 - beta) * beta * tG
output += (remainder - beta * tG) * \
(((N - 1) * (1 - beta) + 1) / N)
return output
def inverse_F_sister_bias(tau, N, beta, tG):
output = 0
output += (tau // ((1 - beta) * tG)) * tG
remainder = np.mod(tau, (1 - beta) * tG)
if remainder < (N - 1) / N * (1 - beta) * beta * tG:
output += remainder / ((N - 1) / N * (1 - beta))
else:
output += beta * tG
output += (remainder - (N - 1) / N * (1 - beta) * beta * tG) / \
(((N - 1) * (1 - beta) + 1) / N)
return output
inverse_F_sister_bias = np.vectorize(inverse_F_sister_bias)
def average_f_g1_proportion(avg_tG1_fun, gamma_fun, tG, beta, Tdeath, c):
## Compute gamma (cell cycle model parameters)
gamma = gamma_fun(beta, tG)
# Define functions
def eta_fun(f, tG, beta, Tdeath, c):
if c * beta * tG * f == 0:
output = np.inf
else:
output = Tdeath / (c * beta * tG * f)
return output
def g_fun(f, gamma, tG, beta, Tdeath, c):
output = f - (1 - beta) * tG / \
(avg_tG1_fun(eta_fun(f, tG, beta, Tdeath, c), gamma)
+ (1 - beta) * tG )
return output
# Solution always lies between 0 and 1
bracket = [0, 1]
# Initialise output
output = []
sol = root_scalar(g_fun,
args=(gamma, tG, beta, Tdeath, c),
bracket=bracket,
x0 = 1 - beta)
if sol.converged:
return sol.root
else:
return np.nan
average_f_g1_proportion = np.vectorize(average_f_g1_proportion)
def exponential_ccm(random_state=None, clone=None, tG1_param=50):
return expon.rvs(scale=tG1_param, random_state=random_state)
def exponential_ccm_heterotypic(random_state=None, clone=None, tG1_param_clone_0=50, tG1_param_clone_1=50):
assert clone == 0 or clone == 1
if clone == 0:
return expon.rvs(scale=tG1_param_clone_0, random_state=random_state)
elif clone == 1:
return expon.rvs(scale=tG1_param_clone_1, random_state=random_state)
else:
raise Exception('This should not be reached. Something has gone horribly wrong.')
def exponential_cdf(t, tG1_param=50):
return 1 - np.exp(- t / tG1_param)
def uniform_ccm(random_state=None, clone=None, tG1_param=50, r=20):
assert 0.5 * r / tG1_param <= 1
return uniform.rvs(loc=tG1_param - 0.5 * r, scale=r, random_state=random_state)
def uniform_ccm_heterotypic(random_state=None, clone=None, tG1_param_clone_0=50,
r_clone_0=20, tG1_param_clone_1=50, r_clone_1=20):
assert 0.5 * r_clone_0 / tG1_param_clone_0 <= 1
assert 0.5 * r_clone_1 / tG1_param_clone_1 <= 1
assert clone == 0 or clone == 1
if clone == 0:
return uniform.rvs(loc=tG1_param_clone_0 - 0.5 * r_clone_0,
scale=r_clone_0, random_state=random_state)
elif clone == 1:
return uniform.rvs(loc=tG1_param_clone_1 - 0.5 * r_clone_1,
scale=r_clone_1, random_state=random_state)
else:
raise Exception('This should not be reached. Something has gone horribly wrong.')
def uniform_cdf(t, tG1_param=50, r=20):
assert 0.5 * r / tG1_param <= 1
if t < tG1_param - 0.5 * r:
return 0
if tG1_param + 0.5 * r < t:
return 1
return (t - (tG1_param - 0.5 * r)) / r
def base_rate_death_signal(t, tau, tbirth, tG1, clone, isinG1, base_rate=1):
return base_rate * np.ones(tau.shape)
def base_rate_death_signal_heterotypic(t, tau, tbirth, tG1, clone, isinG1,
base_rate_clone_0=1, base_rate_clone_1=1):
assert np.all(np.logical_or(clone == 0, clone == 1))
return base_rate_clone_0 * (clone == 0) + base_rate_clone_1 * (clone == 1)
def normalised_g2_death_signal(t, tau, tbirth, tG1, clone, isinG1, coef=1):
# All cells in G1
if np.all(isinG1):
return np.zeros(tau.shape)
# All cells in G2
if np.all(np.logical_not(isinG1)):
return coef * np.ones(tau.shape)
# Neither of these scenarios
return coef * np.sum(np.logical_not(isinG1)) / (tau.size - 1) * np.ones(tau.shape)
def normalised_g2_death_signal_heterotypic(t, tau, tbirth, tG1, clone, isinG1,
coef_clone_0=1, coef_clone_1=1):
assert np.all(np.logical_or(clone == 0, clone == 1))
coef = coef_clone_0 * (clone == 0) + coef_clone_1 * (clone == 1)
# All cells in G1
if np.all(isinG1):
return np.zeros(tau.shape)
# All cells in G2
if np.all(np.logical_not(isinG1)):
return coef * np.ones(tau.shape)
# Neither of these scenarios
return coef * np.sum(np.logical_not(isinG1)) / (tau.size - 1) * np.ones(tau.shape)
def g2_death_signal(t, tau, tbirth, tG1, clone, isinG1, coef=1):
return coef * np.sum(np.logical_not(isinG1)) * np.ones(tau.shape)
def g2_death_signal_heterotypic(t, tau, tbirth, tG1, clone, isinG1,
coef_clone_0=1, coef_clone_1=1):
assert np.all(np.logical_or(clone == 0, clone == 1))
coef = coef_clone_0 * (clone == 0) + coef_clone_1 * (clone == 1)
return coef * np.sum(np.logical_not(isinG1)) * np.ones(tau.shape)
class WellMixedSimulator(object):
def __init__(self, f=base_rate_death_signal, ccm=exponential_ccm,
Tdeath=100, tG2=50, tstart=0, tend=500,
f_args=(),
ccm_args=(),
max_cell_count=np.inf,
min_cell_count=0,
f_is_stepwise_constant=True,
min_cell_count_for_clone={},
max_cell_count_for_clone={},
apoptosis_at_checkpoint=False,
switch_apoptosis_time=None,
):
# Some assertions
assert callable(f)
assert callable(ccm)
if not callable(Tdeath):
assert Tdeath >= 0
Tdeath = lambda clone, Tdeath=Tdeath: Tdeath * np.ones(clone.shape)
if not callable(tG2):
assert tG2 >= 0
tG2 = lambda clone, tG2=tG2: tG2 * np.ones(clone.shape)
assert tend >= tstart
self.f = f
self.ccm = ccm
self.Tdeath = Tdeath
self.tG2 = tG2
self.tstart = tstart
self.tend = tend
self.f_args = f_args
self.ccm_args = ccm_args
self.min_cell_count = min_cell_count
self.max_cell_count = max_cell_count
self.f_is_stepwise_constant = f_is_stepwise_constant
self.max_cell_count_for_clone = max_cell_count_for_clone
self.min_cell_count_for_clone = min_cell_count_for_clone
self.apoptosis_at_checkpoint = apoptosis_at_checkpoint
self.switch_apoptosis_time = switch_apoptosis_time
self.switched = False
# Save parameters in dict for output
self.param = {
'f' : f,
'ccm' : ccm,
'Tdeath' : Tdeath,
'tG2' : tG2,
'tstart' : tstart,
'tend' : tend,
'f_args' : f_args,
'ccm_args' : ccm_args,
'min_cell_count' : min_cell_count,
'max_cell_count' : max_cell_count,
'f_is_stepwise_constant' : f_is_stepwise_constant,
'min_cell_count_for_clone' : min_cell_count_for_clone,
'max_cell_count_for_clone' : max_cell_count_for_clone,
'apoptosis_at_checkpoint' : apoptosis_at_checkpoint,
'switch_apoptosis_time' : switch_apoptosis_time,
}
# Define division, transition, and death events
if not self.f_is_stepwise_constant:
if self.apoptosis_at_checkpoint:
raise NotImplementedError
if self.switch_apoptosis_time:
raise NotImplementedError
division_event = lambda t, tau, tbirth, tG1, clone, isinG1, *args: \
np.amax(t - tbirth - tG1 - self.tG2(clone))
division_event.terminal = True
division_event.direction = 1
def transition_event(t, tau, tbirth, tG1, clone, isinG1, *args):
if np.any(isinG1):
return np.amax(t - tbirth[isinG1] - tG1[isinG1])
else:
return -1
transition_event.terminal = True
transition_event.direction = 1
def death_event(t, tau, tbirth, tG1, clone, isinG1, *args):
if np.any(isinG1):
return np.amax(tau[isinG1] - self.Tdeath(clone)[isinG1])
else:
return -1
death_event.terminal = True
death_event.direction = 1
self.events = [ division_event, transition_event, death_event ]
def run(self,
tau_0=np.zeros(4),
tbirth_0=np.random.rand(4) * -100,
tG1_0=expon.rvs(scale=50, size=4) + 50,
clone_0=np.arange(4),
seed=None,
):
"""
        Run a Monte Carlo Death Clock simulation with the given initial conditions
        and return a dictionary with the following items:
(t1, t2, ..., tN are the time points of the N division/transition/death events.)
t_events: [ tstart, t1, t2, ..., tN, tend ]
t_grid: [ np.array(time points for ODE solution) per time interval ]
tau: [ num-cells-by-num-timepoints np.array(tau) per time interval ]
cell_indices: [ np.array(indices of cells) per time interval ]
isinG1: [ np.array(True if in G1, else False) per time interval ]
division: [ np.array(indices of cells undergoing division) per event ]
transition: [ np.array(indices of cells undergoing transition) per event ]
death: [ np.array(indices of cells undergoing death) per event ]
tbirth: np.array(birth times per cell)
tG1: np.array(G1 durations per cell)
clone: np.array(clones per cell)
status: 0 means end of simulation time reached
1 means zero cell count reached
2 means max cell count reached
3 means min cell count reached
4 means clone-specific max cell count reached (see status_info for clone)
5 means clone-specific min cell count reached (see status_info for clone)
status_info: status = 0: None
status = 1: None
status = 2: None
status = 3: None
status = 4: clone whose max cell count was reached
status = 5: clone whose min cell count was reached
param: dictionary containing parameters
f: death clock signal function
ccm: cell cycle model function
Tdeath: death threshold function
tG2: G2 duration function
tstart: start time
tend: end time
f_args: additional args to f
ccm_args: additional args to ccm
min_cell_count: self-explanatory
max_cell_count: self-explanatory
f_is_stepwise_constant: whether f is stepwise constant between events
min_cell_count_for_clone: dict containing clone-specific minimum cell count
max_cell_count_for_clone: dict containing clone-specific maximum cell count
init_cond: dictionary containing initial conditions
tau_0: tau
tbirth_0: birth times
tG1_0: G1 durations
clone_0: clones
seed: seed for random number generator
"""
self.tau_0 = np.array(tau_0, dtype=float)
self.tbirth_0 = np.array(tbirth_0, dtype=float)
self.tG1_0 = np.array(tG1_0, dtype=float)
self.clone_0 = np.array(clone_0, dtype=int)
self.check_initial_conditions()
# Create random state if seed is not None
if seed is not None:
self.random_state = np.random.RandomState(seed)
else:
self.random_state = None
# Save initial conditions in dict for output
self.init_cond = {
'tau_0' : np.array(tau_0, dtype=float),
'tbirth_0' : np.array(tbirth_0, dtype=float),
'tG1_0' : np.array(tG1_0, dtype=float),
'clone_0' : np.array(clone_0, dtype=int),
'seed' : seed,
}
# Index cells
self.cell_indices_now = np.arange(len(self.tbirth_0))
self.last_cell_index = self.cell_indices_now[-1]
# Initialise simulation time
self.t_now = self.tstart
# Initialise state variables
self.tau_now = np.array(self.tau_0)
self.tbirth_now = np.array(self.tbirth_0)
self.tG1_now = np.array(self.tG1_0)
self.clone_now = np.array(self.clone_0)
# Helper state variable
self.isinG1_now = self.t_now - self.tbirth_now < self.tG1_now
# Initialise output data
self.t_events_data = [self.t_now]
self.t_grid_data = []
self.tau_data = []
self.division_data = []
self.transition_data = []
self.death_data = []
self.cell_indices_data = []
self.isinG1_data = []
self.tbirth_data = np.array(self.tbirth_now)
self.tG1_data = np.array(self.tG1_now)
self.clone_data = np.array(self.clone_now)
# Initialise status and status_info
self.status = None
self.status_info = None
# Simulation loop
while True:
t, tau, event_occurred = self.solve_until_next_event()
# Switch apoptosis mode if required
if self.switch_apoptosis_time:
if t[-1] >= self.switch_apoptosis_time and not self.switched:
self.apoptosis_at_checkpoint = not self.apoptosis_at_checkpoint
self.switched = True
# Save data
self.t_events_data.append(t[-1])
self.t_grid_data.append(t)
self.tau_data.append(tau)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
# If division, transition or death event occurred
if event_occurred:
# Update tau and t
self.tau_now = np.array(self.tau_data[-1][:,-1])
self.t_now = self.t_events_data[-1]
# If the event happened at or past the end of the simulation time,
# break the loop and do NOT record the event. In other words, discrete
# events happening at exactly tend are not recorded.
if self.t_now >= self.tend:
break
self.do_cell_transitions_divisions_deaths()
# If there are no cells remaining, tack on last data item and
# break
if len(self.tau_now) == 0:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([]))
self.cell_indices_data.append(np.array([]))
self.isinG1_data.append(np.array([]))
self.status = 1
break
# If the maximum number of cells is hit, tack on last data item
# and break
if len(self.tau_now) >= self.max_cell_count:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([self.tau_now, self.tau_now]).T)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
self.status = 2
break
# If the minimum number of cells is hit, tack on last data item
# and break
if len(self.tau_now) <= self.min_cell_count:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([self.tau_now, self.tau_now]).T)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
self.status = 3
break
# If the maximum number of cells for a clone is hit, tack on
# last data item and break
for clone, max_cell_count in self.max_cell_count_for_clone.items():
if np.sum(self.clone_now == clone) >= max_cell_count:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([self.tau_now, self.tau_now]).T)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
self.status = 4
self.status_info = clone
break
# Break out of while loop
if self.status == 4:
break
# If the minimum number of cells for a clone is hit, tack on
# last data item and break
for clone, min_cell_count in self.min_cell_count_for_clone.items():
if np.sum(self.clone_now == clone) <= min_cell_count:
self.t_events_data.append(self.tend)
self.t_grid_data.append(np.array([self.t_now, self.tend]))
self.tau_data.append(np.array([self.tau_now, self.tau_now]).T)
self.cell_indices_data.append(np.array(self.cell_indices_now))
self.isinG1_data.append(np.array(self.isinG1_now))
self.status = 5
self.status_info = clone
break
# Break out of while loop
if self.status == 5:
break
# Otherwise, no event occurred: the end of the simulation time was reached
else:
self.status = 0
break
return {
't_events' : self.t_events_data,
't_grid' : self.t_grid_data,
'tau' : self.tau_data,
'cell_indices' : self.cell_indices_data,
'isinG1' : self.isinG1_data,
'division' : self.division_data,
'transition' : self.transition_data,
'death' : self.death_data,
'tbirth' : self.tbirth_data,
'tG1' : self.tG1_data,
'clone' : self.clone_data,
'status' : self.status,
'status_info' : self.status_info,
'param' : self.param,
'init_cond' : self.init_cond,
}
def sample_g1_duration(self, clone):
return self.ccm(self.random_state, clone, *self.ccm_args)
def check_initial_conditions(self):
if not len(self.tau_0) == len(self.tbirth_0) == len(self.tG1_0) == len(self.clone_0):
raise ValueError("tau_0, tbirth_0, tG1_0, and clone_0 must have the same length")
if not len(self.tau_0) < self.max_cell_count:
raise ValueError("The initial cell count ({}) must be smaller than "
"the maximum cell count ({})".format(len(self.tau_0), self.max_cell_count))
if not len(self.tau_0) > 0:
raise ValueError("The initial cell count ({}) must be larger than "
"0".format(len(self.tau_0)))
if not len(self.tau_0) > self.min_cell_count:
raise ValueError("The initial cell count ({}) must be larger than "
"the minimum cell count ({})".format(len(self.tau_0), self.min_cell_count))
for clone, min_cell_count in self.min_cell_count_for_clone.items():
if not np.sum(self.clone_0 == clone) > min_cell_count:
raise ValueError("The initial cell count of clone {0} ({1}) must "
"be larger than the minimum cell count for clone {0} ({2})".format(
clone, np.sum(self.clone_0 == clone), min_cell_count))
for clone, max_cell_count in self.max_cell_count_for_clone.items():
if not np.sum(self.clone_0 == clone) < max_cell_count:
raise ValueError("The initial cell count of clone {0} ({1}) must "
"be smaller than the maximum cell count for clone {0} ({2})".format(
clone, np.sum(self.clone_0 == clone), max_cell_count))
if not np.all(self.tbirth_0 <= self.tstart):
raise ValueError("Initial birth times must be at or before start of simulation")
if not self.apoptosis_at_checkpoint:
if not np.all(np.logical_or(self.tau_0 < self.Tdeath(self.clone_0),
np.logical_and(self.tau_0 >= self.Tdeath(self.clone_0), self.tstart
- self.tbirth_0 >= self.tG1_0))):
raise ValueError("Death invariant is violated in initial conditions")
if not np.all(self.tstart - self.tbirth_0 < self.tG1_0 + self.tG2(self.clone_0)):
raise ValueError("Birth invariant is violated in initial conditions")
def do_cell_divisions(self):
division_indices = np.nonzero(np.isclose(self.t_now - self.tbirth_now, self.tG1_now + self.tG2(self.clone_now)))
self.division_data.append(self.cell_indices_now[division_indices])
for division_index in division_indices[0]:
# Reset death clock of daughter cells
self.tau_now[division_index] = 0
self.tau_now = np.append(self.tau_now, 0)
# Set time of birth on daughter cells
self.tbirth_now[division_index] = self.t_now
self.tbirth_now = np.append(self.tbirth_now, self.t_now)
# Draw random G1 duration for daughter cells
self.tG1_now[division_index] = self.sample_g1_duration(self.clone_now[division_index])
self.tG1_now = np.append(self.tG1_now, self.sample_g1_duration(self.clone_now[division_index]))
# Set clone of new daughter cell
self.clone_now = np.append(self.clone_now, self.clone_now[division_index])
# Both cells start in G1
self.isinG1_now[division_index] = True
self.isinG1_now = np.append(self.isinG1_now, True)
# Generate new indices for cells
self.cell_indices_now[division_index] = self.last_cell_index + 1
self.cell_indices_now = np.append(self.cell_indices_now, self.last_cell_index + 2)
# Save static data
self.tbirth_data = np.append(self.tbirth_data, [self.t_now, self.t_now])
self.tG1_data = np.append(self.tG1_data,
[ self.tG1_now[division_index], self.tG1_now[-1] ])
self.clone_data = np.append(self.clone_data,
[ self.clone_now[-1], self.clone_now[-1] ])
# Update last cell index
self.last_cell_index += 2
def do_cell_transitions(self):
if self.apoptosis_at_checkpoint:
transition_indices = np.nonzero(
np.logical_and(np.isclose(self.t_now - self.tbirth_now, self.tG1_now),
self.tau_now < self.Tdeath(self.clone_now)))
else:
transition_indices = np.nonzero(np.isclose(self.t_now - self.tbirth_now, self.tG1_now))
self.transition_data.append(self.cell_indices_now[transition_indices])
self.isinG1_now[transition_indices] = False
def do_cell_deaths(self):
if self.apoptosis_at_checkpoint:
death_indices = np.nonzero(
np.logical_and(np.isclose(self.t_now - self.tbirth_now, self.tG1_now),
self.tau_now >= self.Tdeath(self.clone_now)))
else:
death_indices = np.nonzero(
np.logical_and(
np.isclose(self.tau_now, self.Tdeath(self.clone_now)),
self.isinG1_now))
self.death_data.append(self.cell_indices_now[death_indices])
# Traverse from last index to first, else indexing is incorrect
for death_index in death_indices[0][::-1]:
# Remove dead cell
self.tau_now = np.delete(self.tau_now, death_index)
self.tbirth_now = np.delete(self.tbirth_now, death_index)
self.tG1_now = np.delete(self.tG1_now, death_index)
self.clone_now = np.delete(self.clone_now, death_index)
self.isinG1_now = np.delete(self.isinG1_now, death_index)
self.cell_indices_now = np.delete(self.cell_indices_now, death_index)
def do_cell_transitions_divisions_deaths(self):
self.do_cell_transitions()
self.do_cell_divisions()
self.do_cell_deaths()
def solve_until_next_event_for_constant_f(self):
# Compute constant f
f = self.f(self.t_now, self.tau_now, self.tbirth_now, self.tG1_now,
self.clone_now, self.isinG1_now, *self.f_args)
# Find time until next event
time_until_next_division = np.min(self.tbirth_now + self.tG1_now + self.tG2(self.clone_now) - self.t_now)
if np.any(self.isinG1_now):
tbirth_filtered = self.tbirth_now[self.isinG1_now]
tG1_filtered = self.tG1_now[self.isinG1_now]
tau_filtered = self.tau_now[self.isinG1_now]
f_filtered = f[self.isinG1_now]
Tdeath_filtered = self.Tdeath(self.clone_now)[self.isinG1_now]
time_until_next_transition = np.min(tbirth_filtered + tG1_filtered - self.t_now)
with np.errstate(divide='ignore'):
time_until_next_death = np.min((Tdeath_filtered - tau_filtered) / f_filtered)
else:
time_until_next_transition = np.inf
time_until_next_death = np.inf
# If apoptosis at checkpoint, then death events are at the same time as
# transition events
if self.apoptosis_at_checkpoint:
time_until_next_death = np.inf
time_until_next_event = \
np.min([time_until_next_division, time_until_next_transition, time_until_next_death])
assert time_until_next_event >= 0
# Event occurred if the time of the next event is before termination
event_occurred = self.t_now + time_until_next_event < self.tend
# Get timestep
if event_occurred:
timestep = time_until_next_event
else:
timestep = self.tend - self.t_now
# Compute and return results
output_t = np.array([self.t_now, self.t_now + timestep])
output_tau = np.array([self.tau_now, self.tau_now + f * timestep]).T
return output_t, output_tau, event_occurred
def solve_until_next_event_for_nonconstant_f(self):
# Solve ODE
output = solve_ivp(self.f, [self.t_now, self.tend], self.tau_now,
args=(self.tbirth_now, self.tG1_now, self.clone_now, self.isinG1_now) + self.f_args,
events=self.events)
if not output.success:
raise Exception('An error occurred in scipy.integrate.solve_ivp: "{}"'.format(
output.message))
if output.status == 1:
# Sanity check: only one event should have happened
assert np.sum([len(events) for events in output.y_events]) == 1
return output.t, output.y, output.status == 1
def solve_until_next_event(self):
if self.f_is_stepwise_constant:
return self.solve_until_next_event_for_constant_f()
else:
return self.solve_until_next_event_for_nonconstant_f()
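# Minimal usage sketch (illustrative; the parameter values are arbitrary):
#
#   simulator = WellMixedSimulator(tstart=0, tend=200)
#   data = simulator.run(seed=0)
#   # data['status'] == 0 means the end of the simulation time was reached;
#   # data['t_events'], data['tau'] and data['cell_indices'] describe the
#   # trajectory per time interval, as documented in run().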
class WellMixedSimulationData(object):
def __init__(self, data):
# Store raw data
self.data = data
# Initialise all data members to None for lazy initialisation
# global data
self.status = None
self.status_info = None
self.unique_clones = None
self.num_divisions = None
self.num_transitions = None
self.num_deaths = None
self.total_cell_count = None # sum of initial cells and cells born during simulation
self.num_divisions_for_clone = {}
self.num_transitions_for_clone = {}
self.num_deaths_for_clone = {}
self.f = None
# timeseries data (per time interval)
self.t_events = None
self.cell_count = None
self.clone_cell_count = None
self.G1_cell_count = None
self.G2_cell_count = None
self.timeseries_df = None
self.G1_cell_count_for_clone = {}
self.G2_cell_count_for_clone = {}
self.cell_count_for_clone = {}
# fine timeseries data (per ODE time point)
self.t_grid = None
self.tau = {}
# cellwise data
self.tbirth = None # Time of birth
self.tG1 = None # G1 duration
self.clone = None # clone
self.died = None # whether cell died during simulation
self.tdeath = None # time of death, inf if not died
self.divided = None # whether cell divided during simulation
self.tdivision = None # time of division, inf if not divided
self.transitioned = None # whether cell transitioned to G2
self.ttransition = None # time of transition to G2, inf if not transitioned
self.t_last_alive = None # time that cell was last alive
# (until it divided, died or simulation terminated)
self.max_age = None # t_last_alive - tbirth
self.time_in_G1 = None # time spent in G1 (from birth until
# transition/death/termination)
self.time_in_G2 = None # time spent in G2 (from transition until
# termination/division, 0 if not transitioned)
self.last_tau = None # Last death clock value
self.average_f = None # Average death clock signal: last death
# clock value divided by time spent in G1
self.effective_time_in_G1 = None # Effective time spent in G1 (from birth until
# transition/death/termination), truncated at the first
# cell that neither died nor transitioned
self.cellwise_df = None
def __str__(self):
if self.get_status() == 0:
status_str = 'End of simulation reached'
elif self.get_status() == 1:
status_str = 'Extinction reached'
elif self.get_status() == 2:
status_str = 'Maximum cell count reached'
elif self.get_status() == 3:
status_str = 'Minimum cell count reached'
elif self.get_status() == 4:
status_str = 'Maximum cell count reached for clone {}'.format(self.get_status_info())
elif self.get_status() == 5:
status_str = 'Minimum cell count reached for clone {}'.format(self.get_status_info())
else:
raise Exception('Never reached')
unique_clones = self.get_unique_clones()
num_divisions = self.get_num_divisions()
num_transitions = self.get_num_transitions()
num_deaths = self.get_num_deaths()
total_cell_count = self.get_total_cell_count()
timeseries_df = self.get_timeseries_df()
timeseries_df_all = timeseries_df.describe(include='all')
cellwise_df = self.get_cellwise_df()
with warnings.catch_warnings():
# cellwise_df may contain 'inf' values (e.g. for tdeath). This
# creates warnings when computing summary statistics, which are
# suppressed by this construction.
warnings.simplefilter('ignore')
cellwise_df_all = cellwise_df.describe(include='all')
return """
Global data
-----------
status:\t{}
unique_clones:\t\t{}
num_divisions:\t\t{}
num_transitions:\t{}
num_deaths:\t\t{}
total_cell_count:\t{}
Timeseries data
---------------
{}
{}
Cellwise data
-------------
{}
{}
""".format(status_str,
unique_clones,
num_divisions,
num_transitions,
num_deaths,
total_cell_count,
timeseries_df,
timeseries_df_all,
cellwise_df,
cellwise_df_all
)
def get_unique_clones(self):
if self.unique_clones is None:
self.unique_clones = np.unique(self.data['clone'])
return self.unique_clones
def get_num_divisions(self):
if self.num_divisions is None:
self.num_divisions = np.sum(self.get_divided())
return self.num_divisions
def get_num_divisions_for_clone(self, clone):
if clone not in self.num_divisions_for_clone:
self.num_divisions_for_clone[clone] = np.sum(
np.logical_and(self.get_clone() == clone, self.get_divided()))
return self.num_divisions_for_clone[clone]
def get_num_transitions(self):
# Note that this is not the sum(self.get_transitioned()), because that
# function also counts cells that are in G2 at the start of the
# simulation as transitioned
if self.num_transitions is None:
self.num_transitions = sum(array.size for array in self.data['transition'])
return self.num_transitions
def get_num_transitions_for_clone(self, clone):
if clone not in self.num_transitions_for_clone:
self.num_transitions_for_clone[clone] = sum(
np.sum(self.get_clone()[array] == clone) for array in self.data['transition'])
return self.num_transitions_for_clone[clone]
def get_num_deaths(self):
if self.num_deaths is None:
self.num_deaths = np.sum(self.get_died())
return self.num_deaths
def get_num_deaths_for_clone(self, clone):
if clone not in self.num_deaths_for_clone:
self.num_deaths_for_clone[clone] = np.sum(
np.logical_and(self.get_clone() == clone, self.get_died()))
return self.num_deaths_for_clone[clone]
def get_status(self):
if self.status is None:
self.status = self.data['status']
return self.status
def get_status_info(self):
if self.status_info is None:
self.status_info = self.data['status_info']
return self.status_info
def get_t_events(self):
if self.t_events is None:
self.t_events = np.array(self.data['t_events'])
return self.t_events
def get_cell_count(self):
if self.cell_count is None:
self.cell_count = np.array(
[len(cell_indices) for cell_indices in self.data['cell_indices']])
# Repeat last cell count at termination time
self.cell_count = np.append(self.cell_count, self.cell_count[-1])
return self.cell_count
def get_cell_count_for_clone(self, clone):
if clone not in self.cell_count_for_clone:
cell_count = []
for cell_indices in self.data['cell_indices']:
cell_count.append(np.sum(self.get_clone()[cell_indices] == clone))
# Repeat last cell count at termination time
cell_count.append(cell_count[-1])
# Save as numpy array
self.cell_count_for_clone[clone] = np.array(cell_count)
return self.cell_count_for_clone[clone]
def get_total_cell_count(self):
if self.total_cell_count is None:
self.total_cell_count = len(self.get_tbirth())
return self.total_cell_count
def get_clone_cell_count(self):
if self.clone_cell_count is None:
unique_clones = self.get_unique_clones()
self.clone_cell_count = np.zeros((len(self.get_t_events()), len(unique_clones)))
for i, cell_indices in enumerate(self.data['cell_indices']):
if len(cell_indices) == 0:
continue
clones = self.get_clone()[cell_indices]
unique_clones_now, counts = np.unique(clones, return_counts=True)
for clone, count in zip(unique_clones_now, counts):
idx = np.searchsorted(unique_clones, clone)
self.clone_cell_count[i, idx] = count
# Repeat last cell count at termination time
self.clone_cell_count[-1,:] = self.clone_cell_count[-2, :]
return self.clone_cell_count
def get_G1_cell_count(self):
if self.G1_cell_count is None:
self.G1_cell_count = np.array([np.sum(isinG1)
for isinG1 in self.data['isinG1']])
# Repeat last cell count at termination time
self.G1_cell_count = np.append(self.G1_cell_count, self.G1_cell_count[-1])
return self.G1_cell_count
def get_G1_cell_count_for_clone(self, clone):
if clone not in self.G1_cell_count_for_clone:
G1_cell_count = []
for cell_indices, isinG1 in zip(self.data['cell_indices'], self.data['isinG1']):
G1_cell_count.append(np.sum(np.logical_and(self.get_clone()[cell_indices] == clone,
isinG1)))
# Repeat last cell count at termination time
G1_cell_count.append(G1_cell_count[-1])
# Save as numpy array
self.G1_cell_count_for_clone[clone] = np.array(G1_cell_count)
return self.G1_cell_count_for_clone[clone]
def get_G2_cell_count(self):
if self.G2_cell_count is None:
self.G2_cell_count = np.array([np.sum(np.logical_not(isinG1))
for isinG1 in self.data['isinG1']])
# Repeat last cell count at termination time
self.G2_cell_count = np.append(self.G2_cell_count, self.G2_cell_count[-1])
return self.G2_cell_count
def get_G2_cell_count_for_clone(self, clone):
if clone not in self.G2_cell_count_for_clone:
G2_cell_count = []
for cell_indices, isinG1 in zip(self.data['cell_indices'], self.data['isinG1']):
G2_cell_count.append(np.sum(np.logical_and(self.get_clone()[cell_indices] == clone,
np.logical_not(isinG1))))
# Repeat last cell count at termination time
G2_cell_count.append(G2_cell_count[-1])
# Save as numpy array
self.G2_cell_count_for_clone[clone] = np.array(G2_cell_count)
return self.G2_cell_count_for_clone[clone]
def get_timeseries_df(self):
if self.timeseries_df is None:
timeseries_dict = {
't_events' : self.get_t_events(),
'cell_count' : self.get_cell_count(),
}
for clone, cell_count in \
zip(self.get_unique_clones(), self.get_clone_cell_count().T):
timeseries_dict['clone_{}'.format(clone)] = cell_count
self.timeseries_df = pd.DataFrame(timeseries_dict)
return self.timeseries_df
def get_t_grid(self):
if self.t_grid is None:
self.t_grid = np.concatenate(self.data['t_grid'])
return self.t_grid
def get_tau_for_cell_index(self, cell_index):
if cell_index not in self.tau:
cur_tau = np.array([], dtype=float)
last_G1_tau = 0
# Loop over time intervals
for t_grid, tau, cell_indices, isinG1 in \
zip(self.data['t_grid'], self.data['tau'],
self.data['cell_indices'], self.data['isinG1']):
# Cell is alive in this time interval
if cell_index in cell_indices:
matches = np.nonzero(cell_indices == cell_index)[0]
assert len(matches) == 1
local_idx = matches[0]
if isinG1[local_idx]:
cur_tau = np.append(cur_tau, tau[local_idx])
last_G1_tau = cur_tau[-1]
else:
cur_tau = np.append(cur_tau, t_grid * 0 + last_G1_tau)
else:
cur_tau = np.append(cur_tau, t_grid * 0)
self.tau[cell_index] = cur_tau
return self.tau[cell_index]
def get_last_tau(self):
if self.last_tau is None:
self.last_tau = np.zeros(self.get_total_cell_count(), dtype=float)
# Loop over time intervals
for t_grid, tau, cell_indices, isinG1 in \
zip(self.data['t_grid'], self.data['tau'],
self.data['cell_indices'], self.data['isinG1']):
cur_last_taus = tau[:, -1]
self.last_tau[cell_indices[isinG1]] = cur_last_taus[isinG1]
return self.last_tau
def get_average_f(self):
if self.average_f is None:
# If time_in_G1 is zero, then replace by inf so that average f
# computes to zero.
t = np.array(self.get_time_in_G1())
t[t == 0] = np.inf
self.average_f = self.get_last_tau() / t
return self.average_f
def get_tbirth(self):
if self.tbirth is None:
self.tbirth = self.data['tbirth']
return self.tbirth
def get_tG1(self):
if self.tG1 is None:
self.tG1 = self.data['tG1']
return self.tG1
def get_clone(self):
if self.clone is None:
self.clone = self.data['clone']
return self.clone
def get_died(self):
if self.died is None:
self.died = np.zeros(self.get_total_cell_count(), dtype=bool)
for death_indices in self.data['death']:
self.died[death_indices] = True
return self.died
def get_tdeath(self):
if self.tdeath is None:
self.tdeath = np.ones(self.get_total_cell_count(), dtype=float) * np.inf
for time, death_indices in zip(self.get_t_events()[1:-1], self.data['death']):
self.tdeath[death_indices] = time
return self.tdeath
def get_divided(self):
if self.divided is None:
self.divided = np.zeros(self.get_total_cell_count(), dtype=bool)
for division_indices in self.data['division']:
self.divided[division_indices] = True
return self.divided
def get_tdivision(self):
if self.tdivision is None:
self.tdivision = np.ones(self.get_total_cell_count(), dtype=float) * np.inf
for time, division_indices in zip(self.get_t_events()[1:-1], self.data['division']):
self.tdivision[division_indices] = time
return self.tdivision
def get_transitioned(self):
if self.transitioned is None:
self.transitioned = np.zeros(self.get_total_cell_count(), dtype=bool)
for transition_indices in self.data['transition']:
self.transitioned[transition_indices] = True
# Cells that were already in G2 at start of simulation should be
# considered to have transitioned
tstart = self.get_t_events()[0]
cell_indices_start = self.data['cell_indices'][0]
tG1_start = self.get_tG1()[cell_indices_start]
tbirth_start = self.get_tbirth()[cell_indices_start]
in_G2_at_start = tstart - tbirth_start >= tG1_start
self.transitioned[cell_indices_start[in_G2_at_start]] = True
return self.transitioned
def get_ttransition(self):
if self.ttransition is None:
self.ttransition = np.ones(self.get_total_cell_count(), dtype=float) * np.inf
for time, transition_indices in zip(self.get_t_events()[1:-1], self.data['transition']):
self.ttransition[transition_indices] = time
# Cells that were already in G2 at start of simulation should be
# considered to have spent tG1 in G1
tstart = self.get_t_events()[0]
cell_indices_start = self.data['cell_indices'][0]
tG1_start = self.get_tG1()[cell_indices_start]
tbirth_start = self.get_tbirth()[cell_indices_start]
in_G2_at_start = tstart - tbirth_start >= tG1_start
self.ttransition[cell_indices_start[in_G2_at_start]] = \
tbirth_start[in_G2_at_start] + tG1_start[in_G2_at_start]
return self.ttransition
def get_t_last_alive(self):
if self.t_last_alive is None:
# In case of normal termination, tend is last time point in
# t_events, else the second to last one
if self.get_status() == 0:
tend = self.get_t_events()[-1]
else:
tend = self.get_t_events()[-2]
self.t_last_alive = np.minimum(tend,
np.minimum(self.get_tdeath(), self.get_tdivision()))
return self.t_last_alive
def get_max_age(self):
if self.max_age is None:
self.max_age = self.get_t_last_alive() - self.get_tbirth()
return self.max_age
def get_time_in_G1(self):
if self.time_in_G1 is None:
self.time_in_G1 = np.minimum(self.get_ttransition(), self.get_t_last_alive()) \
- self.get_tbirth()
return self.time_in_G1
def get_time_in_G2(self):
if self.time_in_G2 is None:
self.time_in_G2 = np.zeros(self.get_total_cell_count(), dtype=float)
transitioned = self.get_transitioned()
t_last_alive = self.get_t_last_alive()
ttransition = self.get_ttransition()
self.time_in_G2[transitioned] = t_last_alive[transitioned] \
- ttransition[transitioned]
return self.time_in_G2
def get_effective_time_in_G1(self):
if self.effective_time_in_G1 is None:
died_or_transitioned = np.logical_or(self.get_died(), self.get_transitioned())
indices = np.nonzero(np.logical_not(died_or_transitioned))[0]
if len(indices) > 0:
self.effective_time_in_G1 = self.get_time_in_G1()[:indices[0]]
else:
self.effective_time_in_G1 = self.get_time_in_G1()
return self.effective_time_in_G1
def get_cellwise_df(self):
if self.cellwise_df is None:
cellwise_dict = {
'tbirth' : self.get_tbirth(),
'tG1' : self.get_tG1(),
'clone' : self.get_clone(),
'died' : self.get_died(),
'tdeath' : self.get_tdeath(),
'divided' : self.get_divided(),
'tdivision' : self.get_tdivision(),
'transitioned' : self.get_transitioned(),
'ttransition' : self.get_ttransition(),
't_last_alive' : self.get_t_last_alive(),
'max_age' : self.get_max_age(),
'time_in_G1' : self.get_time_in_G1(),
'time_in_G2' : self.get_time_in_G2(),
}
self.cellwise_df = pd.DataFrame(cellwise_dict)
return self.cellwise_df
def get_death_clock_signal(self, f, f_args=None):
if f_args is None:
f_args = self.data['param']['f_args']
cur_f = []
for t_grid, tau, cell_indices, isinG1 in \
zip(self.data['t_grid'], self.data['tau'],
self.data['cell_indices'], self.data['isinG1']):
tbirth = self.get_tbirth()[cell_indices]
tG1 = self.get_tG1()[cell_indices]
clone = self.get_clone()[cell_indices]
for t, cur_tau in zip(t_grid, tau.T):
cur_f.append(f(t, cur_tau, tbirth, tG1, clone, isinG1, *f_args))
return np.array(cur_f)
def get_integrated_death_clock_signal(self, f):
t_grid = self.get_t_grid()
cur_f = self.get_death_clock_signal(f)
assert len(t_grid) == len(cur_f)
# Apply trapezoid rule to integrate signal
F = np.concatenate([[0.0], 0.5 * (cur_f[1:] + cur_f[:-1]) * (t_grid[1:] - t_grid[:-1])])
F = np.cumsum(F)
return F
def get_ergodic_rms(self, f, c, beta):
t_grid = self.get_t_grid()
cur_f = self.get_death_clock_signal(f)
square_error = (cur_f / c - (1 - beta))**2
assert len(t_grid) == len(cur_f)
# Apply trapezoid rule to integrate square error
F = np.sum(0.5 * (square_error[1:] + square_error[:-1]) * (t_grid[1:] - t_grid[:-1]))
rms = np.sqrt(F / (t_grid[-1] - t_grid[0]))
return rms
def get_f(self):
if self.f is None:
self.f = self.data['param']['f']
return self.f
def get_hindsight_survival_probability(self, t, f, cdf, ccm_args=None, Tdeath=None):
# Make flat array of t
t = np.array(t).flatten()
# Check validity of t
t_grid = self.get_t_grid()
assert np.all(t_grid[0] <= t)
assert np.all(t <= t_grid[-1])
# Assert non-strict monotonicity of F
F_grid = self.get_integrated_death_clock_signal(f)
assert np.all(np.diff(F_grid) >= 0)
# If t_grid[-1] is infinity, drop last element from t_grid and F_grid
if t_grid[-1] == np.inf:
t_grid = t_grid[:-1]
F_grid = F_grid[:-1]
# Retrieve parameters if needed
if Tdeath is None:
# None argument because we assume homotypic Tdeath
Tdeath = self.data['param']['Tdeath'](np.array([None]))
if ccm_args is None:
ccm_args = self.data['param']['ccm_args']
# Initialise output
output = []
for cur_t in t:
# Step 1: Find death time
F0 = np.interp(cur_t, t_grid, F_grid)
F1 = Tdeath + F0
t1 = interpolate_F_inverse(F1, t_grid, F_grid)
# Step 2: Compute time until death
time_until_death = t1 - cur_t
# Sanity check
assert time_until_death >= 0
# Step 3: if G1 duration is smaller than time until death, then the cell
# survives. Hence the survival probability is the cumulative distribution
# function of the time until death
output.append(cdf(time_until_death, *ccm_args))
return np.array(output)
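# Note on get_hindsight_survival_probability: writing F for the integrated
# death clock signal, the death time t1 of a cell whose clock is reset at
# time t solves F(t1) = F(t) + Tdeath, and the cell survives G1 exactly when
# its G1 duration is shorter than t1 - t; the returned probability is
# therefore cdf(t1 - t) under the supplied cell cycle model.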
def plot_cell_history(self, ax, modulo=np.inf):
tbirth = self.get_tbirth()
tG1 = self.get_tG1()
transitioned = self.get_transitioned()
ttransition = self.get_ttransition()
t_last_alive = self.get_t_last_alive()
first_G1 = True
first_G2 = True
for cell_idx, (tbirth, transitioned, ttransition, t_last_alive) in enumerate(zip(
self.get_tbirth(),
self.get_transitioned(),
self.get_ttransition(),
self.get_t_last_alive())):
cell_idx = np.mod(cell_idx, modulo)
if first_G1:
first_G1 = False
G1_label='G1'
else:
G1_label=None
if transitioned:
if first_G2:
first_G2 = False
G2_label='G2'
else:
G2_label=None
ax.plot([tbirth, ttransition], [cell_idx, cell_idx], 'b', label=G1_label)
ax.plot([ttransition, t_last_alive], [cell_idx, cell_idx], 'r', label=G2_label)
else:
ax.plot([tbirth, t_last_alive], [cell_idx, cell_idx], 'b', label=G1_label)
ax.set_xlabel('t')
ax.set_ylabel('cell_idx mod {}'.format(modulo))
ax.legend()
def plot_cell_counts(self, ax):
t_events = self.get_t_events()
cell_count = self.get_cell_count()
G1_cell_count = self.get_G1_cell_count()
G2_cell_count = self.get_G2_cell_count()
ax.step(t_events, cell_count, where='post', label='total')
ax.step(t_events, G1_cell_count, where='post', label='G1')
ax.step(t_events, G2_cell_count, where='post', label='G2')
ax.set_xlabel('t')
ax.set_ylabel('cell count')
ax.legend()
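# Post-processing sketch (assumes matplotlib is available; it is not imported
# in this module):
#
#   import matplotlib.pyplot as plt
#   mc_data = WellMixedSimulationData(data)
#   fig, (ax0, ax1) = plt.subplots(2, 1)
#   mc_data.plot_cell_counts(ax0)
#   mc_data.plot_cell_history(ax1, modulo=100)
#   plt.show()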
if __name__ == "__main__":
# Test 1: single cell with constant base rate and fixed G1, no divisions or deaths
print("Test 1: no divisions or deaths")
f = base_rate_death_signal
ccm = lambda random_state, clone: 1
Tdeath = 2
tG2 = 1
tstart = 0
tend = 1
tau_0 = [0]
tbirth_0 = [0]
tG1_0 = [1]
clone_0 = [0]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 2: same, but with 1 division
print("Test 2: 1 division")
tbirth_0 = [-1.5]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 1
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 3: same, but with 3 divisions and 2 transitions, also test different
# clone number
print("Test 3: 3 divisions and 2 transitions")
tstart, tend = 0, 3
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, [1])
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 3
assert mc_data.get_num_transitions() == 2
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
assert np.all(mc_data.get_tbirth() == [-1.5, 0.5, 0.5, 2.5, 2.5, 2.5, 2.5])
assert np.all(mc_data.get_tG1() == [1.0] * 7)
assert np.all(mc_data.get_clone() == [1] * 7)
assert np.all(mc_data.get_died() == [False] * 7)
assert np.all(mc_data.get_tdeath() == [np.inf] * 7)
assert np.all(mc_data.get_divided() == [True, True, True, False, False, False, False])
assert np.all(mc_data.get_tdivision() == [0.5, 2.5, 2.5, np.inf, np.inf, np.inf, np.inf])
assert np.all(mc_data.get_transitioned() == [True, True, True, False, False, False, False])
assert np.all(mc_data.get_ttransition() == [-0.5, 1.5, 1.5, np.inf, np.inf, np.inf, np.inf])
assert np.all(mc_data.get_t_last_alive() == [0.5, 2.5, 2.5, 3, 3, 3, 3])
assert np.all(mc_data.get_max_age() == [2, 2, 2, 0.5, 0.5, 0.5, 0.5])
assert np.all(mc_data.get_time_in_G1() == [1, 1, 1, 0.5, 0.5, 0.5, 0.5, ])
assert np.all(mc_data.get_time_in_G2() == [1, 1, 1, 0.0, 0.0, 0.0, 0.0, ])
assert np.all(mc_data.get_effective_time_in_G1() == [1, 1, 1])
# Test 4: same, but with 1 death
print("Test 4: 1 death")
tstart, tend = 0, 1
tbirth_0 = [0]
tau_0 = [1.5]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 1
assert mc_data.get_status() == 1
assert np.all(mc_data.get_tbirth() == [0])
assert np.all(mc_data.get_tG1() == [1.0])
assert np.all(mc_data.get_clone() == [0])
assert np.all(mc_data.get_died() == [True])
assert np.all(mc_data.get_tdeath() == [0.5] )
assert np.all(mc_data.get_divided() == [False] )
assert np.all(mc_data.get_tdivision() == [np.inf])
assert np.all(mc_data.get_transitioned() == [False])
assert np.all(mc_data.get_ttransition() == [np.inf])
assert np.all(mc_data.get_t_last_alive() == [0.5])
assert np.all(mc_data.get_max_age() == [0.5])
assert np.all(mc_data.get_time_in_G1() == [0.5])
# Test 5: same, but with transition
print("Test 5: 1 transition")
tstart, tend = 0, 1
tbirth_0 = [-0.5]
tau_0 = [1.5]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_transitions() == 1
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 6: two cells, with constant base rate and fixed G1, no division or deaths
print("Test 6: 2 cells, no divisions or deaths")
tbirth_0 = [0, 0]
tau_0 = [0, 0]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 7: same, but with 1 division
print("Test 7: 2 cells, 1 division")
tbirth_0 = [-1.5, 0]
tau_0 = [0, 0]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 1
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 8: same, but with 2 divisions
print("Test 8: 2 cells, 2 divisions, not simultaneous")
tbirth_0 = [-1.5, -1.4]
tau_0 = [0, 0]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 2
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 9: same, but with 1 death
print("Test 9: 2 cells, 1 death")
tbirth_0 = [0, 0]
tau_0 = [1.5, 0]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 1
assert mc_data.get_status() == 0
# Test 10: same, but with 2 deaths
print("Test 10: 2 cells, 2 deaths, not simultaneous")
tbirth_0 = [0, 0]
tau_0 = [1.5, 1.6]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 2
assert mc_data.get_status() == 1
# Test 11: same, but with 2 deaths, simultaneously
print("Test 11: 2 cells, 2 deaths, simultaneous")
tbirth_0 = [0, 0]
tau_0 = [1.5, 1.5]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 2
assert mc_data.get_status() == 1
# Test 12: same, but with 1 division, 1 death
print("Test 12: 2 cells, 1 division, 1 death, simultaneous")
tbirth_0 = [-1.5, 0]
tau_0 = [0, 1.5]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 1
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 1
assert mc_data.get_status() == 0
# Test 13: same, but with 1 division, 1 death
print("Test 13: 1 cells, 1 division, 1 death, non-simultaneous")
tbirth_0 = [-1.4, 0]
tau_0 = [0, 1.5]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 1
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 1
assert mc_data.get_status() == 0
# Test 14: same, but with 2 divisions
print("Test 14: 2 cells, 2 divisions, negative control for death")
tbirth_0 = [-1.5, -1.4]
tau_0 = [1.9, 1.8]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 2
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 15: same as 14, but with f_is_stepwise_constant = False
print("Test 15: 2 cells, 2 divisions, negative control for death")
tbirth_0 = [-1.5, -1.4]
tau_0 = [1.9, 1.8]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend, f_is_stepwise_constant=False)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 2
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 16: same as 14, but with ccm that takes argument
print("Test 16: 2 cells, 2 divisions, cell cycle model with arg")
tbirth_0 = [-1.5, -1.4]
tau_0 = [1.9, 1.8]
tG1_0 = [1, 1]
clone_0 = [0, 1]
ccm = lambda random_state, clone, x: x
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend, ccm_args=(.9,))
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 2
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 17: same as 14, but with f that takes argument
print("Test 17: 2 cells, 2 divisions, death clock signal with arg")
tbirth_0 = [-1.5, -1.4]
tau_0 = [1.9, 1.8]
tG1_0 = [1, 1]
clone_0 = [0, 1]
ccm = lambda random_state, clone: 1
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend, f_args=(.9,))
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 2
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 18: same as 17, but with f_is_stepwise_constant = False
print("Test 18: 2 cells, 2 divisions, death clock signal with arg")
tbirth_0 = [-1.5, -1.4]
tau_0 = [1.9, 1.8]
tG1_0 = [1, 1]
clone_0 = [0, 1]
ccm = lambda random_state, clone: 1
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend, f_args=(.9,),
f_is_stepwise_constant=False)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 2
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 19: exponential cell cycle model
print("Test 19: exponential cell cycle model")
from time import time
t1 = time()
simulator = WellMixedSimulator(tstart=0, tend=np.inf, max_cell_count=50)
data = simulator.run()
t2 = time()
for key in data:
print('{}: {}'.format(key, data[key]))
print()
mc_data = WellMixedSimulationData(data)
assert mc_data.get_status() == 2
cell_count = mc_data.get_cell_count()
total_cell_count = mc_data.get_total_cell_count()
final_cell_count = cell_count[-1]
cell_count_alt = np.array([len(cell_indices) for cell_indices in data['cell_indices']])
total_cell_count_alt = len(data['tbirth'])
final_cell_count_alt = cell_count_alt[-1]
t = mc_data.get_t_events()
t_alt = np.array([t_grid[0] for t_grid in data['t_grid']])
print("cell_count.shape: {}".format(cell_count.shape))
print("cell_count_alt.shape: {}".format(cell_count_alt.shape))
print("total_cell_count: {}".format(total_cell_count))
print("total_cell_count_alt: {}".format(total_cell_count_alt))
print("final_cell_count: {}".format(final_cell_count))
print("final_cell_count_alt: {}".format(final_cell_count_alt))
print("t.shape: {}".format(t.shape))
print("t_alt.shape: {}".format(t_alt.shape))
print("t t_alt: cell_count cell_count_alt")
for cur_t, cur_t_alt, cur_cell_count, cur_cell_count_alt in \
zip(t, t_alt, cell_count, cell_count_alt):
print("{:.2f} {:.2f}: {} {}".format(
cur_t,
cur_t_alt,
cur_cell_count,
cur_cell_count_alt
))
print("t: cell_count")
for cur_t, cur_cell_count, in zip(t, cell_count):
print("{:.2f}: {}".format(
cur_t,
cur_cell_count
))
clone_header_str = "t:"
clones = mc_data.get_unique_clones()
for clone in clones:
clone_header_str += " " + str(clone)
print(clone_header_str)
clone_cell_count = mc_data.get_clone_cell_count()
clone_cell_count_0 = mc_data.get_cell_count_for_clone(0)
clone_cell_count_1 = mc_data.get_cell_count_for_clone(1)
assert np.all(clone_cell_count[:, 0] == clone_cell_count_0)
assert np.all(clone_cell_count[:, 1] == clone_cell_count_1)
for cur_t, cur_clone_cell_count in zip(t, clone_cell_count):
print("{:.2f}: {}".format(cur_t, cur_clone_cell_count))
print("Computation time: {}s".format(t2 - t1))
print("Computation time per final cell: {}".format((t2 - t1) / cell_count[-1]))
print("Computation time per total cell: {}".format((t2 - t1) / total_cell_count))
# Test 20: uniform cell cycle model
print("Test 20: uniform cell cycle model")
tG1_0 = uniform.rvs(loc=50 - 0.5 * 20, scale=20, size=4) + 50
t1 = time()
simulator = WellMixedSimulator(tstart=0, tend=np.inf, max_cell_count=50,
ccm=uniform_ccm)
data = simulator.run(tG1_0=tG1_0)
t2 = time()
for key in data:
print('{}: {}'.format(key, data[key]))
print()
mc_data = WellMixedSimulationData(data)
assert mc_data.get_status() == 2
cell_count = mc_data.get_cell_count()
total_cell_count = mc_data.get_total_cell_count()
final_cell_count = cell_count[-1]
cell_count_alt = np.array([len(cell_indices) for cell_indices in data['cell_indices']])
total_cell_count_alt = len(data['tbirth'])
final_cell_count_alt = cell_count_alt[-1]
t = mc_data.get_t_events()
t_alt = np.array([t_grid[0] for t_grid in data['t_grid']])
print("cell_count.shape: {}".format(cell_count.shape))
print("cell_count_alt.shape: {}".format(cell_count_alt.shape))
print("total_cell_count: {}".format(total_cell_count))
print("total_cell_count_alt: {}".format(total_cell_count_alt))
print("final_cell_count: {}".format(final_cell_count))
print("final_cell_count_alt: {}".format(final_cell_count_alt))
print("t.shape: {}".format(t.shape))
print("t_alt.shape: {}".format(t_alt.shape))
print("t t_alt: cell_count cell_count_alt")
for cur_t, cur_t_alt, cur_cell_count, cur_cell_count_alt in \
zip(t, t_alt, cell_count, cell_count_alt):
print("{:.2f} {:.2f}: {} {}".format(
cur_t,
cur_t_alt,
cur_cell_count,
cur_cell_count_alt
))
print("t: cell_count")
for cur_t, cur_cell_count, in zip(t, cell_count):
print("{:.2f}: {}".format(
cur_t,
cur_cell_count
))
clone_header_str = "t:"
clones = mc_data.get_unique_clones()
for clone in clones:
clone_header_str += " " + str(clone)
print(clone_header_str)
clone_cell_count = mc_data.get_clone_cell_count()
clone_cell_count_0 = mc_data.get_cell_count_for_clone(0)
clone_cell_count_1 = mc_data.get_cell_count_for_clone(1)
assert np.all(clone_cell_count[:, 0] == clone_cell_count_0)
assert np.all(clone_cell_count[:, 1] == clone_cell_count_1)
for cur_t, cur_clone_cell_count in zip(t, clone_cell_count):
print("{:.2f}: {}".format(cur_t, cur_clone_cell_count))
print("Computation time: {}s".format(t2 - t1))
print("Computation time per final cell: {}".format((t2 - t1) / cell_count[-1]))
print("Computation time per total cell: {}".format((t2 - t1) / total_cell_count))
# Test 21: exponential cell cycle model with normalised g2 signal
print("Test 21: exponential cell cycle model with normalised g2 signal")
t1 = time()
simulator = WellMixedSimulator(tstart=0, tend=np.inf, max_cell_count=50,
f=normalised_g2_death_signal, Tdeath=25)
data = simulator.run()
t2 = time()
for key in data:
print('{}: {}'.format(key, data[key]))
print()
mc_data = WellMixedSimulationData(data)
cell_count = mc_data.get_cell_count()
total_cell_count = mc_data.get_total_cell_count()
final_cell_count = cell_count[-1]
cell_count_alt = np.array([len(cell_indices) for cell_indices in data['cell_indices']])
total_cell_count_alt = len(data['tbirth'])
final_cell_count_alt = cell_count_alt[-1]
t = mc_data.get_t_events()
t_alt = np.array([t_grid[0] for t_grid in data['t_grid']])
print("cell_count.shape: {}".format(cell_count.shape))
print("cell_count_alt.shape: {}".format(cell_count_alt.shape))
print("total_cell_count: {}".format(total_cell_count))
print("total_cell_count_alt: {}".format(total_cell_count_alt))
print("final_cell_count: {}".format(final_cell_count))
print("final_cell_count_alt: {}".format(final_cell_count_alt))
print("t.shape: {}".format(t.shape))
print("t_alt.shape: {}".format(t_alt.shape))
print("t: cell_count")
for cur_t, cur_cell_count, in zip(t, cell_count):
print("{:.2f}: {}".format(
cur_t,
cur_cell_count
))
clone_header_str = "t:"
clones = mc_data.get_unique_clones()
for clone in clones:
clone_header_str += " " + str(clone)
print(clone_header_str)
clone_cell_count = mc_data.get_clone_cell_count()
clone_cell_count_0 = mc_data.get_cell_count_for_clone(0)
clone_cell_count_1 = mc_data.get_cell_count_for_clone(1)
assert np.all(clone_cell_count[:, 0] == clone_cell_count_0)
assert np.all(clone_cell_count[:, 1] == clone_cell_count_1)
for cur_t, cur_clone_cell_count in zip(t, clone_cell_count):
print("{:.2f}: {}".format(cur_t, cur_clone_cell_count))
print("Computation time: {}s".format(t2 - t1))
print("Computation time per final cell: {}".format((t2 - t1) / cell_count[-1]))
print("Computation time per total cell: {}".format((t2 - t1) / total_cell_count))
# Test 22: exponential cell cycle model with absolute g2 signal
print("Test 22: exponential cell cycle model with absolute g2 signal")
t1 = time()
simulator = WellMixedSimulator(tstart=0, tend=2000, max_cell_count=50,
f=g2_death_signal, Tdeath=300)
data = simulator.run()
t2 = time()
for key in data:
print('{}: {}'.format(key, data[key]))
print()
mc_data = WellMixedSimulationData(data)
cell_count = mc_data.get_cell_count()
total_cell_count = mc_data.get_total_cell_count()
final_cell_count = cell_count[-1]
cell_count_alt = np.array([len(cell_indices) for cell_indices in data['cell_indices']])
total_cell_count_alt = len(data['tbirth'])
final_cell_count_alt = cell_count_alt[-1]
t = mc_data.get_t_events()
t_alt = np.array([t_grid[0] for t_grid in data['t_grid']])
print("cell_count.shape: {}".format(cell_count.shape))
print("cell_count_alt.shape: {}".format(cell_count_alt.shape))
print("total_cell_count: {}".format(total_cell_count))
print("total_cell_count_alt: {}".format(total_cell_count_alt))
print("final_cell_count: {}".format(final_cell_count))
print("final_cell_count_alt: {}".format(final_cell_count_alt))
print("t.shape: {}".format(t.shape))
print("t_alt.shape: {}".format(t_alt.shape))
print("t: cell_count")
for cur_t, cur_cell_count, in zip(t, cell_count):
print("{:.2f}: {}".format(
cur_t,
cur_cell_count
))
clone_header_str = "t:"
clones = mc_data.get_unique_clones()
for clone in clones:
clone_header_str += " " + str(clone)
print(clone_header_str)
clone_cell_count = mc_data.get_clone_cell_count()
clone_cell_count_0 = mc_data.get_cell_count_for_clone(0)
clone_cell_count_1 = mc_data.get_cell_count_for_clone(1)
assert np.all(clone_cell_count[:, 0] == clone_cell_count_0)
assert np.all(clone_cell_count[:, 1] == clone_cell_count_1)
for cur_t, cur_clone_cell_count in zip(t, clone_cell_count):
print("{:.2f}: {}".format(cur_t, cur_clone_cell_count))
print("Computation time: {}s".format(t2 - t1))
print("Computation time per final cell: {}".format((t2 - t1) / cell_count[-1]))
print("Computation time per total cell: {}".format((t2 - t1) / total_cell_count))
# Print data
print(mc_data)
# Test 23: G1 proportion analysis function
avg_tG1_fun = lambda eta, tG1: tG1 * (1 +
(np.exp(-eta) - 1) / (np.exp(eta) - 1))
gamma_fun = lambda beta, tG: beta * tG
tG = 100
Tdeath = 50
c = 1
N_beta = 10
beta = np.arange( 1 / N_beta, 1, 1 / N_beta)
f = average_f_g1_proportion(avg_tG1_fun, gamma_fun, tG, beta, Tdeath, c)
print("f: {}".format(f))
print("1 - beta: {}".format(1 - beta))
theta_fun = lambda tG, beta, Tdeath, c, f: 1 - np.exp(- Tdeath / (c * f * beta * tG))
theta_approx_fun = lambda tG, beta, Tdeath, c: theta_fun(tG, beta, Tdeath, c, 1 - beta)
theta_approx_fun2 = lambda tG, beta, Tdeath, c: 1 - np.exp( - Tdeath / (c * beta * (1 - beta) * tG))
print("theta(f): {}".format(theta_fun(tG, beta, Tdeath, c, f)))
print("theta(beta): {}".format(theta_approx_fun(tG, beta, Tdeath, c)))
print("theta2(beta): {}".format(theta_approx_fun2(tG, beta, Tdeath, c)))
# Test 24: same as test 2, but with max_cell_count = 2
print("Test 24: 1 division")
f = base_rate_death_signal
ccm = lambda random_state, clone: 1
Tdeath = 2
tG2 = 1
tstart = 0
tend = 1
tau_0 = [0]
tbirth_0 = [-1.5]
tG1_0 = [1]
clone_0 = [0]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend, max_cell_count=2)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 1
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 2
assert np.all(mc_data.get_t_last_alive() == [0.5, 0.5, 0.5])
# Test 25: test cumulative density functions
assert np.isclose(exponential_cdf(0), 0)
assert np.isclose(exponential_cdf(50), 1 - 1 / np.e)
assert np.isclose(exponential_cdf(100), 1 - 1 / np.e / np.e)
assert np.isclose(exponential_cdf(100, 100), 1 - 1 / np.e)
assert np.isclose(exponential_cdf(200, 100), 1 - 1 / np.e / np.e)
assert np.isclose(uniform_cdf(0), 0)
assert np.isclose(uniform_cdf(50), 0.5)
assert np.isclose(uniform_cdf(100), 1)
assert np.isclose(uniform_cdf(100, 100), 0.5)
assert np.isclose(uniform_cdf(200, 100), 1)
assert np.isclose(uniform_cdf(55), 0.75)
assert np.isclose(uniform_cdf(550, 500, 200), 0.75)
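# The checks above are consistent with exponential_cdf(t, tG1=50) = 1 - exp(-t / tG1)
# and with uniform_cdf(t, tG1=50, r=20) being the CDF of a uniform distribution on
# [tG1 - r/2, tG1 + r/2]; the parameter names are assumptions, only the default
# values 50 and 20 follow from the assertions.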
# Test 26: interpolate_F_inverse
t_grid = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
F_grid = np.array([0, 1, 1, 1, 1, 1, 1, 2, 2, 2])
assert np.isclose(interpolate_F_inverse(0, t_grid, F_grid), 0)
assert np.isclose(interpolate_F_inverse(0.5, t_grid, F_grid), 0.5)
assert np.isclose(interpolate_F_inverse(1, t_grid, F_grid), 1)
assert np.isclose(interpolate_F_inverse(1.5, t_grid, F_grid), 3.5)
assert np.isclose(interpolate_F_inverse(2, t_grid, F_grid), 4)
assert np.isclose(interpolate_F_inverse(3, t_grid, F_grid), np.inf)
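# The assertions above characterise interpolate_F_inverse(F_value, t_grid, F_grid)
# as a generalised inverse: the earliest t at which the piecewise-linear
# interpolant of F_grid reaches F_value, with np.inf returned when F_value
# exceeds F_grid[-1].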
# Test 27: zero G2 duration
print("Test 27: zero G2 duration")
f = base_rate_death_signal
ccm = lambda random_state, clone: 1
Tdeath = 2
tG2 = 0
tstart = 0
tend = 2
tau_0 = [0]
tbirth_0 = [0]
tG1_0 = [1]
clone_0 = [0]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 1
assert mc_data.get_num_transitions() == 1
assert mc_data.get_num_deaths() == 0
assert mc_data.get_status() == 0
# Test 28: instant death
print("Test 28: instant death")
f = lambda a, b, c, d, e, f: base_rate_death_signal(a, b, c, d, e, f, base_rate=1000)
ccm = lambda random_state, clone: 1
Tdeath = 2
tG2 = 1
tstart = 0
tend = 10
min_cell_count = 10
#tau_0 = np.zeros(128)
tau_0 = np.linspace(0, 2 - 1/128, 128)
tbirth_0 = np.zeros(128)
tG1_0 = np.ones(128)
clone_0 = np.zeros(128)
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend,
min_cell_count=min_cell_count)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_deaths() == 118
assert mc_data.get_status() == 3
print(mc_data)
# Test 29: exponential cell cycle model with normalised g2 signal and seed
print("Test 29: exponential cell cycle model with normalised g2 signal and seed")
seed = 0
tbirth_0 = np.zeros(4)
tG1_0 = np.zeros(4)
t1 = time()
simulator = WellMixedSimulator(tstart=0, tend=1000, max_cell_count=100,
min_cell_count=2,
f=normalised_g2_death_signal, Tdeath=20)
data = simulator.run(seed=seed, tbirth_0=tbirth_0, tG1_0=tG1_0)
t2 = time()
assert data['param']['f'] == normalised_g2_death_signal
assert data['param']['ccm'] == exponential_ccm
assert data['param']['Tdeath'](np.array([None])) == 20
assert data['param']['tG2'](np.array([None])) == 50
assert data['param']['tstart'] == 0
assert data['param']['tend'] == 1000
assert data['param']['f_args'] == ()
assert data['param']['ccm_args'] == ()
assert data['param']['max_cell_count'] == 100
assert data['param']['min_cell_count'] == 2
assert data['param']['f_is_stepwise_constant'] == True
assert np.all(data['init_cond']['tau_0'] == np.zeros(4))
assert np.all(data['init_cond']['tbirth_0'] == tbirth_0)
assert np.all(data['init_cond']['tG1_0'] == tG1_0)
assert np.all(data['init_cond']['clone_0'] == np.arange(4))
assert data['init_cond']['seed'] == seed
for key in data:
print('{}: {}'.format(key, data[key]))
print()
mc_data = WellMixedSimulationData(data)
cell_count = mc_data.get_cell_count()
total_cell_count = mc_data.get_total_cell_count()
final_cell_count = cell_count[-1]
cell_count_alt = np.array([len(cell_indices) for cell_indices in data['cell_indices']])
total_cell_count_alt = len(data['tbirth'])
final_cell_count_alt = cell_count_alt[-1]
t = mc_data.get_t_events()
t_alt = np.array([t_grid[0] for t_grid in data['t_grid']])
print("cell_count.shape: {}".format(cell_count.shape))
print("cell_count_alt.shape: {}".format(cell_count_alt.shape))
print("total_cell_count: {}".format(total_cell_count))
print("total_cell_count_alt: {}".format(total_cell_count_alt))
print("final_cell_count: {}".format(final_cell_count))
print("final_cell_count_alt: {}".format(final_cell_count_alt))
print("t.shape: {}".format(t.shape))
print("t_alt.shape: {}".format(t_alt.shape))
print("t: cell_count")
for cur_t, cur_cell_count, in zip(t, cell_count):
print("{:.2f}: {}".format(
cur_t,
cur_cell_count
))
clone_header_str = "t:"
clones = mc_data.get_unique_clones()
for clone in clones:
clone_header_str += " " + str(clone)
print(clone_header_str)
clone_cell_count = mc_data.get_clone_cell_count()
clone_cell_count_0 = mc_data.get_cell_count_for_clone(0)
clone_cell_count_1 = mc_data.get_cell_count_for_clone(1)
assert np.all(clone_cell_count[:, 0] == clone_cell_count_0)
assert np.all(clone_cell_count[:, 1] == clone_cell_count_1)
for cur_t, cur_clone_cell_count in zip(t, clone_cell_count):
print("{:.2f}: {}".format(cur_t, cur_clone_cell_count))
print("Computation time: {}s".format(t2 - t1))
print("Computation time per final cell: {}".format((t2 - t1) / cell_count[-1]))
print("Computation time per total cell: {}".format((t2 - t1) / total_cell_count))
# Print data
print(mc_data)
G1_cell_count = mc_data.get_G1_cell_count()
G2_cell_count = mc_data.get_G2_cell_count()
combined_cell_count = G1_cell_count + G2_cell_count
cell_count = mc_data.get_cell_count()
assert np.all(combined_cell_count == cell_count)
print(G1_cell_count)
print(G2_cell_count)
print(combined_cell_count)
print(cell_count)
import matplotlib.pyplot as plt
plt.figure(figsize=(20,10))
plt.step(t, cell_count, '-', label='cell count', where='post')
plt.step(t, G1_cell_count, 'b-', label='G1', where='post')
plt.step(t, G2_cell_count, 'r-', label='G2', where='post')
plt.step(t, combined_cell_count, '--', label='combined cell count', where='post')
plt.legend()
plt.savefig('cell-count-test.png')
plt.close()
t_grid = mc_data.get_t_grid()
tau_0 = mc_data.get_tau_for_cell_index(0)
tau_4 = mc_data.get_tau_for_cell_index(4)
tau_10 = mc_data.get_tau_for_cell_index(10)
tau_54 = mc_data.get_tau_for_cell_index(54)
last_tau = mc_data.get_last_tau()
last_tau_0 = last_tau[0]
last_tau_4 = last_tau[4]
last_tau_10 = last_tau[10]
last_tau_54 = last_tau[54]
average_f = mc_data.get_average_f()
average_f_0 = average_f[0]
average_f_4 = average_f[4]
average_f_10 = average_f[10]
average_f_54 = average_f[54]
time_in_G1 = mc_data.get_time_in_G1()
time_in_G1_0 = time_in_G1[0]
time_in_G1_4 = time_in_G1[4]
time_in_G1_10 = time_in_G1[10]
time_in_G1_54 = time_in_G1[54]
tbirth = mc_data.get_tbirth()
tbirth_0 = tbirth[0]
tbirth_4 = tbirth[4]
tbirth_10 = tbirth[10]
tbirth_54 = tbirth[54]
print(t_grid[:10])
print(tau_0[:10])
print(tau_4[:10])
print(tau_10[:10])
print(tau_54[:10])
plt.figure(figsize=(20,10))
plt.plot(t_grid, tau_0, 'g', label='0')
plt.plot(t_grid, tau_4, 'r', label='4')
plt.plot(t_grid, tau_10, 'c', label='10')
plt.plot(t_grid, tau_54, 'y', label='54')
plt.hlines(last_tau_0, t_grid[0], t_grid[-1], linestyles='dashed', label='0', colors='g')
plt.hlines(last_tau_4, t_grid[0], t_grid[-1], linestyles='dashed', label='4', colors='r')
plt.hlines(last_tau_10, t_grid[0], t_grid[-1], linestyles='dashed', label='10', colors='c')
plt.hlines(last_tau_54, t_grid[0], t_grid[-1], linestyles='dashed', label='54', colors='y')
plt.plot([tbirth_0, tbirth_0 + time_in_G1_0], [0, time_in_G1_0 * average_f_0], 'g-.', label='0')
plt.plot([tbirth_4, tbirth_4 + time_in_G1_4], [0, time_in_G1_4 * average_f_4], 'r-.', label='4')
plt.plot([tbirth_10, tbirth_10 + time_in_G1_10], [0, time_in_G1_10 * average_f_10], 'c-.', label='10')
plt.plot([tbirth_54, tbirth_54 + time_in_G1_54], [0, time_in_G1_54 * average_f_54], 'y-.', label='54')
plt.legend()
plt.savefig('tau-test.png')
plt.close()
def f(*args):
signal = normalised_g2_death_signal(*args)
if len(signal) > 0:
return signal[0]
else:
return 0
cdf = exponential_cdf
Nplot = 10**4
t_plot = np.linspace(t_grid[0], t_grid[-1], Nplot)
theta = mc_data.get_hindsight_survival_probability(t_plot, f, cdf)
f_signal = mc_data.get_death_clock_signal(f)
int_signal = mc_data.get_integrated_death_clock_signal(f)
print(t_grid)
print(f_signal)
print(int_signal)
fig, ax1 = plt.subplots(figsize=(20,10))
ax2 = ax1.twinx()
ax1.step(t_grid, f_signal / f_signal.max(), label='f')
ax1.step(t, G2_cell_count / (G1_cell_count + G2_cell_count), label='G2 proportion',
where='post')
ax1.step(t_plot, theta, label=r'$\theta$')
ax1.legend()
ax1.set_ylabel('other')
ax2.plot(t_grid, int_signal, label='f integrated')
ax2.legend()
ax2.set_ylabel('f integrated')
fig.savefig('f-signal-test.png')
plt.close(fig)
fig, ax = plt.subplots(figsize=(20,10))
mc_data.plot_cell_history(ax, 100)
fig.savefig('cell-history.png')
plt.close(fig)
fig, ax = plt.subplots(figsize=(20,10))
mc_data.plot_cell_counts(ax)
fig.savefig('cell-counts.png')
plt.close(fig)
# Test 30: two cells with constant base rate and heterotypic cell cycle model
print("Test 30: heterotypic cell cycle model")
f = base_rate_death_signal
ccm = lambda random_state, clone: clone + 1
Tdeath = 2
tG2 = 1
tstart = 0
tend = 3.5
tau_0 = [0, 0]
tbirth_0 = [0, 0]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 2
assert mc_data.get_num_divisions_for_clone(0) == 1
assert mc_data.get_num_divisions_for_clone(1) == 1
assert mc_data.get_num_transitions() == 4
assert mc_data.get_num_transitions_for_clone(0) == 3
assert mc_data.get_num_transitions_for_clone(1) == 1
assert mc_data.get_num_deaths() == 0
assert mc_data.get_num_deaths_for_clone(0) == 0
assert mc_data.get_num_deaths_for_clone(1) == 0
assert mc_data.get_status() == 0
assert np.all(mc_data.get_tbirth() == [0.0, 0.0, 2.0, 2.0, 2.0, 2.0])
assert np.all(mc_data.get_tG1() == [1.0, 1.0, 1.0, 1.0, 2.0, 2.0])
assert np.all(mc_data.get_clone() == [0, 1, 0, 0, 1, 1])
assert np.all(mc_data.get_died() == [False] * 6)
assert np.all(mc_data.get_tdeath() == [np.inf] * 6)
assert np.all(mc_data.get_divided() == [True, True, False, False, False, False])
assert np.all(mc_data.get_tdivision() == [2.0, 2.0, np.inf, np.inf, np.inf, np.inf])
assert np.all(mc_data.get_transitioned() == [True, True, True, True, False, False])
assert np.all(mc_data.get_ttransition() == [1.0, 1.0, 3.0, 3.0, np.inf, np.inf])
assert np.all(mc_data.get_t_last_alive() == [2.0, 2.0, 3.5, 3.5, 3.5, 3.5])
assert np.all(mc_data.get_max_age() == [2, 2, 1.5, 1.5, 1.5, 1.5])
assert np.all(mc_data.get_time_in_G1() == [1, 1, 1, 1, 1.5, 1.5])
assert np.all(mc_data.get_time_in_G2() == [1, 1, 0.5, 0.5, 0.0, 0.0])
# Test 31: two cells with constant base rate and heterotypic death threshold
print("Test 31: heterotypic death threshold")
f = base_rate_death_signal
ccm = lambda random_state, clone: 1
Tdeath = lambda clone: clone + 1
tG2 = 1
tstart = 0
tend = 1.5
tau_0 = [0, 0]
tbirth_0 = [0, 0]
tG1_0 = [2, 2]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 0
assert mc_data.get_num_divisions_for_clone(0) == 0
assert mc_data.get_num_divisions_for_clone(1) == 0
assert mc_data.get_num_transitions() == 0
assert mc_data.get_num_transitions_for_clone(0) == 0
assert mc_data.get_num_transitions_for_clone(1) == 0
assert mc_data.get_num_deaths() == 1
assert mc_data.get_num_deaths_for_clone(0) == 1
assert mc_data.get_num_deaths_for_clone(1) == 0
assert mc_data.get_status() == 0
assert np.all(mc_data.get_tbirth() == [0.0, 0.0])
assert np.all(mc_data.get_tG1() == [2.0, 2.0])
assert np.all(mc_data.get_clone() == [0, 1])
assert np.all(mc_data.get_died() == [True, False])
assert np.all(mc_data.get_tdeath() == [1.0, np.inf])
assert np.all(mc_data.get_divided() == [False, False])
assert np.all(mc_data.get_tdivision() == [np.inf, np.inf])
assert np.all(mc_data.get_transitioned() == [False, False])
assert np.all(mc_data.get_ttransition() == [np.inf, np.inf])
assert np.all(mc_data.get_t_last_alive() == [1.0, 1.5])
assert np.all(mc_data.get_max_age() == [1, 1.5])
assert np.all(mc_data.get_time_in_G1() == [1, 1.5])
assert np.all(mc_data.get_time_in_G2() == [0, 0])
# Test 32: two cells with constant base rate and heterotypic G2 duration
print("Test 32: heterotypic G2 duration")
f = base_rate_death_signal
ccm = lambda random_state, clone: 1
Tdeath = 2
tG2 = lambda clone: clone + 1
tstart = 0
tend = 2.5
tau_0 = [0, 0]
tbirth_0 = [0, 0]
tG1_0 = [1, 1]
clone_0 = [0, 1]
simulator = WellMixedSimulator(f, ccm, Tdeath, tG2, tstart, tend)
data = simulator.run(tau_0, tbirth_0, tG1_0, clone_0)
mc_data = WellMixedSimulationData(data)
for key, value in data.items():
print('{}: {}'.format(key, value))
print()
assert mc_data.get_num_divisions() == 1
assert mc_data.get_num_divisions_for_clone(0) == 1
assert mc_data.get_num_divisions_for_clone(1) == 0
assert mc_data.get_num_transitions() == 2
assert mc_data.get_num_transitions_for_clone(0) == 1
assert mc_data.get_num_transitions_for_clone(1) == 1
assert mc_data.get_num_deaths() == 0
assert mc_data.get_num_deaths_for_clone(0) == 0
assert mc_data.get_num_deaths_for_clone(1) == 0
assert mc_data.get_status() == 0
assert np.all(mc_data.get_tbirth() == [0.0, 0.0, 2, 2])
assert np.all(mc_data.get_tG1() == [1, 1, 1, 1])
assert np.all(mc_data.get_clone() == [0, 1, 0, 0])
assert np.all(mc_data.get_died() == [False] * 4)
assert np.all(mc_data.get_tdeath() == [np.inf] * 4)
assert np.all(mc_data.get_divided() == [True, False, False, False])
assert np.all(mc_data.get_tdivision() == [2, np.inf, np.inf, np.inf])
assert np.all(mc_data.get_transitioned() == [True, True, False, False])
assert np.all(mc_data.get_ttransition() == [1, 1, np.inf, np.inf])
assert np.all(mc_data.get_t_last_alive() == [2, 2.5, 2.5, 2.5])
assert np.all(mc_data.get_max_age() == [2, 2.5, 0.5, 0.5])
assert np.all(mc_data.get_time_in_G1() == [1, 1, 0.5, 0.5])
assert np.all(mc_data.get_time_in_G2() == [1, 1.5, 0, 0])
assert np.all(mc_data.get_G1_cell_count() == mc_data.get_G1_cell_count_for_clone(0)
+ mc_data.get_G1_cell_count_for_clone(1))
assert np.all(mc_data.get_G2_cell_count() == mc_data.get_G2_cell_count_for_clone(0)
+ mc_data.get_G2_cell_count_for_clone(1))
# Test 33: test heterotypic cell cycle models and death signal functions
print("Test 33: heterotypic cell cycle models and death signal functions")
# Cell cycle models
# This assert has a very small chance of failing even if there is no problem
assert np.mean([exponential_ccm_heterotypic(clone=1, tG1_param_clone_1=5) for
i in range(1000)]) < 50
assert np.mean([uniform_ccm_heterotypic(clone=1, tG1_param_clone_1=100, r_clone_1=1) for
i in range(1000)]) > 99.5
# Death signal functions
# Base rate
try:
base_rate_death_signal_heterotypic(
t = np.array([0]),
# -*- coding: utf-8 -*-
# Copyright 2018-2022 the orix developers
#
# This file is part of orix.
#
# orix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with orix. If not, see <http://www.gnu.org/licenses/>.
from diffpy.structure import Lattice, Structure
from matplotlib import __version__ as _MPL_VERSION
from matplotlib import pyplot as plt
import numpy as np
from packaging import version
import pytest
from orix.plot._util import Arrow3D
from orix.plot.unit_cell_plot import (
_calculate_basic_unit_cell_edges,
_calculate_basic_unit_cell_vertices,
)
from orix.quaternion import Orientation
def test_unit_cell_plot_default():
ori = Orientation.random()
fig = ori.plot_unit_cell(return_figure=True)
assert len(fig.axes) == 1
axes = fig.axes[0]
assert len(axes.lines) == 12 # 12 edges in orthorhombic unit cell
# 6 Arrow3D -> 3 for both sample and crystal reference frames
if version.parse(_MPL_VERSION) >= version.parse("3.4"): # pragma: no cover
assert len(axes.patches) == 6
else: # pragma: no cover
assert len(axes.artists) == 6
# test default projection
assert axes.azim == -90
assert round(axes.elev) == 90
plt.close("all")
plt.close("all")
def test_unit_cell_plot_multiple_orientations_raises():
ori = Orientation.random((2,))
with pytest.raises(ValueError, match="Can only plot a single unit cell"):
ori.plot_unit_cell()
plt.close("all")
def test_unit_cell_plot_orthorhombic():
ori = Orientation.random()
lattice = Lattice(1, 2, 3, 90, 90, 90)
structure = Structure(lattice=lattice)
_ = ori.plot_unit_cell(return_figure=True, structure=structure)
def test_unit_cell_plot_hexagonal():
ori = Orientation.random()
lattice = Lattice(1, 1, 2, 90, 90, 120)
structure = Structure(lattice=lattice)
fig = ori.plot_unit_cell(return_figure=True, structure=structure)
axes = fig.axes[0]
# should only be 12 edges in hexagonal unit cell, this test checks
# that the edges parallel to (0000)-(11-20) are not plotted
assert len(axes.lines) == 12
plt.close("all")
def test_unit_cell_plot_crystal_reference_axes_position_center():
ori = Orientation.identity()
a1, a2, a3 = 1, 1.5, 2
lattice = Lattice(a1, a2, a3, 90, 90, 90)
structure = Structure(lattice=lattice)
# test cell center
fig = ori.plot_unit_cell(
return_figure=True,
structure=structure,
crystal_axes_loc="center",
)
if version.parse(_MPL_VERSION) >= version.parse("3.4"): # pragma: no cover
arrows = fig.axes[0].patches
else: # pragma: no cover
arrows = fig.axes[0].artists
crys_ref_ax = [p for p in arrows if "Crystal reference axes" in p.get_label()]
crys_ref_ax_data = np.stack([np.array(a._verts3d) for a in crys_ref_ax])
assert np.allclose(crys_ref_ax_data[:, :, 0], 0)
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
import numpy as np
from os import listdir
from os.path import isfile, join
import seaborn as sns
sns.set()
from matplotlib.ticker import MultipleLocator
import math
from scipy.stats import linregress
import spikewarp as sw
def draw_spike_count_plots(single_neuron_analysis_figs_dir, data_loaders, investigation):
"""
"""
dictionary_of_info_by_neuron_group = {'L23EXC':{},
'L23INH':{},
'L4EXC':{},
'L4INH':{},
'L5AEXC':{},
'L5AINH':{},
'L5BEXC':{},
'L5BINH':{},
}
for neuron_group_dictionary in dictionary_of_info_by_neuron_group.values():
neuron_group_dictionary['non_zero_spike_counts'] = []
spike_count_means = []
spike_count_variances = []
atleast_one_spike_reliabilities = []
all_non_zero_spike_counts = []
for experiment_index in investigation["experiments_to_process"]:
data_loader = data_loaders[investigation["experiments_to_process"].index(experiment_index)]
for scsuo in data_loader.list_of_all_scsuos:
neuron_group_key = scsuo.neurophys_corresponding_layer_string_of_unit + scsuo.exc_inh_type_of_unit
dictionary_of_info_by_neuron_group[neuron_group_key]['non_zero_spike_counts'].extend(scsuo.spike_counts_per_trial[np.argwhere(scsuo.spike_counts_per_trial > 0)])
spike_count_means.append(scsuo.mean_trial_spike_count_for_condition)
spike_count_variances.append(scsuo.variance_trial_spike_count_for_condition)
atleast_one_spike_reliabilities.append(scsuo.atleast_one_spike_reliability)
all_non_zero_spike_counts.extend(scsuo.spike_counts_per_trial[np.argwhere(scsuo.spike_counts_per_trial > 0)])
sw.draw_spike_count_histograms(dictionary_of_info_by_neuron_group, single_neuron_analysis_figs_dir + "NonZeroSpikeCountHistoByNeuronGroup.pdf")
sw.normal_histo_plot([atleast_one_spike_reliabilities], single_neuron_analysis_figs_dir + "atleast_one_spike_reliabilities", bins=20, density=False, x_axis_label='Did spike once reliability', y_axis_label='Frequency')
n,_ = np.histogram(all_non_zero_spike_counts, bins=np.arange(1, 8) - 0.5, density=True)
sw.draw_neighbouring_bar_chart([n], ('1', '2', '3', '4', '5', '6', '7', '8'), single_neuron_analysis_figs_dir + "NonZeroSpikeCountHisto.pdf", '', (''), 'Spike count', custom_y_tick_locators=[1, .2], optional_y_axis_label='Normalised frequency')
sw.basic_x_y_plot([spike_count_means], [spike_count_variances], single_neuron_analysis_figs_dir + "SpikeCountMeansVsVariances.pdf", draw_y_equals_x=True, y_equals_x_max=3.0)
def single_unit_stationarity_tests(single_neuron_analysis_figs_dir, draw_individual_unit_plots, data_loaders, investigation):
ALL_FIRST_SPIKES_FOR_HISTOGRAM = []
ALL_lr_actual_trial_indices_vs_fs = []
ALL_lr_autocorrelation_lag_1s = []
ALL_adfuller_dataframes = []
ALL_KPSS_pvalues = []
acf_pvalues = []
pacf_pvalues = []
latencies_vs_trial_plots_directory = single_neuron_analysis_figs_dir + "LatenciesVsTrialPlots/"
sw.mkdir(latencies_vs_trial_plots_directory)
number_of_lags = 5
dictionary_of_non_stationarity_dictionaries_by_neuron_group = {'L23EXC':{},
'L23INH':{},
'L4EXC':{},
'L4INH':{},
'L5AEXC':{},
'L5AINH':{},
'L5BEXC':{},
'L5BINH':{},
}
freq_group_keys = ['0-0.2', '1.0', '2.0', '3.0', '4.0', '5.0', '6.0', '7.0', '8.0', '9.0', '10.0']
number_of_blocks = 20
for neuron_group_dictionary in dictionary_of_non_stationarity_dictionaries_by_neuron_group.values():
# print(dictionary)
neuron_group_dictionary['KPSS_pvalues'] = []
neuron_group_dictionary['TI_LR_pvalues'] = []
neuron_group_dictionary['TI_LR_pvalues_by_freq_group_and_block'] = {}
neuron_group_dictionary['TI_LR_pvalues_by_freq_group'] = {}
neuron_group_dictionary['spike_times_by_trial_index_by_freq_group'] = {}
neuron_group_dictionary['spike_counts_by_trial_index_by_freq_group'] = {}
for freq_group_key in freq_group_keys:
neuron_group_dictionary['TI_LR_pvalues_by_freq_group_and_block'][freq_group_key] = {}
neuron_group_dictionary['TI_LR_pvalues_by_freq_group'][freq_group_key] = []
neuron_group_dictionary['spike_times_by_trial_index_by_freq_group'][freq_group_key] = {}
neuron_group_dictionary['spike_counts_by_trial_index_by_freq_group'][freq_group_key] = {}
for trial_index in range(100):
neuron_group_dictionary['spike_times_by_trial_index_by_freq_group'][freq_group_key][str(trial_index)] = []
neuron_group_dictionary['spike_counts_by_trial_index_by_freq_group'][freq_group_key][str(trial_index)] = []
for block_index in range(number_of_blocks):
neuron_group_dictionary['TI_LR_pvalues_by_freq_group_and_block'][freq_group_key][str(block_index)] = []
TI_LR_pvalues_by_freq_group_NOT_neuron_group = {}
for freq_group_key in freq_group_keys:
TI_LR_pvalues_by_freq_group_NOT_neuron_group[freq_group_key] = []
for experiment_index in investigation["experiments_to_process"]:
experiment_latencies_vs_trial_plots_directory = latencies_vs_trial_plots_directory + str(experiment_index) + "/"
sw.mkdir(experiment_latencies_vs_trial_plots_directory)
data_loader = data_loaders[investigation["experiments_to_process"].index(experiment_index)]
for scsuo in data_loader.list_of_all_scsuos:
first_spikes_for_trials_which_spiked_atleast_once_on_trial_flattened = scsuo.first_spikes_for_trials_which_spiked_atleast_once_on_trial.flatten()
if (scsuo.lr_actual_trial_indices_vs_fs != None):
freq_group_key = str(scsuo.stimulation_frequency_of_condition)
if (scsuo.stimulation_frequency_of_condition < 0.3):
freq_group_key = '0-0.2'
if (freq_group_key in freq_group_keys):
ALL_lr_actual_trial_indices_vs_fs.append(scsuo.lr_actual_trial_indices_vs_fs)
ALL_lr_autocorrelation_lag_1s.append(scsuo.lr_autocorrelation_lag_1)
ALL_adfuller_dataframes.append(scsuo.adfuller_dataframe)
ALL_KPSS_pvalues.append(scsuo.kpss_pvalue)
acf_rvalues_0, acf_pvals_0, acf_positive_pvals_0, acf_negative_pvals_0 = sw.calculate_acf_pvalues_for_spikes(first_spikes_for_trials_which_spiked_atleast_once_on_trial_flattened, first_spikes_for_trials_which_spiked_atleast_once_on_trial_flattened, number_of_lags)
pacf_rvalues, pacf_pvals, pacf_positive_pvals, pacf_negative_pvals = sw.calculate_pacf_pvalues_for_spikes(first_spikes_for_trials_which_spiked_atleast_once_on_trial_flattened, first_spikes_for_trials_which_spiked_atleast_once_on_trial_flattened, number_of_lags)
acf_pvalues.append(acf_pvals_0)
pacf_pvalues.append(pacf_pvals)
neuron_group_key = scsuo.neurophys_corresponding_layer_string_of_unit + scsuo.exc_inh_type_of_unit
for trial_index in range(100):
if (trial_index < scsuo.number_of_trials_for_condition):
if (scsuo.true_if_spiked_atleast_once_on_trial[trial_index]):
dictionary_of_non_stationarity_dictionaries_by_neuron_group[neuron_group_key]['spike_times_by_trial_index_by_freq_group'][freq_group_key][str(trial_index)].append(scsuo.first_spikes_for_condition_trials_normalised[trial_index])
dictionary_of_non_stationarity_dictionaries_by_neuron_group[neuron_group_key]['spike_counts_by_trial_index_by_freq_group'][freq_group_key][str(trial_index)].append(scsuo.spike_counts_per_trial_normalised[trial_index])
if (scsuo.number_of_trials_with_atleast_one_spike > 20):
dictionary_of_non_stationarity_dictionaries_by_neuron_group[neuron_group_key]['TI_LR_pvalues_by_freq_group'][freq_group_key].append(scsuo.lr_actual_trial_indices_vs_fs.pvalue)
TI_LR_pvalues_by_freq_group_NOT_neuron_group[freq_group_key].append(scsuo.lr_actual_trial_indices_vs_fs.pvalue)
dictionary_of_non_stationarity_dictionaries_by_neuron_group[neuron_group_key]['KPSS_pvalues'].append(scsuo.kpss_pvalue)
dictionary_of_non_stationarity_dictionaries_by_neuron_group[neuron_group_key]['TI_LR_pvalues'].append(scsuo.lr_actual_trial_indices_vs_fs.pvalue)
number_of_20_trial_spiking_blocks = math.floor(float(len(first_spikes_for_trials_which_spiked_atleast_once_on_trial_flattened)) / 20.0)
for i in range(number_of_20_trial_spiking_blocks):
if (i < 20):
# print(i, number_of_20_trial_spiking_blocks)
lr_pvalue_for_block = linregress(first_spikes_for_trials_which_spiked_atleast_once_on_trial_flattened[i * 20: i * 20 + 20], range(20)).pvalue
if (freq_group_key in freq_group_keys):
dictionary_of_non_stationarity_dictionaries_by_neuron_group[neuron_group_key]['TI_LR_pvalues_by_freq_group_and_block'][freq_group_key][str(i)].append(lr_pvalue_for_block)
# print(scsuo.lr_actual_trial_indices_vs_fs.rvalue)
# autocorrelation_shift_1 = np.corrcoef(scsuo.first_spikes_for_trials_which_spiked_atleast_once_on_trial[1:], scsuo.first_spikes_for_trials_which_spiked_atleast_once_on_trial[:-1])[0, 1]
# ALL_autocorrelation_shift_1s.append(autocorrelation_shift_1)
# if (scsuo.lr_actual_trial_indices_vs_fs.pvalue < 0.1):
if (draw_individual_unit_plots):
if (experiment_index == 1):
sns.set()
sns.set_style("ticks")
plt.figure(figsize=(2, 4))
plt.scatter(scsuo.first_spikes_for_trials_which_spiked_atleast_once_on_trial, scsuo.indices_of_trials_with_atleast_one_spike, s=2)
plt.title(scsuo.neurophys_corresponding_layer_string_of_unit + " " + scsuo.exc_inh_type_of_unit + ", Stim Freq: " + str(scsuo.stimulation_frequency_of_condition) + '\nPrinciple Column: ' + str(scsuo.principle_condition_unit_combination) + '\nAdfuller p: ' + str(scsuo.adfuller_dataframe[1]) + '\nKPSS p-value: ' + str(scsuo.kpss_pvalue) + "\nLag-1 autocorrelation p-value: " + str(scsuo.lr_autocorrelation_lag_1.pvalue) + "\nTrial index vs spike time p-value: " + str(scsuo.lr_actual_trial_indices_vs_fs.pvalue))
plt.gca().set_xlabel('ms', fontsize=24)
plt.gca().set_ylabel('Trial', fontsize=24)
plt.gca().set_ylim([0, plt.gca().get_ylim()[1]])
plt.gca().set_xlim([5.0, 55.8])
# plt.gca().xaxis.set_major_locator(FixedLocator([5.0, 30.0]))
# plt.gca().yaxis.set_major_locator(FixedLocator([0, scsuo.number_of_trials_for_condition]))
plt.gca().xaxis.set_minor_locator(MultipleLocator(5.0))
# plt.gca().yaxis.set_minor_locator(MultipleLocator(20))
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(24)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(24)
if (scsuo.number_of_trials_for_condition == 100):
plt.savefig(experiment_latencies_vs_trial_plots_directory + str(experiment_index) + "_" + str(scsuo.channel_unit_pair_index) + "_" + str(scsuo.condition_index) + ".pdf", bbox_inches='tight')
plt.close()
count_or_time_keys = ['spike_times_by_trial_index_by_freq_group', 'spike_counts_by_trial_index_by_freq_group']
count_or_time_y_axis_label = ['Normalised\nspike time', 'Normalised\nspike count']
for count_or_time_index, count_or_time_key in enumerate(count_or_time_keys):
block_dict = {}
for neuron_group_key, neuron_group_dictionary in dictionary_of_non_stationarity_dictionaries_by_neuron_group.items():
block_dict[neuron_group_key] = {}
block_dict[neuron_group_key]['block_means'] = []
block_dict[neuron_group_key]['block_pos_95_cis'] = []
for block_index in range(10):
block_spike_times = []
for freq_group_index, freq_group_key in enumerate(freq_group_keys):
for trial_index in range(10):
trial_spike_times = neuron_group_dictionary[count_or_time_key][freq_group_key][str(block_index*10 + trial_index)]
block_spike_times.extend(trial_spike_times)
se = 0.0
mean = 0.0
if (len(block_spike_times) > 0):
se = np.std(block_spike_times) / np.sqrt(len(block_spike_times))
mean = np.mean(block_spike_times)
block_dict[neuron_group_key]['block_means'].append(mean)
block_dict[neuron_group_key]['block_pos_95_cis'].append(1.96 * se)
plt.figure(figsize=(13, 14))
fig, axes = plt.subplots(4, 1)
for ei_type_index, (ei_type, ei_type_color) in enumerate(zip(['EXC', 'INH'], ['Reds', 'Blues'])):
colour_map_string = ei_type_color
my_cmap = plt.cm.get_cmap(colour_map_string, 4)
xvals = np.arange(0, 10)
for neuron_group_index, neuron_group_key in enumerate(dictionary_of_non_stationarity_dictionaries_by_neuron_group.keys()):
if (ei_type in neuron_group_key):
# print(6 - math.floor(neuron_group_index / 2) )
# c = my_cmap(6 - math.floor(neuron_group_index / 2))
c = my_cmap(2)
ax_index = math.floor(neuron_group_index / 2)
ax = axes[math.floor(neuron_group_index / 2)]
ax.errorbar(xvals + ei_type_index * 0.1 - 0.05, block_dict[neuron_group_key]['block_means'], yerr=block_dict[neuron_group_key]['block_pos_95_cis'], c=c, label=neuron_group_key, ls='-', marker='o', markersize=2, lw=.3) # '.'
ax.set_ylabel(count_or_time_y_axis_label[count_or_time_index], fontsize=10)
# ax.set_ylim([-0.25, 0.6])
ax.set_ylim([-0.4, 0.4])
ax.yaxis.set_major_locator(MultipleLocator(0.4))
ax.yaxis.set_minor_locator(MultipleLocator(0.2))
ax.grid(which='major', axis='y', linestyle='-', c='lightgrey', linewidth=0.5)
ax.grid(which='minor', axis='y', linestyle='-', c='lightgrey', linewidth=0.5)
ax.set_xticks(xvals)
if (ax_index == 3):
ax.set_xticklabels([str(block_index*10) + '-' + str(block_index*10 + 9) for block_index in range(10)])
ax.set_xlabel('Trial group', fontsize=10)
else:
ax.set_xticklabels(['' for i in range(len(freq_group_keys))])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(10)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(10)
plt.savefig(single_neuron_analysis_figs_dir + 'AllNeuronGroup' + count_or_time_key + '.pdf')
plt.close()
# meta_freq_groups = [['1.0', '3.0', '6.0', '10.0'], ['0-0.2', '2.0', '4.0'], ['5.0', '7.0', '8.0', '9.0']] # Mainz Paper
meta_freq_groups = [['0-0.2', '1.0', '2.0', '3.0'], ['4.0', '5.0', '6.0', '7.0'], ['8.0', '9.0', '10.0']] # Thesis
for meta_freq_group_index, meta_freq_group in enumerate(meta_freq_groups):
for freq_group_index, freq_group_key in enumerate(freq_group_keys):
block_dict[freq_group_key] = {}
block_dict[freq_group_key]['block_means'] = []
block_dict[freq_group_key]['block_pos_95_cis'] = []
for block_index in range(10):
block_spike_times = []
for neuron_group_key, neuron_group_dictionary in dictionary_of_non_stationarity_dictionaries_by_neuron_group.items():
for trial_index in range(10):
trial_spike_times = neuron_group_dictionary[count_or_time_key][freq_group_key][str(block_index*10 + trial_index)]
block_spike_times.extend(trial_spike_times)
se = np.std(block_spike_times) / np.sqrt(len(block_spike_times))
block_dict[freq_group_key]['block_means'].append(np.mean(block_spike_times))
block_dict[freq_group_key]['block_pos_95_cis'].append(1.96 * se)
plt.figure(figsize=(8, 3))
colour_map_string = "Greens"
my_cmap = plt.cm.get_cmap(colour_map_string, len(meta_freq_group) * 2)
xvals = np.arange(0, 10)
for freq_group_index, freq_group_key in enumerate(freq_group_keys):
if (freq_group_key in meta_freq_group):
index_in_meta_group = meta_freq_group.index(freq_group_key)
c = my_cmap(index_in_meta_group + len(meta_freq_group))
plt.errorbar(xvals + index_in_meta_group * 0.08 - .12, block_dict[freq_group_key]['block_means'], yerr=block_dict[freq_group_key]['block_pos_95_cis'], c=c, fmt='.', label=freq_group_key, ls='-', marker='o', markersize=2, lw=.3)
ax = plt.gca()
ax.set_ylabel(count_or_time_y_axis_label[count_or_time_index], fontsize=10)
# plt.gca().set_ylim([-0.25, 0.65])
ax.yaxis.set_major_locator(MultipleLocator(0.2))
# ax.set_ylim([-0.5, 0.4])
# ax.yaxis.set_major_locator(MultipleLocator(0.4))
# ax.yaxis.set_minor_locator(MultipleLocator(0.2))
ax.grid(which='major', axis='y', linestyle='-', c='lightgrey', linewidth=0.5)
ax.grid(which='minor', axis='y', linestyle='-', c='lightgrey', linewidth=0.5)
ax.set_xticks(xvals)
if (ax_index == 3):
ax.set_xticklabels([str(block_index*10) + '-' + str(block_index*10 + 9) for block_index in range(10)])
ax.set_xlabel('Trial group', fontsize=10)
else:
ax.set_xticklabels(['' for i in range(len(freq_group_keys))])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(10)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(10)
plt.legend(edgecolor='k')
plt.savefig(single_neuron_analysis_figs_dir + str(meta_freq_group_index) + 'AllByFreqGroup' + count_or_time_key + '.pdf')
plt.close()
for neuron_group_key, neuron_group_dictionary in dictionary_of_non_stationarity_dictionaries_by_neuron_group.items():
neuron_group_KPSS_pvalues = np.asarray(neuron_group_dictionary['KPSS_pvalues'])
neuron_group_LR_pvalues = np.asarray(neuron_group_dictionary['TI_LR_pvalues'])
number_of_KPSS_samples_for_neuron_group = len(neuron_group_KPSS_pvalues)
number_of_LR_samples_for_neuron_group = len(neuron_group_LR_pvalues)
neuron_group_dictionary['Proportion_KPSS_pvalues_less_than_threshold'] = 0.0
neuron_group_dictionary['Proportion_LR_pvalues_less_than_threshold'] = 0.0
neuron_group_dictionary['Proportion_KPSS_pvalues_less_than_threshold_positive_confidence_interval'] = 0.0
neuron_group_dictionary['Proportion_LR_pvalues_less_than_threshold_positive_confidence_interval'] = 0.0
if (number_of_KPSS_samples_for_neuron_group > 0):
neuron_group_dictionary['Proportion_KPSS_pvalues_less_than_threshold'] = np.sum(neuron_group_KPSS_pvalues < 0.05) / number_of_KPSS_samples_for_neuron_group
neuron_group_dictionary['Proportion_KPSS_pvalues_less_than_threshold_positive_confidence_interval'] = 1.96 * math.sqrt((neuron_group_dictionary['Proportion_KPSS_pvalues_less_than_threshold'] * (1-neuron_group_dictionary['Proportion_KPSS_pvalues_less_than_threshold']) / number_of_KPSS_samples_for_neuron_group))
if (number_of_LR_samples_for_neuron_group > 0):
neuron_group_dictionary['Proportion_LR_pvalues_less_than_threshold'] = np.sum(neuron_group_LR_pvalues < 0.05) / number_of_LR_samples_for_neuron_group
neuron_group_dictionary['Proportion_LR_pvalues_less_than_threshold_positive_confidence_interval'] = 1.96 * math.sqrt((neuron_group_dictionary['Proportion_LR_pvalues_less_than_threshold'] * (1-neuron_group_dictionary['Proportion_LR_pvalues_less_than_threshold']) / number_of_LR_samples_for_neuron_group))
# 95% Wald interval for a population proportion: p_hat +/- 1.96 * sqrt(p_hat * (1 - p_hat) / n)
# (method from https://www.dummies.com/education/math/statistics/how-to-determine-the-confidence-interval-for-a-population-proportion/)
# sw.draw_neighbouring_bar_chart(stimulation_frequency_of_condition_tests_passed_and_correlated_for_each_key, ('0-0.2', '1', '2', '3', '4', '5', '6', '7', '8'), dh.unit_type_analysis_directory + "stimulation_frequency_grouped.pdf", '', ('Stationary', 'ARIMA'), 'Hz', custom_y_tick_locators=[20, 5])
sw.draw_neighbouring_bar_chart([[neuron_group_dictionary['Proportion_KPSS_pvalues_less_than_threshold'] for neuron_group_dictionary in dictionary_of_non_stationarity_dictionaries_by_neuron_group.values()],
[neuron_group_dictionary['Proportion_LR_pvalues_less_than_threshold'] for neuron_group_dictionary in dictionary_of_non_stationarity_dictionaries_by_neuron_group.values()]],
dictionary_of_non_stationarity_dictionaries_by_neuron_group.keys(),
single_neuron_analysis_figs_dir + "NonStationarityByNeuronGroup.pdf",
'',
('KPSS', 'Trial Index Linear Regression'),
"",
custom_y_tick_locators=[.3, .05],
rotate_x_labels=True,
y_tick_right=True,
optional_y_axis_label="Proportion",
positive_confidence_intervals=[[neuron_group_dictionary['Proportion_KPSS_pvalues_less_than_threshold_positive_confidence_interval'] for neuron_group_dictionary in dictionary_of_non_stationarity_dictionaries_by_neuron_group.values()],
[neuron_group_dictionary['Proportion_LR_pvalues_less_than_threshold_positive_confidence_interval'] for neuron_group_dictionary in dictionary_of_non_stationarity_dictionaries_by_neuron_group.values()]],
threshold_lin_value=0.05)
proportion_TI_LR_pvalues_by_freq_group_NOT_neuron_group_less_than_threshold = {}
proportion_TI_LR_pvalues_by_freq_group_NOT_neuron_group_less_than_threshold_positive_confidence_interval = {}
for freq_group_key in freq_group_keys:
TI_LR_pvalues_for_freq_group = np.asarray(TI_LR_pvalues_by_freq_group_NOT_neuron_group[freq_group_key])
number_TI_LR_pvalues_by_freq_group_NOT_neuron_group = len(TI_LR_pvalues_for_freq_group)
proportion_TI_LR_pvalues_by_freq_group_NOT_neuron_group_less_than_threshold[freq_group_key] = np.sum(TI_LR_pvalues_for_freq_group < 0.05) / number_TI_LR_pvalues_by_freq_group_NOT_neuron_group
proportion_TI_LR_pvalues_by_freq_group_NOT_neuron_group_less_than_threshold_positive_confidence_interval[freq_group_key] = 1.96 * math.sqrt((proportion_TI_LR_pvalues_by_freq_group_NOT_neuron_group_less_than_threshold[freq_group_key] * (1-proportion_TI_LR_pvalues_by_freq_group_NOT_neuron_group_less_than_threshold[freq_group_key]) / number_TI_LR_pvalues_by_freq_group_NOT_neuron_group))
sw.draw_neighbouring_bar_chart([[proportion_TI_LR_pvalues_by_freq_group_NOT_neuron_group_less_than_threshold[freq_group_key] for freq_group_key in freq_group_keys]],
freq_group_keys,
single_neuron_analysis_figs_dir + "NonStationarityByFrequency.pdf",
'',
('Trial Index Linear Regression'),
"",
custom_y_tick_locators=[.3, .05],
rotate_x_labels=True,
y_tick_right=True,
optional_y_axis_label="Proportion",
positive_confidence_intervals=[[proportion_TI_LR_pvalues_by_freq_group_NOT_neuron_group_less_than_threshold[freq_group_key] for freq_group_key in freq_group_keys]],
threshold_lin_value=0.05)
acf_pvalues = np.asarray(acf_pvalues)
pacf_pvalues = np.asarray(pacf_pvalues)
sw.cumulative_histo_plot([acf_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags)], single_neuron_analysis_figs_dir + "_ACF_PVal_CumHist", bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.cumulative_histo_plot([pacf_pvalues[:, lag_index_zeroed] for lag_index_zeroed in range(number_of_lags)], single_neuron_analysis_figs_dir + "_PACF_PVal_CumHist", bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
for lag_index_zeroed in range(number_of_lags):
sw.normal_histo_plot([acf_pvalues[:, lag_index_zeroed]], single_neuron_analysis_figs_dir + "_ACFLag" + str(lag_index_zeroed + 1), bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
sw.normal_histo_plot([pacf_pvalues[:, lag_index_zeroed]], single_neuron_analysis_figs_dir + "_PACFLag" + str(lag_index_zeroed + 1), bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20], alpha=0.78, add_chi_squared_text=True)
ALL_pvalues_actual_trial_indices_vs_fs = [lr.pvalue for lr in ALL_lr_actual_trial_indices_vs_fs if lr is not None]
ALL_rvalues_actual_trial_indices_vs_fs = [lr.rvalue for lr in ALL_lr_actual_trial_indices_vs_fs if lr is not None]
ALL_rsquared_values_actual_trial_indices_vs_fs = [lr.rvalue**2 for lr in ALL_lr_actual_trial_indices_vs_fs if lr is not None]
PVALTHRESH_rvalues_actual_trial_indices_vs_fs = [lr.rvalue for lr in ALL_lr_actual_trial_indices_vs_fs if (lr is not None) and (lr.pvalue < 0.005)]
PVALTHRESH_rsquared_values_actual_trial_indices_vs_fs = [lr.rvalue**2 for lr in ALL_lr_actual_trial_indices_vs_fs if (lr is not None) and (lr.pvalue < 0.005)]
ALL_adfuller_pvalues = [adfuller_dataframe['p-value'] for adfuller_dataframe in ALL_adfuller_dataframes]
PVALTHRESH_rvalues_actual_trial_indices_vs_fs = np.asarray(PVALTHRESH_rvalues_actual_trial_indices_vs_fs)
ALL_KPSS_pvalues = np.asarray(ALL_KPSS_pvalues)
ALL_LR_pvalues = np.asarray(ALL_pvalues_actual_trial_indices_vs_fs)
# Copyright 2016-2019 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom metrics for pixel-based and object-based classification accuracy.
The schema for this analysis was adopted from the description of object-based
statistics in Caicedo et al. (2018) Evaluation of Deep Learning Strategies for
Nucleus Segmentation in Fluorescence Images. BioRxiv 335216.
The SEG metric was adapted from Maška et al. (2014). A benchmark for comparison
of cell tracking algorithms. Bioinformatics 30, 1609–1617.
The linear classification schema used to match objects in truth and prediction
frames was adapted from Jaqaman et al. (2008). Robust single-particle tracking
in live-cell time-lapse sequences. Nature Methods 5, 695–702.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import json
import datetime
import operator
import math
import decimal
import numpy as np
import pandas as pd
import networkx as nx
from scipy.optimize import linear_sum_assignment
import skimage.io
import skimage.measure
from skimage.segmentation import relabel_sequential
from sklearn.metrics import confusion_matrix
from tensorflow.python.platform import tf_logging as logging
def stats_pixelbased(y_true, y_pred):
"""Calculates pixel-based statistics
(Dice, Jaccard, Precision, Recall, F-measure)
Takes in raw prediction and truth data in order to calculate accuracy
metrics for pixel based classification. Statistics were chosen according
to the guidelines presented in Caicedo et al. (2018) Evaluation of Deep
Learning Strategies for Nucleus Segmentation in Fluorescence Images.
BioRxiv 335216.
Args:
y_true (3D np.array): Binary ground truth annotations for a single
feature, (batch,x,y)
y_pred (3D np.array): Binary predictions for a single feature,
(batch,x,y)
Returns:
dictionary: Containing a set of calculated statistics
Raises:
ValueError: Shapes of `y_true` and `y_pred` do not match.
Warning:
Comparing labeled to unlabeled data will produce low accuracy scores.
Make sure to input the same type of data for `y_true` and `y_pred`
"""
if y_pred.shape != y_true.shape:
raise ValueError('Shape of inputs need to match. Shape of prediction '
'is: {}. Shape of y_true is: {}'.format(
y_pred.shape, y_true.shape))
pred = y_pred
truth = y_true
if pred.sum() == 0 and truth.sum() == 0:
logging.warning('DICE score is technically 1.0, '
'but prediction and truth arrays are empty. ')
# Calculations for IOU
intersection = np.logical_and(pred, truth)
union = np.logical_or(pred, truth)
# Sum gets count of positive pixels
dice = (2 * intersection.sum() / (pred.sum() + truth.sum()))
jaccard = intersection.sum() / union.sum()
precision = intersection.sum() / pred.sum()
recall = intersection.sum() / truth.sum()
Fmeasure = (2 * precision * recall) / (precision + recall)
return {
'dice': dice,
'jaccard': jaccard,
'precision': precision,
'recall': recall,
'Fmeasure': Fmeasure
}
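# A minimal usage sketch (hypothetical toy masks, not part of the library):
# identical binary masks give dice == jaccard == precision == recall == 1.0.
#
#   _toy = np.zeros((1, 4, 4), dtype=int)
#   _toy[0, 1:3, 1:3] = 1
#   stats_pixelbased(_toy, _toy.copy())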
class ObjectAccuracy(object):
"""Classifies object prediction errors as TP, FP, FN, merge or split
The schema for this analysis was adopted from the description of
object-based statistics in Caicedo et al. (2018) Evaluation of Deep
Learning Strategies for Nucleus Segmentation in Fluorescence Images.
BioRxiv 335216.
The SEG metric was adapted from Maška et al. (2014). A benchmark for
comparison of cell tracking algorithms.
Bioinformatics 30, 1609–1617.
The linear classification schema used to match objects in truth and
prediction frames was adapted from Jaqaman et al. (2008).
Robust single-particle tracking in live-cell time-lapse sequences.
Nature Methods 5, 695–702.
Args:
y_true (2D np.array): Labeled ground truth annotation
y_pred (2D np.array): Labeled object prediction, same size as y_true
cutoff1 (:obj:`float`, optional): Threshold for overlap in cost matrix,
smaller values are more conservative, default 0.4
cutoff2 (:obj:`float`, optional): Threshold for overlap in unassigned
cells, smaller values are better, default 0.1
test (:obj:`bool`, optional): Utility variable to control running
analysis during testing
seg (:obj:`bool`, optional): Calculates SEG score for cell tracking
competition
Raises:
ValueError: If y_true and y_pred are not the same shape
Warning:
Position indices are not currently collected appropriately
Todo:
Implement recording of object indices for each error group
"""
def __init__(self,
y_true,
y_pred,
cutoff1=0.4,
cutoff2=0.1,
test=False,
seg=False):
self.cutoff1 = cutoff1
self.cutoff2 = cutoff2
self.seg = seg
if y_pred.shape != y_true.shape:
raise ValueError('Input shapes must match. Shape of prediction '
'is: {}. Shape of y_true is: {}'.format(
y_pred.shape, y_true.shape))
# Relabel y_true and y_pred so the labels are consecutive
y_true, _, _ = relabel_sequential(y_true)
y_pred, _, _ = relabel_sequential(y_pred)
self.y_true = y_true
self.y_pred = y_pred
self.n_true = len(np.unique(self.y_true)) - 1
self.n_pred = len(np.unique(self.y_pred)) - 1
self.n_obj = self.n_true + self.n_pred
# Initialize error counters
self.correct_detections = 0
self.missed_detections = 0
self.gained_detections = 0
self.merge = 0
self.split = 0
self.catastrophe = 0
self.gained_det_from_split = 0
self.missed_det_from_merge = 0
self.true_det_in_catastrophe = 0
self.pred_det_in_catastrophe = 0
# Initialize lists and dicts to store indices where errors occur
self.correct_indices = {}
self.correct_indices['y_true'] = []
self.correct_indices['y_pred'] = []
self.missed_indices = {}
self.missed_indices['y_true'] = []
self.gained_indices = {}
self.gained_indices['y_pred'] = []
self.merge_indices = {}
self.merge_indices['y_true'] = []
self.split_indices = {}
self.split_indices['y_true'] = []
self.catastrophe_indices = {}
self.catastrophe_indices['y_true'] = []
self.catastrophe_indices['y_pred'] = []
# Check if either frame is empty before proceeding
if self.n_true == 0:
logging.info('Ground truth frame is empty')
self.gained_detections += self.n_pred
self.empty_frame = 'n_true'
elif self.n_pred == 0:
logging.info('Prediction frame is empty')
self.missed_detections += self.n_true
self.empty_frame = 'n_pred'
elif test is False:
self.empty_frame = False
self._calc_iou()
self._make_matrix()
self._linear_assignment()
# Check if there are loners before proceeding
if (self.loners_pred.shape[0] == 0) & (self.loners_true.shape[0] == 0):
pass
else:
self._assign_loners()
self._array_to_graph()
self._classify_graph()
else:
self.empty_frame = False
def _calc_iou(self):
"""Calculates IoU matrix for each pairwise comparison between true and
predicted. Additionally, if `seg`==True, records a 1 for each pair of
objects where $|T\bigcap P| > 0.5 * |T|$
"""
self.iou = np.zeros((self.n_true, self.n_pred))
if self.seg is True:
self.seg_thresh = np.zeros((self.n_true, self.n_pred))
# Make all pairwise comparisons to calc iou
for t in range(1, self.y_true.max() + 1):
for p in range(1, self.y_pred.max() + 1):
intersection = np.logical_and(self.y_true == t, self.y_pred == p)
union = np.logical_or(self.y_true == t, self.y_pred == p)
# Subtract 1 from index to account for skipping 0
self.iou[t - 1, p - 1] = intersection.sum() / union.sum()
if (self.seg is True) & \
(intersection.sum() > 0.5 * np.sum(self.y_true == t)):
self.seg_thresh[t - 1, p - 1] = 1
def _make_matrix(self):
"""Assembles cost matrix using the iou matrix and cutoff1
The previously calculated iou matrix is cast into the top left and
transposed for the bottom right corner. The diagonals of the two
remaining corners are populated according to `cutoff1`. The lower the
value of `cutoff1` the more likely it is for the linear sum assignment
to pick unmatched assignments for objects.
"""
self.cm = np.ones((self.n_obj, self.n_obj))
# Assign 1 - iou to top left and bottom right
self.cm[:self.n_true, :self.n_pred] = 1 - self.iou
self.cm[-self.n_pred:, -self.n_true:] = 1 - self.iou.T
# Calculate diagonal corners
bl = self.cutoff1 * \
np.eye(self.n_pred) + np.ones((self.n_pred, self.n_pred)) - \
np.eye(self.n_pred)
tr = self.cutoff1 * \
np.eye(self.n_true) + np.ones((self.n_true, self.n_true)) - \
np.eye(self.n_true)
# Assign diagonals to cm
self.cm[-self.n_pred:, :self.n_pred] = bl
self.cm[:self.n_true, -self.n_true:] = tr
def _linear_assignment(self):
"""Runs linear sun assignment on cost matrix, identifies true positives
and unassigned true and predicted cells.
True positives correspond to assignments in the top left or bottom
right corner. There are two possible unassigned positions: true cell
unassigned in bottom left or predicted cell unassigned in top right.
"""
self.results = linear_sum_assignment(self.cm)
# Map results onto cost matrix
self.cm_res = np.zeros(self.cm.shape)
self.cm_res[self.results[0], self.results[1]] = 1
# Identify direct matches as true positives
correct_index = np.where(self.cm_res[:self.n_true, :self.n_pred] == 1)
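# A minimal usage sketch of the class above (assumed flow; the remaining
# classification helpers are presumably defined later in the original module):
#
#   _acc = ObjectAccuracy(y_true_labels, y_pred_labels)
#   _acc.correct_detections, _acc.missed_detections, _acc.gained_detections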
# Two layer instability
# Channel, with y=[-1,1], non-dimensional
# Bottom slope in y-direction (h=alpha*y)
# with 1D topography h=h(y)
#
# (U1-c)*(P1_yy - k^2 P1 - F1 P1 + F1 P2) + (1-U1_yy) P1 = 0
# (U2-c)*(P2_yy - k^2 P2 - F2 P2 + F2 P1) + (1+alpha-U2_yy+hy) P2 = 0
#
# Author: <NAME>, 1/18
# Translation to Python: <NAME>, 12/21
import numpy as np
from numpy import matrix, array, diag, eye, block, arange, zeros, ones
from scipy.linalg import eig, norm
import matplotlib.pyplot as plt
def cheb(n):
"""
compute Dx = differentiation matrix, x = Chebyshev grid
Matlab function written by <NAME>.
Translated to Python by <NAME> (5/2021).
"""
if n==0:
x = matrix([1.0])
Dx = matrix([0.0])
else:
N = arange(n + 1)
x = matrix(np.cos(np.pi*N/n)).T
m1 = (-1)**N
c = matrix(np.concatenate(([2], ones(n - 1), [2]))*m1).T
X = np.tile(x, (1, n + 1))
dX = X - X.T
Dx = array(c*(1/c).T)/array(dX + eye(n + 1)) # off-diagonal entries
Dx = matrix(Dx)
Dx = Dx - diag(array(Dx.T.sum(axis=0)).squeeze()) # diagonal entries
return x, Dx
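# A quick sanity check of cheb (a sketch, not part of the original script):
# the spectral differentiation matrix is exact for low-degree polynomials.
_xc, _Dc = cheb(8)
assert np.allclose(_Dc @ np.multiply(_xc, _xc), 2 * _xc)  # d/dx x^2 = 2x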
def sech(x):
return 1/np.cosh(x)
#---
plt.close("all")
#allhts = [0, 10]
allhts = [10]
# lt = 10*np.pi # bump wavenumber
lt = 50*np.pi
allF1s = [25, 75, 400]
PLOT = False
N = 512 # no. grid points in y
h1o2 = 1/4 # layer depth ratio
bet = 0 # beta
al = 0 # bottom slope
kmax = 60 # max wavenumber
dk = 1 # resolution wavenumber
U0 = 1 # amplitude surface jet
du = 0 # U2=du*U1
L = 0.2 # width surface jet
efac = N**4/2
for F1 in allF1s:
for ht in allhts:
print("F1 = ", str(F1), " lt = ", str(int(lt/np.pi)), "pi, ht = ", str(ht))
kp = matrix(np.arange(dk, kmax+dk, dk)).T
d1 = h1o2/(1 + h1o2)
d2 = 1/(1 + h1o2)
F = d1*F1
Lx = 2*np.pi
Nm = N - 1
y, D = cheb(N)
D2 = D**2
D2 = D2[1:N, 1:N]
D = D[1:N, 1:N]
y = y[1:N, :]
y0 = array(y).flatten()
dy = np.gradient(y0)[:, np.newaxis]
F1e = F1*matrix(eye(Nm))
F2 = h1o2*F1
F2e = F2*matrix(eye(Nm))
n2 = int(np.floor(N/2))
Np = kp.size
ci = zeros(Np)
cr, hf, m1, m2, rat, pha = ci.copy(), ci.copy(), ci.copy(), ci.copy(), ci.copy(), ci.copy()
ke1, ke2, pe = ci.copy(), ci.copy(), ci.copy()
U1 = U0*sech(y0/L)**2
U1yy = 2/L**2*U1*(3*np.tanh(y0/L)**2 - 1) # Bickley jet.
U1, U1yy = map(matrix, (U1, U1yy))
U1, U1yy = U1.T, U1yy.T
U2 = du*U1
U2yy = du*U1yy
qs1y = bet - U1yy + F1*(U1 - U2)
hy = -ht*lt*np.sin(lt*y)
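# hy = dh/dy for the sinusoidal 1D topography, consistent with h(y) = ht*cos(lt*y) up to a constant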
qs2y = bet - U2yy + F2*(U2 - U1) + al + hy
qs1ye = diag(array(qs1y).flatten())
qs2ye = diag(array(qs2y).flatten())
hye = diag(array(hy).flatten())
U1e = diag(array(U1).flatten())
U2e = diag(array(U2).flatten())
qs1ye, qs2ye, hye, U1e, U2e = map(matrix, (qs1ye, qs2ye, hye, U1e, U2e))
U1, U2 = map(array, (U1, U2))
for n in range(Np):
k = array(kp[n]).flatten()[0]
k2e = matrix(k**2*eye(Nm))
A11 = U1e*(D2 - k2e - F1e) + qs1ye
A12 = U1e*F1e
A21 = U2e*F2e
A22 = U2e*(D2 - k2e - F2e) + qs2ye
A = block([[A11, A12], [A21, A22]])
B11 = D2 - k2e - F1e
B12 = F1e
B21 = F2e
B22 = D2 - k2e - F2e
B = block([[B11, B12], [B21, B22]])
c, V = eig(A, b=B, check_finite=False)
V = matrix(V)
j = np.where(c.imag > 0)[0]
if len(j)>0:
nm = c[j].imag.argmax()
jnm = j[nm]
ci[n] = c[jnm].imag
cr[n] = c[jnm].real
Vs1 = V[:Nm, jnm]
Vs2 = V[Nm:, jnm]
u1y = -D2*V[:Nm, :]
u1y = u1y[:, jnm]
u2y = -D2*V[Nm:, :]
u2y = u2y[:, jnm]
Vs1, Vs2, u1y, u2y = map(array, (Vs1, Vs2, u1y, u2y))
# Calculate diagnostic quantities for each mode.
hf[n] = np.real(F*np.sum(1j*k*(U1 - U2)*(Vs2.conj()*Vs1 - Vs2*Vs1.conj())*dy)) # Thickness flux.
m1[n] = np.real(d1*np.sum(1j*k*U1*(Vs1.conj()*u1y - Vs1*u1y.conj())*dy)) # Upper-layer momentum flux.
m2[n] = np.real(d2*np.sum(1j*k*U2*(Vs2.conj()*u2y - Vs2*u2y.conj())*dy)) # Lower-layer momentum flux.
rat[n] = norm(Vs2, ord=2)/norm(Vs1, ord=2)
pha[n] = np.arctan2(Vs1[n2].imag, Vs1[n2].real) - np.arctan2(Vs2[n2].imag, Vs2[n2].real)
# Energies.
u1 = -D*V[:Nm, :] # -dpsi1/dy
u2 = -D*V[Nm:, :] # -dpsi2/dy
u1sq = np.array(np.abs(u1[:, jnm]))**2
u2sq = np.array(np.abs(u2[:, jnm]))**2
v1 = 1j*k*V[:Nm, :] # dpsi1/dx
v2 = 1j*k*V[Nm:, :] # dpsi2/dx
v1sq = np.array(np.abs(v1[:, jnm]))**2
import numpy as np
import astropy.units as u
from ..interpu import interpu
def test_interpu():
x = np.arange(20) * u.km
y = x**3 - 3*x**2*u.km + 5*u.km**3
points = np.random.rand(10)*20.
result0 = np.interp(points, x.value, y.value)
points1 = points*u.km
points1 = points1.to(u.imperial.mi)
result1 = interpu(points1, x, y)
    assert np.all(np.abs(result0 - result1.value)
# -*- coding: utf-8 -*-
"""
Created on Tue May 11 20:59:58 2021
@author: johan
"""
import numpy
import time
import numpy as np
import json
import math
from matplotlib import pyplot as plt
import matplotlib
inf=numpy.nan
random_partitions=True
if random_partitions:
casename = "partition_points_random"
fname= "timing_data/datatableLarger_n1.txt"
fname= "timing_data/tmpdata1_select_on_iter_rnd.txt"
fname= "timing_data/tmpdata1_rnd.txt"
fname= "timing_data/tmpdata1_quickselect_rnd.txt"
label="random partition points"
else:
casename = "partition_points_equidistant"
fname= "timing_data/datatableLarger_n1_equi.txt"
fname= "timing_data/tmpdata1_select_on_iter.txt"
label="equidistant partition points"
datafile="\n".join([l for l in open(fname).readlines() if not "#" in l]).replace("inf","10000.0")
d=json.loads(datafile)
data=np.array(d)
oRounds=sorted({k[0] for k in data})
Ns=sorted({k[1] for k in data})
def removeOutliers(data,outliers):
if len(data)<2*outliers:
raise Exception("too little data")
return numpy.partition(data,[outliers,len(data)-outliers-1])[outliers:len(data)-outliers]
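# A quick usage sketch (an assumption about the intended behaviour: drop the
# `outliers` smallest and largest values; the order of the rest is unspecified):
assert numpy.array_equal(numpy.sort(removeOutliers(numpy.array([9, 1, 5, 3, 7, 100, -4]), 1)), [1, 3, 5, 7, 9])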
def saveFig(fg,name):
fg.savefig(name+".tmp.png", dpi=600,bbox_inches='tight')
fig, ax1 = plt.subplots()
fig.set_figwidth(4)
fig.set_figheight(4)
ax1.grid(color='#aaaaaa', linestyle='-', linewidth=0.37)
colors=['gray','green','blue','black','purple','red','magenta']
colors_i=0
for i,N in enumerate(Ns):
ys=[]
x=[]
msActual=[]
if N < 150:
continue
color=colors[colors_i]
colors_i=colors_i+1
for round_i in oRounds:
m=sorted([[k[3],k[4],k[3]] for k in data if k[0]==round_i and k[1] == N])
x,y,mActual = list(zip(*m))
x=np.array(x)
ys.append(y)
msActual.append(mActual)
msActualNo = numpy.array(msActual)
msActualMean=numpy.mean(msActual,axis=0)
#msActualStd=numpy.std(msActual,axis=0)
x=msActualMean
ax1.set_ylabel("speed factor vs std::sort")
ax1.set_xlabel("number of partition points")
ax1.set_xscale('log')
ys=numpy.array(ys)
outliers=0
trials= ys.shape[0]
ysStd=numpy.std(ys,axis=0)
ysMean=numpy.mean(ys,axis=0)
line=plt.errorbar(x,ysMean,ysStd, fmt='.-',ecolor=color,color=color,linewidth=0.5)
#line=plt.errorbar(x,ysMean,ysStd, msActualStd, fmt='.-',ecolor=colors[i],color=colors[i])
line.set_label(("N="+"%1.0e"%int(N)).replace("+0",""))
#ref=len(ysMean)-3
import bisect
#ref2= bisect.bisect(x,N/100.0)
ref=len(ysMean)-4
#ref=min(ref,ref2)
thinLine=numpy.divide(ysMean[ref]*numpy.log(x[ref]),numpy.log(x))
for x_i in range(len(x)):
if x[x_i]<20 or thinLine[x_i]<0.6:
thinLine[x_i]=math.nan
ax1.plot(x,thinLine,':',color=color,linewidth=0.2)
ax1.set_xlim([0.9, 120000*10])
#ax1.set_xlim([0.9, 12000])
#ax1.set_xlim([0.9, 1200])
ax1.set_yticks(range(0,12))
ax1.set_xticks(numpy.power(10.0,numpy.arange(-1,10,1)))
ax1.set_xticks(numpy.power(10.0,numpy.arange(-1,10,0.1)),minor=True)
ax1.set_ylim([0., 12])
ax1.set_xlim([0.9, 120000*10])
ax1.legend(loc=1, prop={'size': 6})
ax1.yaxis.get_ticklocs(minor=True)
ax1.xaxis.get_ticklocs(minor=True)
ax1.minorticks_on()
x_minor = matplotlib.ticker.LogLocator(base = 10.0, subs = numpy.arange(1.0, 10.0) , numticks = 10)
ax1.xaxis.set_minor_locator(x_minor)
ax1.grid(visible=True, which='minor', color='gray', linestyle='-',linewidth=0.2, alpha=0.2)
ax1.set_title(label,fontsize=4)
#ax1.set_xlim([20, 1200000])
#ax1.set_ylim([0.65, 3])
#plt.errorbar(x,y,y*0.1, fmt='k.-')
fig.show()
saveFig(fig,"images/"+casename+"_speed_for_m_vs_sort"+".png")
fig, ax1 = plt.subplots()
fig.set_figwidth(4)
fig.set_figheight(4)
ax1.grid(color='#aaaaaa', linestyle='-', linewidth=0.37)
Ms=sorted({k[2] for k in data})
xMinShow=0
colors=['red','green','blue','#888800','purple','#bbbb00',"#888888","#ff9999","#66ff66","#6666ff","#ff66ff","black","#006600","#660000","#000066","#660066"]
color_i=0
for i,M in enumerate(Ms):
ys=[]
x=[]
msActual=[]
for round_i in oRounds:
theseNs=sorted([[k[1],k[4],k[3]] for k in data if k[0]==round_i and k[2] == M])
x,y,mActual = list(zip(*theseNs))
x=np.array(x)
ys.append(y)
msActual.append(mActual)
if len(x)<2:
continue
msActualNp=numpy.array(msActual)
msActualMean=numpy.mean(msActualNp,axis=0)
color=colors[color_i]
color_i=color_i+1
ax1.set_ylabel("speed factor vs std::sort")
ax1.set_xlabel("N")
ax1.set_xscale('log')
#ax1.set_yscale('log')
outliers=0
ys=numpy.array(ys)
trials= ys.shape[0]
ysStd=numpy.std(ys,axis=0)
ysMean=numpy.mean(ys,axis=0)
#ysStd=numpy.std(ys,axis=0)
for j in range(len(ysMean)):
if x[j]<M*4 or x[j]< xMinShow:
ysMean[j]=math.nan
x[j]=math.nan
    line=plt.errorbar(x,ysMean,ysStd, fmt='.-',ecolor=color,color=color,linewidth=0.5)
line.set_label("m="+str(int(M)))
ref=len(ysMean)-2
ax1.plot(x,numpy.multiply(ysMean[ref]/numpy.log(x[ref]),numpy.log(x)),':',color=color,linewidth=0.3)
ax1.set_xlim([200, 3500000*10])
#ax1.set_xlim([0.9, 12000])
#ax1.set_xlim([0.9, 1200])
ax1.set_yticks(range(0,12))
ax1.set_ylim([0.65, 12])
ax1.legend(loc=2, prop={'size': 6})
ax1.yaxis.get_ticklocs(minor=True)
ax1.minorticks_on()
ax1.grid(visible=True, which='minor', color='gray', linestyle='-',linewidth=0.2, alpha=0.2)
x_minor = matplotlib.ticker.LogLocator(base = 10.0, subs = numpy.arange(1.0, 10.0) , numticks = 10)
ax1.xaxis.set_minor_locator(x_minor)
ax1.set_title(label,fontsize=4)
fig.show()
saveFig(fig,"images/"+casename+"_speed_for_n_vs_sort"+".png")
fig, ax1 = plt.subplots()
fig.set_figwidth(4)
fig.set_figheight(4)
ax1.grid(color='#aaaaaa', linestyle='-', linewidth=0.37)
Ms=sorted({k[2] for k in data})
xMinShow=0
colors=['red','green','blue','#888800','purple','#bbbb00',"#888888","#ff9999","#66ff66","#6666ff","#ff66ff","black","#006600","#660000","#000066","#660066"]
color_i=0
for i,M in enumerate(Ms):
ys=[]
ysS=[]
x=[]
msActual=[]
for round_i in oRounds:
theseNs=sorted([[k[1],k[4]*k[6],k[3],k[6]] for k in data if k[0]==round_i and k[2] == M])
x,y,mActual,Tnth = list(zip(*theseNs))
x=np.array(x)
ys.append(Tnth)
ysS.append(y)
msActual.append(mActual)
if len(x)<2:
continue
msActualNp=numpy.array(msActual)
msActualMean=numpy.mean(msActualNp,axis=0)
ax1.set_ylabel("time")
ax1.set_xlabel("N")
ax1.set_xscale('log')
ax1.set_yscale('log')
#ax1.set_yscale('log')
color=colors[color_i]
color_i=color_i+1
outliers=3
ys=numpy.array(ys)
trials= ys.shape[0]
ysStd=numpy.std(ys,axis=0)
#ys=numpy.partition(ys,[outliers,trials-outliers-1],axis=0)[outliers:trials-outliers-1]
ysMean=numpy.mean(ys,axis=0)
#ysStd=numpy.std(ys,axis=0)
#for j in range(len(ysMean)):
# if x[j]<M*4 or x[j]< xMinShow:
## ysMean[j]=math.nan
# x[j]=math.nan
line=ax1.errorbar(x,ysMean,ysStd, fmt='.-',ecolor=color,color=color,markersize=1.5,linewidth=0.5)
line.set_label("m="+str(int(M)))
ref=len(ysMean)-2
def shape(x):
return x
ax1.plot(x,numpy.multiply(ysMean[ref]/shape(x[ref]),shape(x)),':',color=color,linewidth=0.3)
ysOrg=ys
if M == 1000:
outliers=3
ys=ysS
ys=numpy.array(ys)
trials= ys.shape[0]
ysStd=numpy.std(ys,axis=0)
#ys=numpy.partition(ys,[outliers,trials-outliers-1],axis=0)[outliers:trials-outliers-1]
ysMean=numpy.mean(ys,axis=0)
#ysStd=numpy.std(ys,axis=0)
#for j in range(len(ysMean)):
# if x[j]<M*4 or x[j]< xMinShow:
# ysMean[j]=math.nan
# x[j]=math.nan
color="#aaaaaa"
# trick to work around plotting issue, just draw first point first:
line=ax1.errorbar(x[0],ysMean[0],ysStd[0], fmt='.-',ecolor=color,color=color,markersize=2.5,linewidth=0.5)
line.set_label("std::sort")
# then error bar without line:
line=ax1.errorbar(x,ysMean,ysStd, fmt='.',ecolor=color,color=color,markersize=2.5,linewidth=0.5)
# then line:
        ax1.plot(x, ysMean, '-', color=color, linewidth=0.6)
ref=len(ysMean)-2
def shape(x):
return x*
|
numpy.log(x)
|
numpy.log
|
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
import argparse
#from utils.factor_plot import factor_plot
from bayespy import plot as bpplt
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
import warnings
from sklearn.externals import joblib
warnings.filterwarnings('ignore')
def build_toy_dataset():
np.random.seed(0)
N = 400
D = 30
gap = 3
# In B, all the data pts are from the same distribution, which has different variances in three subspaces.
B = np.zeros((N, D))
B[:, 0:10] = np.random.normal(0, 10, (N, 10))
B[:, 10:20] = np.random.normal(0, 3, (N, 10))
B[:, 20:30] = np.random.normal(0, 1, (N, 10))
# In A there are four clusters.
A = np.zeros((N, D))
A[:, 0:10] = np.random.normal(0, 10, (N, 10))
# group 1
A[0:100, 10:20] = np.random.normal(0, 1, (100, 10))
A[0:100, 20:30] = np.random.normal(0, 1, (100, 10))
# group 2
A[100:200, 10:20] = np.random.normal(0, 1, (100, 10))
A[100:200, 20:30] = np.random.normal(gap, 1, (100, 10))
# group 3
A[200:300, 10:20] = np.random.normal(2 * gap, 1, (100, 10))
A[200:300, 20:30] = np.random.normal(0, 1, (100, 10))
# group 4
A[300:400, 10:20] = np.random.normal(2 * gap, 1, (100, 10))
A[300:400, 20:30] = np.random.normal(gap, 1, (100, 10))
labels = [0] * 100 + [1] * 100 + [2] * 100 + [3] * 100
# Perform mean-centering
mB = np.mean(B, axis=0)
B = B - mB
mA = np.mean(A, axis=0)
A = A - mA
return A, B, labels
"""
Variable names consistent with those in
"Unsupervised Learning with Contrastive Latent Variable Models"
except loading factor dimensionalities k and t --> k_shared and k_target
x, y = observed data with dimensions x: d x n and y: d x m
zi, zj = shared latent variables with dimensions: k_shared
ti = target latent variables with dimensions: k_target
qzi, qzj, qti = variational gaussian rep for zi, zj, ti respectively
s = shared factor loading with dimensions: d x k_shared
w = target factor loading with dimensions: d x k_target
noise = noise
"""
class apply_clvm:
def __init__(self, modelpkl, target_dataset, background_dataset=None,
target_missing=False, background_missing=False):
"""
Initialization for applying an existing clvm to new data
:param target_dataset: numpy array of size n (observations) x d (measurements)
:param background_dataset: numpy array of size m (observations) x d (measurements)
:param k_shared: integer specifying the dimensionality of the shared latent space
:param k_target: integer specifying the dimensionality of the target latent space
:param robust_flag: boolean indicating if inverse gamma prior for noise is used
:param sharedARD: boolean indicating if ARD prior is used for shared factor loading
:param targetARD: boolean indicating if ARD prior is used for target factor loading
:param target_missing: boolean indicating if there is missing data in the target dataset;
missing data should be indicated by elements equal to np.NaN
:param background_missing: boolean indicating if there is missing data in the background dataset;
missing data should be indicated by elements equal to np.NaN
"""
self.n, self.d = target_dataset.shape
self.target_dataset = target_dataset
if background_dataset is not None:
self.m = background_dataset.shape[0]
self.background_dataset = background_dataset
self.background_predict = True
else:
self.background_predict = False
self.background_dataset = None
self.k_shared = modelpkl['k_shared']
self.k_target = modelpkl['k_target']
#get posterior estimates from pkl
self.w_inferred = modelpkl['W']
self.s_inferred = modelpkl['S']
self.noise_inferred = modelpkl['noise']
self.alpha_inferred = modelpkl['alpha']
self.beta_inferred = modelpkl['beta']
#flags for model variants
self.robust = modelpkl['robust']
self.targetARD = modelpkl['targetARD']
self.sharedARD = modelpkl['sharedARD']
self.target_missing = target_missing
self.background_missing = background_missing
if self.target_missing:
tobs = np.ones(self.target_dataset.shape).astype(np.int64)
tobs[
|
np.isnan(self.target_dataset)
|
numpy.isnan
|
import pandas as pd # importing the Pandas library required for file reading and file importing into the code
import numpy as np # mathematical library used for calling basic maths functions
#-----------------------------------------------------------------------------------------------------------------------------------------------
#BASIC FUNCTIONS REQUIRED
def sigmoid(x) : #calculates the conditional probability of the prediction.
    denom = (1.0 + np.exp(-x)) # The larger the weighted input, the higher the predicted probability of the positive event occurring
return 1.0/denom
def V(X,w) : # Given an input feature vector X and weight vector w, returns the predicted probability.
net = np.dot(X,w)
return sigmoid(net) #this function returns the sigmoid of weighted sum(the input vector and the weight vectors are inclusive of the bias term for this code)
def Error(X,w,y) : # Cost Function
    f1 = np.sum(np.dot(y.T,np.log(V(X,w)))) # This is the main function that gives the information about how far away the parameters are from their locally optimized values.
f2 = np.sum(np.dot((1-y).T,np.log(1-V(X,w)))) # Also known as negative log likelihood function. This is obtained since the outcomes are conditional probabilities for each class and each feature vector is independent of the others.
return -(f1 + f2)/y.size # The main idea of this implementation is the minimization of this cost function to obtain optimized parameters.
def gradError(X,w,y) : # The partial derivative of cost function w.r.t the Weights.
prediction = V(X,w)
X_trans = X.T # Transpose of feature vector
return (np.dot(X_trans,(V(X,w) - y)))/(y.size)
# Gradient of Cost Function, X: feature vector, w: weight matrix, y: function class to be learned, V(X,w): predicted class
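# Illustrative sanity check (an added example, not part of the original submission): compare
# gradError against a central finite-difference approximation of Error on a tiny synthetic
# problem. All names below (X_chk, w_chk, y_chk) are made up for this check.
def check_gradient(eps=1e-5, seed=0):
    rng = np.random.RandomState(seed)
    X_chk = np.hstack([rng.randn(5, 3), np.ones((5, 1))])  # 5 samples, 3 features + bias term
    y_chk = rng.randint(0, 2, size=(5, 1)).astype(float)   # binary labels
    w_chk = 0.1 * rng.randn(4, 1)
    analytic = gradError(X_chk, w_chk, y_chk)
    numeric = np.zeros_like(w_chk)
    for k in range(w_chk.size):
        w_plus, w_minus = w_chk.copy(), w_chk.copy()
        w_plus[k] += eps
        w_minus[k] -= eps
        numeric[k] = (Error(X_chk, w_plus, y_chk) - Error(X_chk, w_minus, y_chk)) / (2 * eps)
    return np.max(np.abs(analytic - numeric))  # should be close to zero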
#-----------------------------------------------------------------------------------------------------------------------------------------------
# FUNCTION REQUIRED FOR NORMALIZATION OF INPUT DATA
def normalized(X):
X_mean=X.mean(axis=0) # Calculates the mean value for the input data set
X_std=X.std(axis=0) # Calculates the standard deviation for the input data set
return (X-X_mean)/X_std # Returns the normalized data set
# -----------------------------------------------------------------------------------------------------------------------------------------------
# DATA HANDLING PART OF THE CODE USING PANDAS LIBRARY
# The pandas library function "read" takes as argument the local file path to where the data was stored on my computer during training
# This needs to be essentially updated if the location of the data files is updated.
data_train_features = pd.read_csv("/Users/vishalsharma/Documents/ELL409/Assignment1/dataset/train_data.csv", names=['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10','x11','x12','x13','x14','x15','x16']) # Importing the training feature vectors and storing them in the corresponding variable
data_test_features = pd.read_csv("/Users/vishalsharma/Documents/ELL409/Assignment1/dataset/test_data.csv", names=['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10','x11','x12','x13','x14','x15','x16']) # Importing the test feature vectors and storing them in the corresponding variable
data_train_features_matrix = data_train_features.to_numpy() # Creating a matrix of the obtained features for both the training and the test data sets.
data_test_features_matrix = data_test_features.to_numpy() # Training/Test feature matrix shape = (number of training/test inputs, 16) # Exclusive of the bias term '1'
data_train_labels = pd.read_csv("/Users/vishalsharma/Documents/ELL409/Assignment1/dataset/train_labels.csv", names=['y']) # Importing the training labels and storing them in the corresponding variable
data_test_labels = pd.read_csv("/Users/vishalsharma/Documents/ELL409/Assignment1/dataset/test_labels.csv", names=['y']) # Importing the test labels and storing them in the corresponding variable
#Y_df = pd.DataFrame(data_train_labels.y)
#print(Y_df.head())
data_train_labels_matrix = data_train_labels.to_numpy() # Creating a matrix of the obtained labels for both the training and the test data sets.
data_test_labels_matrix = data_test_labels.to_numpy() # Training/Test label matrix shape = (number of training/test inputs, 1)
data_train_features_matrix[:,1:] = normalized(data_train_features_matrix[:,1:]) # Normalizing the training feature data set
X_train = np.zeros((data_train_features_matrix.shape[0],17))
X_train[:,16] = 1.0
for i in range(16):
X_train[:,i] = data_train_features_matrix[:,i] # Training feature matrix shape = (number of training inputs, 17) # Inclusive of the bias term '1'
data_test_features_matrix[:,1:] = normalized(data_test_features_matrix[:,1:]) # Normalizing the test feature data set
X_test = np.zeros((data_test_features_matrix.shape[0],17))
X_test[:,16] = 1.0
for i in range(16):
X_test[:,i] = data_test_features_matrix[:,i] # Test feature matrix shape = (number of test inputs, 17) # Inclusive of the bias term '1'
Y_train = np.zeros((data_train_labels_matrix.shape[0],10)) # In this step an output matrix for each of the training and test sets is created based on the value of label for that data point
for i in range(10):
Y_train[:,i] = np.where(data_train_labels_matrix[:,0]==i, 1,0) # The new matrix has the shape = (number of training/test labels , 10)
Y_test = np.zeros((data_test_labels_matrix.shape[0],10)) # So a new matrix is constructed with 10 columns, where the column corresponding to the label value is set to 1 and the rest are zero.
for j in range(10):
Y_test[:,j] = np.where(data_test_labels_matrix[:,0]==j, 1,0)
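# Added sanity check (illustrative): indexing an identity matrix by the integer labels builds
# the same one-hot matrices as the np.where loops above.
assert np.array_equal(Y_train, np.eye(10)[data_train_labels_matrix[:, 0].astype(int)])
assert np.array_equal(Y_test, np.eye(10)[data_test_labels_matrix[:, 0].astype(int)])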
#------------------------------------------------------------------------------------------------------------------------------------------------
# MAIN LEARNING PART OF THE CODE. HERE I IMPLEMENT THE USUAL GRADIENT DESCENT ALGORITHM TO MAKE THE COST FUNCTION CONVERGE TO A LOCAL MINIMUM
W_opt= np.zeros((X_train.shape[1],10)) # The optimized weight matrix is stored in this variable.
W_opt2= np.zeros((X_train.shape[1],10)) # Again, each column of this matrix is a decision boundary separating that particular class from the rest of the classes.
# The shape of the optimized W_opt matrix = (17,10) in this case, with 16-dimensional feature vectors that are to be classified into one of the 10 distinct classes
def grad_desc(X, w, y, Tolerance, LearningRate) :
error = Error(X, w, y) # Computing the value of the cost function right at the start of the gradient descent algorithm for the first step.
iterations = 1 # Starting the counter for iterations with 1
    error_diff = 2 # difference in error between two consecutive iterations (initially set to an arbitrary value larger than the tolerance; important for loop termination), will be updated inside the loop
while(error_diff > Tolerance):
error_prev = error # assigns the value of the existing error to the variable error_prev
w = w - (LearningRate * gradError(X, w, y)) # update the weights according to the equation (w(j+1) = w(j) - LearningRate(gradError)) # step towards parameter optimization
error = Error(X, w, y) # new value of error will be equal to the newly calculated one with updated weights
        error_diff = error_prev - error # definition of error_diff
iterations+=1 # updating the iteration number
    print('Total iterations required for learning this decision boundary: ', iterations)
return w
for i in range(10):
print('\nLearning the parameters for Class-{} versus the rest\n'.format(i))
W_opt2[:,i] = grad_desc(X_train, W_opt[:,i], Y_train[:,i], Tolerance=1e-6, LearningRate=.001) # I have selected the convergence/tolerance and the learning rate to values that give best efficiency, but the learning is slow with these hyperparameters.
# Taking between 35,000 - 55,000 iterations for learning each class. We can change these values for a trade-off between training time and efficiency
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# VARIOUS SCORING METHODS TO TEST FOR THE EFFICIENCY OF THE LEARNED ALGORITHM
def Prob_list(X,w,y): # A function that calculates the probability of a feature vector belonging to a given class
h_prob_list = np.zeros(y.shape) # Simply by computing the sigmoid of the weighted sum over the input vector
for CLASS in range(10):
h_prob_list[:,CLASS]= V(X,w[:,CLASS])
return h_prob_list
def Pred_list(X,w,y): # Sets the column with the highest probability to 1 and the rest to zero.
h_prob_list2 = Prob_list(X,w,y) # This is classification based on the maximum probability corresponding to a class.
pred_list = np.zeros(y.shape)
for Class in range(10):
        for i in range(y.shape[0]):
if h_prob_list2[i,Class] == np.amax(h_prob_list2[i,:]):
pred_list[i,Class] = 1
else:
pred_list[i,Class] = 0
return pred_list # This function does the classification based on the probability distributions from the previous function
def true_Pos(pred_list, y, Class): # As the name suggests, gives the total number of true Positives for a class in train/test data
totalTruePos = 0
for i in range(y.shape[0]):
if (pred_list[i,Class] == 1 and y[i] == 1):
totalTruePos += 1
return totalTruePos
def false_Pos(pred_list, y, Class): # As the name suggests, gives the total number of false Positives for a class in train/test data
totalFalsePos = 0
for i in range(y.shape[0]):
if (pred_list[i,Class] == 1 and y[i] == 0):
totalFalsePos += 1
return totalFalsePos
def false_Neg(pred_list, y, Class): # As the name suggests, gives the total number of false Negatives for a class in train/test data
totalFalseNeg = 0
for i in range(y.shape[0]):
if (pred_list[i,Class] == 0 and y[i] == 1):
totalFalseNeg += 1
return totalFalseNeg
def true_Neg(pred_list, y, Class): # As the name suggests, gives the total number of true Negatives for a class in train/test data
totalTrueNeg = 0
for i in range(y.shape[0]):
if (pred_list[i,Class] == 0 and y[i] == 0):
totalTrueNeg += 1
return totalTrueNeg
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# A FEW SCORING METHODS WITH THEIR MATHEMATICAL DEFINITIONS
def accuracy(pred_list, y, Class):
acc = (true_Pos(pred_list, y, Class) + true_Neg(pred_list, y, Class))/y.size
return acc
def precision(pred_list, y, Class):
prec = true_Pos(pred_list, y,Class)/(false_Pos(pred_list, y, Class) + true_Pos(pred_list, y, Class))
return prec
def recall(pred_list, y, Class):
recall = true_Pos(pred_list, y, Class)/(true_Pos(pred_list, y,Class)+false_Neg(pred_list, y, Class))
return recall
def f1_score(pred_list, y, Class):
score = 2*recall(pred_list, y, Class)*precision(pred_list, y,Class)/(recall(pred_list, y,Class)+precision(pred_list, y,Class))
return score
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# PART OF THE CODE THAT COMPUTES THE SCORES VIA AFOREMENTIONED METHODS FOR BOTH TRAINING AND TEST DATA.
def scoringMethods(X,w,y):
pred_list = Pred_list(X,w,y)
ACCURACY = np.zeros(10)
PRECISION = np.zeros(10)
RECALL = np.zeros(10)
F_SCORE = np.zeros(10)
for Class in range(10):
pos_TRUE = true_Pos(pred_list, y[:,Class],Class)
pos_FALSE = false_Pos(pred_list, y[:,Class], Class)
neg_FALSE = false_Neg(pred_list, y[:,Class], Class)
neg_TRUE = true_Neg(pred_list, y[:,Class], Class)
ACCURACY[Class] = accuracy(pred_list, y[:,Class],Class)*100
PRECISION[Class] = precision(pred_list, y[:,Class], Class)
RECALL[Class] = recall(pred_list, y[:,Class], Class)
F_SCORE[Class] = f1_score(pred_list, y[:,Class], Class)
return ACCURACY, PRECISION, RECALL, F_SCORE
ACCURACY_train = np.zeros(10)
PRECISION_train = np.zeros(10)
RECALL_train = np.zeros(10)
F_SCORE_train = np.zeros(10)
ACCURACY_test = np.zeros(10)
PRECISION_test = np.zeros(10)
RECALL_test = np.zeros(10)
F_SCORE_test =
|
np.zeros(10)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
#%%
import numpy as np
import matplotlib.pyplot as plt
import utils
#%% Load the test data
X = np.load('test.npy')
colors = ['r']*16 + ['g']*17 + ['b']*17
plt.figure()
for i in range(50):
plt.scatter(X[i,0],X[i,1],c=colors[i])
plt.title('original X')
plt.show()
#%% Normalization
X = (X - X.mean(axis=0)) / X.std(axis=0)
#%% Compute the covariance matrix (note the necessity of zero-centering)
sigma = np.matmul(np.transpose(X), X) / X.shape[0]
#%% Use the SVD to compute the eigenvalues and eigenvectors of sigma
# http://www.visiondummy.com/2014/04/geometric-interpretation-covariance-matrix/
U, s, V = np.linalg.svd(sigma)
# We use linalg.svd rather than linalg.eig to decompose sigma because after the SVD
# the eigenvalues in s are already sorted in descending order, which makes it easy to pick the principal components.
# U and V are transposes of each other.
# Task 1: type np.linalg.eig? in the console and read its documentation, then use that function to compute the eigenvectors U and eigenvalues s.
# Sort the eigenvalues in descending order and reorder the corresponding eigenvectors accordingly.
# Compare with the result of np.linalg.svd. (A possible sketch follows below.)
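# A possible sketch for Task 1 (an illustrative addition, not the official solution):
# eigendecompose sigma with np.linalg.eig, sort by decreasing eigenvalue, and compare
# against the SVD result above (eigenvectors may differ from U only by their sign).
eigvals, eigvecs = np.linalg.eig(sigma)
order = np.argsort(eigvals)[::-1]             # indices of the eigenvalues, largest first
eigvals_sorted = np.real(eigvals[order])
eigvecs_sorted = np.real(eigvecs[:, order])   # reorder the columns to match
print('eigenvalues match s:', np.allclose(eigvals_sorted, s))
print('eigenvectors match U up to sign:',
      np.allclose(np.abs(eigvecs_sorted), np.abs(U)))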
#%% Rotate the data
Xrot = np.matmul(X, U)
plt.figure()
for i in range(50):
plt.scatter(Xrot[i,0],Xrot[i,1],c=colors[i])
plt.title('rotated X')
plt.show()
#%% Choose the number of principal components
print(s)
reserved_var = s[0] / s.sum()
print('Retained variance: %.4f' % reserved_var)
#%% Dimensionality reduction: keep the first k columns of Xrot (here k=1), which is equivalent to zeroing out the other columns
# Zero out the second dimension and look at the distribution of the data along the first dimension
Xrot[:,1] = 0
plt.figure()
for i in range(50):
plt.scatter(Xrot[i,0], Xrot[i,1], c=colors[i])
plt.title('rotated X')
plt.show()
#%% Reconstruct the data from the first column of Xrot
Xrec = np.outer(Xrot[:,0], V[0, :])
plt.figure()
for i in range(50):
plt.scatter(Xrec[i,0], Xrec[i,1], c=colors[i])
    plt.title('reconstructed X')
plt.show()
#%% Load the data
# Olivetti Faces dataset: 40 people, 10 photos each, 64 by 64 large
faces = np.load('olivettifaces.npy')
n_sample, n_feature = faces.shape
#%% Split the data
np.random.seed(1)
idxs = np.random.permutation(n_sample)
trainX = faces[idxs[: 300]]
testX = faces[idxs[300 :]]
# Data normalization
mu = np.mean(trainX, axis=0)
std = np.std(trainX, axis=0)
trainX = trainX - mu # zero-centering alone is enough here
testX = testX - mu
#%% Compute sigma, run the SVD, and choose the number of principal components
sigma = np.matmul(np.transpose(trainX), trainX) / trainX.shape[0]
U, s, V = np.linalg.svd(sigma)
# Task 2: implement utils.dimrd_degree
K = utils.dimrd_degree(s, threshold=0.99) # retain 99% of the variance
print('Reduced to %d dimensions.' % K)
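# A possible sketch of what utils.dimrd_degree might compute (an illustrative addition,
# not the course's reference implementation): the smallest K whose cumulative share of
# the variance reaches the threshold, assuming s is sorted in descending order.
def _dimrd_degree_sketch(s, threshold=0.99):
    ratio = np.cumsum(s) / np.sum(s)              # cumulative retained-variance ratio
    return int(np.searchsorted(ratio, threshold) + 1)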
#%% Show the "eigenfaces"
im_num = 10
plt.figure()
for i in range(im_num):
im = U[:,i]
    plt.subplot(im_num//2, 2, i+1)
plt.xticks([])
plt.yticks([])
plt.title('eigen-face %d' % i)
plt.imshow(im.reshape((64,64)), cmap='gray', interpolation='None')
plt.show()
#%% Reduce the data dimensionality (multiply only by the first K eigenvectors, i.e. project onto those eigenvector directions; equivalently, express each image as a linear combination of "eigenfaces")
# Each row of Xdrd holds the coefficients of one image on the first K "eigenfaces"
# Note: the reduction is applied to the test set; inspect the result
Xdrd = np.matmul(testX, U[:,:K])
#%% Reconstruct the data and inspect the reconstruction quality
Xrec =
|
np.matmul(Xdrd, V[:K, :])
|
numpy.matmul
|
from __future__ import print_function
import unittest
from nose.plugins.skip import SkipTest
import numpy
import theano
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available == False:
raise SkipTest('Optional package cuda disabled')
from theano.sandbox.cuda.var import float32_shared_constructor
from .unshared_conv import FilterActs
from .unshared_conv import WeightActs
from .unshared_conv import ImgActs
from .gpu_unshared_conv import (
GpuFilterActs,
GpuWeightActs,
GpuImgActs,
)
from . import test_unshared_conv
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
class TestGpuFilterActs(test_unshared_conv.TestFilterActs):
"""
    This class tests GpuWeightActs via the gradient of GpuFilterActs
The correctness of GpuFilterActs is tested in TestMatchFilterActs
"""
ishape = (1, 1, 4, 4, 2) # 2 4x4 greyscale images
    fshape = (2, 2, 1, 3, 3, 1, 16) # 16 3x3 filters at each location in a 2x2 grid
module_stride = 1
dtype = 'float32'
mode = theano.compile.get_default_mode().including('gpu_opt',
'fast_run', 'inplace').including('gpu_after_fusion',
'fast_run', 'inplace')
def setUp(self):
test_unshared_conv.TestFilterActs.setUp(self)
self.gpu_op = GpuFilterActs(
module_stride=self.module_stride,
partial_sum=1)
self.s_images = float32_shared_constructor(
self.s_images.get_value())
self.s_filters = float32_shared_constructor(
self.s_filters.get_value())
def test_gpu_shape(self):
import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available == False:
raise SkipTest('Optional package cuda disabled')
gpuout = self.gpu_op(self.s_images, self.s_filters)
assert 'Cuda' in str(self.s_filters.type)
f = theano.function([], gpuout, mode=mode_with_gpu)
outval = f()
assert outval.shape == (
self.fshape[-2], self.fshape[-1],
self.fshape[0], self.fshape[1],
self.ishape[-1])
def test_insert_gpu_filter_acts(self):
out = self.op(self.s_images, self.s_filters)
f = self.function([], out)
try:
fgraph = f.maker.fgraph
except:
# this needs to work for older versions of theano too
fgraph = f.maker.env
assert isinstance(
fgraph.toposort()[0].op,
GpuFilterActs)
def test_gpu_op_eq(self):
assert GpuFilterActs(1, 1) == GpuFilterActs(1, 1)
assert not (GpuFilterActs(1, 1) != GpuFilterActs(1, 1))
assert (GpuFilterActs(1, 2) != GpuFilterActs(1, 1))
assert (GpuFilterActs(2, 1) != GpuFilterActs(1, 1))
assert GpuFilterActs(2, 1) != None
class TestGpuWeightActs(unittest.TestCase):
"""
"""
ishape = (1, 1, 4, 4, 2) # 2 4x4 greyscale images
hshape = (1, 16, 2, 2, 2)
    fshape = (2, 2, 1, 3, 3, 1, 16) # 16 3x3 filters at each location in a 2x2 grid
frows = 3
fcols = 3
module_stride = 1
partial_sum = 1
dtype = 'float32'
def setUp(self):
self.gwa = GpuWeightActs(
module_stride=self.module_stride,
partial_sum=self.partial_sum)
self.gpu_images = float32_shared_constructor(
numpy.random.rand(*self.ishape).astype(self.dtype))
self.gpu_hidact = float32_shared_constructor(
numpy.random.rand(*self.hshape).astype(self.dtype))
def test_shape(self):
dfilters = self.gwa(self.gpu_images, self.gpu_hidact,
self.frows, self.fcols)
f = theano.function([], dfilters)
outval = f()
assert outval.shape == self.fshape
class TestGpuImgActs(unittest.TestCase):
"""
"""
ishape = (1, 1, 4, 4, 2) # 2 4x4 greyscale images
hshape = (1, 16, 2, 2, 2)
    fshape = (2, 2, 1, 3, 3, 1, 16) # 16 3x3 filters at each location in a 2x2 grid
irows = 4
icols = 4
module_stride = 1
partial_sum = 1
dtype = 'float32'
def setUp(self):
self.gia = GpuImgActs(
module_stride=self.module_stride,
partial_sum=self.partial_sum)
self.gpu_images = float32_shared_constructor(
numpy.random.rand(*self.ishape).astype(self.dtype))
self.gpu_hidact = float32_shared_constructor(
numpy.random.rand(*self.hshape).astype(self.dtype))
self.gpu_filters = float32_shared_constructor(
numpy.random.rand(*self.fshape).astype(self.dtype))
def test_shape(self):
dimages = self.gia(self.gpu_filters, self.gpu_hidact,
self.irows, self.icols)
f = theano.function([], dimages)
outval = f()
assert outval.shape == self.ishape
if 1:
class TestMatchFilterActs(unittest.TestCase):
def setUp(self):
numpy.random.seed(77)
def run_match(self, images, filters, module_stride, retvals=False, partial_sum=1):
gfa = GpuFilterActs(module_stride, partial_sum)
fa = FilterActs(module_stride)
gpu_images = float32_shared_constructor(images)
gpu_filters = float32_shared_constructor(filters)
cpu_images = theano.shared(images)
cpu_filters = theano.shared(filters)
gpu_out = gfa(gpu_images, gpu_filters)
cpu_out = fa(cpu_images, cpu_filters)
f = theano.function([], [cpu_out, gpu_out])
cpuval, gpuval = f()
gpuval =
|
numpy.asarray(gpuval)
|
numpy.asarray
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import numpy as np
import pickle as pkl
import multiprocessing
from itertools import product
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
try:
sys.path.append(os.getcwd())
import sparse_module
try:
from sparse_module import c_algo_solam
from sparse_module import c_algo_spam
from sparse_module import c_algo_sht_auc
from sparse_module import c_algo_opauc
from sparse_module import c_algo_sto_iht
from sparse_module import c_algo_hsg_ht
from sparse_module import c_algo_fsauc
except ImportError:
print('cannot find some function(s) in sparse_module')
pass
except ImportError:
print('cannot find the module: sparse_module')
pass
"""
Related genes are found by the following paper:
Agarwal, Shivani, and <NAME>.
"Ranking genes by relevance to a disease."
Proceedings of the 8th annual international
conference on computational systems bioinformatics. 2009.
"""
related_genes = {683: "01 -- Hsa.1036 -- Phospholipase A2",
1235: "01 -- Hsa.290 -- Phospholipase A2",
295: "01 -- Hsa.994 -- Phospholipase A2",
451: "02 -- Hsa.3328 -- Keratin 6 isoform",
608: "03 -- Hsa.24944 -- Protein-tyrosine phosphatase PTP-H1",
1041: "04 -- Hsa.549 -- Transcription factor IIIA",
1043: "05 -- Hsa.13522 -- Viral (v-raf) oncogene homolog 1",
1165: "06 -- Hsa.7348 -- Dual specificity mitogen-activated protein kinase kinase 1",
1279: "07 -- Hsa.1280 -- Transmembrane carcinoembryonic antigen",
917: "07 -- Hsa.3068 -- Transmembrane carcinoembryonic antigen",
1352: "08 -- Hsa.2957 -- Oncoprotein 18",
1386: "09 -- Hsa.1902 -- Phosphoenolpyruvate carboxykinase",
1870: "10 -- Hsa.865 -- Extracellular signal-regulated kinase 1",
1393: "10 -- Hsa.42746 -- Extracellular signal-regulated kinase 1",
554: "11 -- Hsa.1098 -- 26 kDa cell surface protein TAPA-1",
268: "12 -- Hsa.2806 -- Id1",
146: "13 -- Hsa.558 -- Interferon-inducible protein 9-27",
1463: "14 -- Hsa.558 -- Nonspecific crossreacting antigen",
112: "15 -- Hsa.68 -- cAMP response element regulatory protein (CREB2)",
325: "16 -- Hsa.256 -- Splicing factor (CC1.4)",
137: "17 -- Hsa.957 -- Nucleolar protein (B23)",
209: "18 -- Hsa.2846 -- Lactate dehydrogenase-A (LDH-A)",
158: "19 -- Hsa.45604 -- Guanine nucleotide-binding protein G(OLF)",
170: "19 -- Hsa.45604 -- Guanine nucleotide-binding protein G(OLF)",
175: "19 -- Hsa.25451 -- Guanine nucleotide-binding protein G(OLF)",
1143: "20 -- Hsa.393 -- LI-cadherin",
316: "21 -- Hsa.891 -- Lysozyme",
225: "22 -- Hsa.3295 -- Prolyl 4-hydroxylase (P4HB)",
207: "23 -- Hsa.338 -- Eukaryotic initiation factor 4AII",
163: "24 -- Hsa.5821 -- Interferon-inducible protein 1-8D",
701: "25 -- Hsa.109 -- Dipeptidase",
1790: "26 -- Hsa.2794 -- Heat shock 27 kDa protein",
534: "27 -- Hsa.5633 -- Tyrosine-protein kinase receptor TIE-1 precursor",
512: "28 -- Hsa.831 -- Mitochondrial matrix protein P1 precursor",
1: "29 -- Hsa.13491 -- Eukaryotic initiation factor EIF-4A homolog",
2: "29 -- Hsa.13491 -- Eukaryotic initiation factor EIF-4A homolog",
282: "29 -- Hsa.80 -- Eukaryotic initiation factor EIF-4A homolog",
613: "29 -- Hsa.9251 -- Eukaryotic initiation factor EIF-4A homolog"}
def process_data_20_colon():
"""
https://github.com/ramhiser/datamicroarray
http://genomics-pubs.princeton.edu/oncology/affydata/index.html
:return:
"""
data_path = '/enter/your/directory/to/20_colon/'
data = {'feature_ids': None, 'x_tr': [], 'y_tr': [], 'feature_names': []}
import csv
with open(data_path + 'colon_x.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
data['feature_ids'] = [int(_) for _ in row[1:]]
line_count += 1
elif 1 <= line_count <= 62:
data['x_tr'].append([float(_) for _ in row[1:]])
line_count += 1
data['x_tr'] = np.asarray(data['x_tr'])
for i in range(len(data['x_tr'])):
data['x_tr'][i] = data['x_tr'][i] / np.linalg.norm(data['x_tr'][i])
with open(data_path + 'colon_y.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
continue
elif 1 <= line_count <= 62:
line_count += 1
if row[1] == 't':
data['y_tr'].append(1.)
else:
data['y_tr'].append(-1.)
data['y_tr'] = np.asarray(data['y_tr'])
with open(data_path + 'colon_names.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
continue
elif 1 <= line_count <= 2000:
line_count += 1
                data['feature_names'].append(row[1])
data['n'] = 62
data['p'] = 2000
data['num_trials'] = 20
data['num_posi'] = len([_ for _ in data['y_tr'] if _ == 1.])
data['num_nega'] = len([_ for _ in data['y_tr'] if _ == -1.])
trial_i = 0
while True:
# since original data is ordered, we need to shuffle it!
rand_perm = np.random.permutation(data['n'])
train_ind, test_ind = rand_perm[:50], rand_perm[50:]
if len([_ for _ in data['y_tr'][train_ind] if _ == 1.]) == 33 or \
len([_ for _ in data['y_tr'][train_ind] if _ == 1.]) == 32:
            data['trial_%d' % trial_i] = {'tr_index': train_ind, 'te_index': test_ind}
print(len([_ for _ in data['y_tr'][train_ind] if _ == 1.]),
len([_ for _ in data['y_tr'][train_ind] if _ == -1.]),
len([_ for _ in data['y_tr'][test_ind] if _ == 1.]),
len([_ for _ in data['y_tr'][test_ind] if _ == -1.])),
success = True
kf = KFold(n_splits=5, shuffle=False)
for fold_index, (train_index, test_index) in enumerate(kf.split(range(len(train_ind)))):
if len([_ for _ in data['y_tr'][train_ind[test_index]] if _ == -1.]) < 3:
success = False
break
data['trial_%d_fold_%d' % (trial_i, fold_index)] = {'tr_index': train_ind[train_index],
'te_index': train_ind[test_index]}
print(len([_ for _ in data['y_tr'][train_ind[train_index]] if _ == 1.]),
len([_ for _ in data['y_tr'][train_ind[train_index]] if _ == -1.]),
len([_ for _ in data['y_tr'][train_ind[test_index]] if _ == 1.]),
len([_ for _ in data['y_tr'][train_ind[test_index]] if _ == -1.])),
print(trial_i)
if success:
trial_i += 1
if trial_i >= data['num_trials']:
break
pkl.dump(data, open(data_path + 'colon_data.pkl', 'wb'))
def cv_sht_auc(para):
data, trial_id, fold_id = para
num_passes, step_len, verbose, record_aucs, stop_eps = 100, 1e2, 0, 1, 1e-6
__ = np.empty(shape=(1,), dtype=float)
all_results = dict()
s_list = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500]
for para_s in s_list:
tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
te_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
results = dict()
best_b, best_auc = None, None
for para_b in range(5, 41, 5):
global_paras = np.asarray([num_passes, step_len, verbose, 0, stop_eps], dtype=float)
wt, _, _, _ = c_algo_sht_auc(x_tr, __, __, __, y_tr, 0, data['p'], global_paras,
0, para_s, para_b, 1., 0.0)
auc_score = roc_auc_score(y_true=data['y_tr'][te_index], y_score=np.dot(data['x_tr'][te_index], wt))
if best_b is None or best_auc is None or best_auc < auc_score:
best_b, best_auc = para_b, auc_score
tr_index = data['trial_%d' % trial_id]['tr_index']
te_index = data['trial_%d' % trial_id]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
wt, aucs, rts, epochs = c_algo_sht_auc(x_tr, __, __, __, y_tr, 0, data['p'], global_paras,
0, para_s, best_b, 1., 0.0)
results[(trial_id, fold_id)] = {'algo_para': [trial_id, fold_id, para_s, best_b],
'auc_wt': roc_auc_score(y_true=data['y_tr'][te_index],
y_score=np.dot(data['x_tr'][te_index], wt)),
'wt': wt}
print('best_b: %02d nonzero: %.4e test_auc: %.4f' %
(best_b, float(np.count_nonzero(wt)), results[(trial_id, fold_id)]['auc_wt']))
all_results[para_s] = results
return trial_id, fold_id, all_results
def cv_sto_iht(para):
data, trial_id, fold_id = para
num_passes, step_len, verbose, record_aucs, stop_eps = 100, 1e2, 0, 1, 1e-6
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
__ = np.empty(shape=(1,), dtype=float)
all_results = dict()
s_list = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500]
for para_s in s_list:
tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
te_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
results = dict()
best_b, best_auc = None, None
for para_b in range(5, 41, 5):
wt, _, _, _ = c_algo_sto_iht(x_tr, __, __, __, y_tr, 0, data['p'], global_paras, para_s, para_b, 1., 0.0)
auc_score = roc_auc_score(y_true=data['y_tr'][te_index], y_score=np.dot(data['x_tr'][te_index], wt))
if best_b is None or best_auc is None or best_auc < auc_score:
best_b, best_auc = para_b, auc_score
tr_index = data['trial_%d' % trial_id]['tr_index']
te_index = data['trial_%d' % trial_id]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
wt, aucs, rts, epochs = c_algo_sto_iht(x_tr, __, __, __, y_tr, 0, data['p'],
global_paras, para_s, best_b, 1., 0.0)
results[(trial_id, fold_id)] = {'algo_para': [trial_id, fold_id, para_s, best_b],
'auc_wt': roc_auc_score(y_true=data['y_tr'][te_index],
y_score=np.dot(data['x_tr'][te_index], wt)), 'wt': wt}
print('best_b: %02d nonzero: %.4e test_auc: %.4f' %
(best_b, float(np.count_nonzero(wt)), results[(trial_id, fold_id)]['auc_wt']))
all_results[para_s] = results
return trial_id, fold_id, all_results
def cv_hsg_ht(para):
data, trial_id, fold_id = para
num_passes, step_len, verbose, record_aucs, stop_eps = 100, 1e2, 0, 1, 1e-6
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
__ = np.empty(shape=(1,), dtype=float)
all_results = dict()
s_list = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500]
for para_s in s_list:
tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
te_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
results = dict()
best_c, best_auc = None, None
for para_tau in [1., 10., 100., 1000.]:
para_c, para_zeta = 3.0, 1.033
wt, _, _, _ = c_algo_hsg_ht(x_tr, __, __, __, y_tr, 0, data['p'], global_paras,
para_s, para_tau, para_zeta, para_c, 0.0)
auc_score = roc_auc_score(y_true=data['y_tr'][te_index], y_score=np.dot(data['x_tr'][te_index], wt))
if best_auc is None or best_auc < auc_score:
best_c, best_auc = para_c, auc_score
tr_index = data['trial_%d' % trial_id]['tr_index']
te_index = data['trial_%d' % trial_id]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
wt, aucs, rts, epochs = c_algo_hsg_ht(x_tr, __, __, __, y_tr, 0, data['p'], global_paras,
para_s, para_tau, para_zeta, best_c, 0.0)
results[(trial_id, fold_id)] = {'algo_para': [trial_id, fold_id, best_c, para_s],
'auc_wt': roc_auc_score(y_true=data['y_tr'][te_index],
y_score=np.dot(data['x_tr'][te_index], wt)), 'wt': wt}
print('best_c: %02d nonzero: %.4e test_auc: %.4f' %
(best_c, float(np.count_nonzero(wt)), results[(trial_id, fold_id)]['auc_wt']))
all_results[para_s] = results
return trial_id, fold_id, all_results
def cv_spam_l1(para):
data, trial_id, fold_id = para
num_passes, step_len, verbose, record_aucs, stop_eps = 100, 1e2, 0, 1, 1e-6
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
__ =
|
np.empty(shape=(1,), dtype=float)
|
numpy.empty
|
# Taken from SHAP to enable saving figures
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
colors = []
for l in np.linspace(1, 0, 100):
colors.append((30. / 255, 136. / 255, 229. / 255, l))
for l in
|
np.linspace(0, 1, 100)
|
numpy.linspace
|
import copy
import glob
import re
import numpy as np
from skimage import io, transform, color
import pathlib
from save_load.load_csv import load_csv_params
def data_loading(vec_idx_patient, cfg):
if cfg.load_mode == 'folder':
if not cfg.binary_class:
print('\nLoading data from normal patients')
x_healthy, y_healthy, vec_str_healthy_patient = load_specific_label_folder(vec_idx_patient, cfg.str_healthy,
cfg.label_healthy, cfg)
print('Total number of healthy patients: {}\n'.format(x_healthy[0].shape[0]))
print('\nLoading data from dry AMD patients')
x_dry_amd, y_dry_amd, vec_str_dry_amd_patient = load_specific_label_folder(vec_idx_patient, cfg.str_dry_amd,
cfg.label_dry_amd, cfg)
print('Total number of dry AMD patients: {}\n'.format(x_dry_amd[0].shape[0]))
print('\nLoading data from CNV patients')
x_cnv, y_cnv, vec_str_cnv_patient = load_specific_label_folder(vec_idx_patient, cfg.str_cnv, cfg.label_cnv,
cfg)
print('Total number of CNV patients: {}\n'.format(x_cnv[0].shape[0]))
cfg.n_dry_amd = x_dry_amd[0].shape[0]
cfg.n_cnv = x_cnv[0].shape[0]
cfg.n_healthy = x_healthy[0].shape[0]
# unpack once more
x_angiography = np.concatenate((x_healthy[0], x_dry_amd[0], x_cnv[0]), axis=0)
x_structure = np.concatenate((x_healthy[1], x_dry_amd[1], x_cnv[1]), axis=0)
x_bscan = np.concatenate((x_healthy[2], x_dry_amd[2], x_cnv[2]), axis=0)
x_bscan3d = np.concatenate((x_healthy[3], x_dry_amd[3], x_cnv[3]), axis=0)
y = np.concatenate((y_healthy, y_dry_amd, y_cnv), axis=0)
cfg.vec_str_patient = np.concatenate(
(vec_str_healthy_patient, vec_str_dry_amd_patient, vec_str_cnv_patient), axis=0)
else:
# TODO: code below is ugly...
if cfg.binary_mode == 0:
print('\nLoading data from normal patients')
x_healthy, y_healthy, vec_str_healthy_patient = load_specific_label_folder(vec_idx_patient,
cfg.str_healthy,
cfg.label_healthy, cfg)
print('Total number of healthy patients: {}\n'.format(x_healthy[0].shape[0]))
print('\nLoading data from dry AMD patients')
x_dry_amd, y_dry_amd, vec_str_dry_amd_patient = load_specific_label_folder(vec_idx_patient,
cfg.str_dry_amd,
cfg.label_dry_amd, cfg)
print('Total number of dry AMD patients: {}\n'.format(x_dry_amd[0].shape[0]))
x_angiography = np.concatenate((x_healthy[0], x_dry_amd[0]), axis=0)
x_structure = np.concatenate((x_healthy[1], x_dry_amd[1]), axis=0)
x_bscan = np.concatenate((x_healthy[2], x_dry_amd[2]), axis=0)
x_bscan3d = np.concatenate((x_healthy[3], x_dry_amd[3]), axis=0)
y = np.concatenate((y_healthy, y_dry_amd), axis=0)
cfg.vec_str_patient = np.concatenate((vec_str_healthy_patient, vec_str_dry_amd_patient), axis=0)
elif cfg.binary_mode == 1:
print('\nLoading data from normal patients')
x_healthy, y_healthy, vec_str_healthy_patient = load_specific_label_folder(vec_idx_patient,
cfg.str_healthy,
cfg.label_healthy, cfg)
print('Total number of healthy patients: {}\n'.format(x_healthy[0].shape[0]))
print('\nLoading data from CNV patients')
x_cnv, y_cnv, vec_str_cnv_patient = load_specific_label_folder(vec_idx_patient, cfg.str_cnv,
cfg.label_cnv, cfg)
print('Total number of CNV patients: {}\n'.format(x_cnv[0].shape[0]))
x_angiography = np.concatenate((x_healthy[0], x_cnv[0]), axis=0)
x_structure = np.concatenate((x_healthy[1], x_cnv[1]), axis=0)
x_bscan = np.concatenate((x_healthy[2], x_cnv[2]), axis=0)
x_bscan3d = np.concatenate((x_healthy[3], x_cnv[3]), axis=0)
y = np.concatenate((y_healthy, y_cnv), axis=0)
cfg.vec_str_patient = np.concatenate((vec_str_healthy_patient, vec_str_cnv_patient), axis=0)
elif cfg.binary_mode == 2:
print('\nLoading data from dry AMD patients')
x_dry_amd, y_dry_amd, vec_str_dry_amd_patient = load_specific_label_folder(vec_idx_patient,
cfg.str_dry_amd,
cfg.label_dry_amd, cfg)
print('Total number of dry AMD patients: {}\n'.format(x_dry_amd[0].shape[0]))
print('\nLoading data from CNV patients')
x_cnv, y_cnv, vec_str_cnv_patient = load_specific_label_folder(vec_idx_patient, cfg.str_cnv,
cfg.label_cnv, cfg)
print('Total number of CNV patients: {}\n'.format(x_cnv[0].shape[0]))
x_angiography = np.concatenate((x_dry_amd[0], x_cnv[0]), axis=0)
x_structure = np.concatenate((x_dry_amd[1], x_cnv[1]), axis=0)
x_bscan = np.concatenate((x_dry_amd[2], x_cnv[2]), axis=0)
x_bscan3d = np.concatenate((x_dry_amd[3], x_cnv[3]), axis=0)
y = np.concatenate((y_dry_amd, y_cnv), axis=0)
cfg.vec_str_patient = np.concatenate((vec_str_dry_amd_patient, vec_str_cnv_patient), axis=0)
else:
raise Exception('Undefined mode for binary classification')
X = [x_angiography, x_structure, x_bscan, x_bscan3d]
elif cfg.load_mode == 'csv':
if cfg.d_csv is None or cfg.f_csv is None:
raise Exception('Need to provide path of csv file if using csv load mode')
vec_str_patient_id, _, _, _, _, _ = load_csv_params(cfg, bool_mode_full=False)
X, vec_str_patients, vec_out_csv_str = load_all_data_csv(vec_idx_patient, vec_str_patient_id, cfg)
cfg.vec_str_patients = vec_str_patients
cfg.vec_out_csv_str = vec_out_csv_str
y = None
else:
raise Exception('Undefined load mode')
return X, y
def generate_labels(str_feature, cfg, bool_append_csv_to_cfg=False):
cfg_local = copy.deepcopy(cfg)
cfg_local.str_feature = str_feature
if cfg_local.load_mode == 'csv':
if cfg_local.d_csv is None or cfg_local.f_csv is None:
raise Exception('Need to provide path of csv file if using csv load mode')
if cfg_local.str_feature not in cfg_local.vec_all_str_feature:
raise Exception('Invalid feature label provided')
_, vec_OD_feature, vec_OS_feature, pd_csv, out_csv, vec_csv_col = \
load_csv_params(cfg_local, bool_mode_full=True)
# now generate the labels corresponding to the correct feature
y, vec_out_csv_idx = _load_all_data_label_csv(cfg_local.vec_out_csv_str, vec_OD_feature, vec_OS_feature,
vec_csv_col)
# perform sanity check using the input csv structure
for i in range(len(vec_out_csv_idx)):
idx_csv_out = vec_out_csv_idx[i]
y_true_curr = pd_csv.iat[idx_csv_out[0], idx_csv_out[1]]
if str_feature == 'disease':
if y_true_curr == 0:
y_true_curr = np.nan
else:
y_true_curr -= 1
if not np.allclose(y[i], y_true_curr) and not np.all(np.isnan([y_true_curr, y[i]])):
raise ValueError("These should be equal")
if bool_append_csv_to_cfg:
cfg.str_feature = str_feature
cfg.vec_out_csv_idx = vec_out_csv_idx
cfg.pd_csv = pd_csv.copy()
cfg.out_csv = out_csv
cfg.vec_csv_col = vec_csv_col
else:
raise NotImplementedError("Unsupported mode")
return y
def correct_data_label(X, y, cfg, bool_use_ref_label=False, y_label_ref=False):
"""
Perform correction to the full X and y data based on flags set in the configuration
:param X:
:param y:
:param cfg:
:return:
"""
# make local copies of the input variables
[x_angiography, x_structure, x_bscan, x_bscan3d] = X
x_angiography_local = x_angiography.copy()
x_structure_local = x_structure.copy()
x_bscan_local = x_bscan.copy()
x_bscan3d_local = x_bscan3d.copy()
y_local = y.copy()
# also make local copies of the hyperparameters
vec_str_patients_local = copy.deepcopy(cfg.vec_str_patients)
vec_out_csv_str_local = copy.deepcopy(cfg.vec_out_csv_str)
vec_out_csv_idx_local = copy.deepcopy(cfg.vec_out_csv_idx)
# check for NaN in the generated label
if np.any(np.isnan(y)) or bool_use_ref_label:
# obtain the labels that are actually valid
if bool_use_ref_label:
idx_valid_label = np.logical_not((np.isnan(y_label_ref)))
else:
idx_valid_label = np.logical_not((np.isnan(y_local)))
# now correct for the data variables
x_angiography_local = x_angiography_local[idx_valid_label, ...]
x_structure_local = x_structure_local[idx_valid_label, ...]
x_bscan_local = x_bscan_local[idx_valid_label, ...]
x_bscan3d_local = x_bscan3d_local[idx_valid_label, ...]
y_local = y_local[idx_valid_label, ...]
# now correct for hyperparameter variables
vec_str_patients_local_temp = []
vec_out_csv_str_local_temp = []
vec_out_csv_idx_local_temp = []
for i in range(len(vec_str_patients_local)):
if idx_valid_label[i]:
vec_str_patients_local_temp.append(vec_str_patients_local[i])
vec_out_csv_str_local_temp.append(vec_out_csv_str_local[i])
vec_out_csv_idx_local_temp.append(vec_out_csv_idx_local[i])
vec_str_patients_local = vec_str_patients_local_temp
vec_out_csv_str_local = vec_out_csv_str_local_temp
vec_out_csv_idx_local = vec_out_csv_idx_local_temp
# sanity check
if not x_angiography_local.shape[0] == x_structure_local.shape[0] == x_bscan_local.shape[0] == x_bscan3d_local.shape[0]:
raise ValueError("These should be equal")
if not x_angiography_local.shape[0] == y_local.shape[0]:
raise ValueError("These should be equal")
if not len(vec_str_patients_local) == len(vec_out_csv_str_local) == len(vec_out_csv_idx_local):
raise ValueError("These should be equal")
if bool_use_ref_label and not y_local.shape[0] == y_label_ref[idx_valid_label].shape[0]:
raise ValueError("These should be equal ")
# if mode is not binary class then check for label consistency and correct any identified inconsistencies
if not cfg.binary_class:
cfg.y_unique_label = np.unique(y_local)
# if three unique labels and feature type is disease then do nothing
if len(np.unique(y_local)) == 3 and cfg.str_feature == 'disease':
pass
# check if there are only two labels present, which is the case for many features
elif len(np.unique(y_local)) == 2 and cfg.str_feature != 'disease':
cfg.num_classes = 2
cfg.binary_class = True
vec_str_labels_temp = []
for i in range(cfg.num_classes):
vec_str_labels_temp.append(cfg.vec_str_labels[int(np.unique(y_local)[i])])
cfg.vec_str_labels = vec_str_labels_temp
# correct for labels where there are skips
y_local_temp = y_local.copy()
if not np.all(np.unique(y_local) == np.arange(0, cfg.num_classes)):
for i in range(cfg.num_classes):
y_local_temp[y_local_temp == np.unique(y_local)[i]] = np.arange(0, cfg.num_classes)[i]
y_local = y_local_temp
elif len(np.unique(y_local)) == 2 and cfg.str_feature == 'disease':
raise Exception('There should be three disease labels')
elif len(np.unique(y_local)) == 4:
raise Exception('Too many labels')
else:
raise ValueError("Unknown failure mode")
# In the case of binary mode then also have to correct for the data for training
else:
cfg.y_unique_label = np.arange(0, 2, 1)
y_local_temp = y_local.copy()
if cfg.binary_mode == 0:
idx_label_0 = y_local_temp == 0
idx_label_1 = y_local_temp == 1
cfg.vec_str_labels = ['Normal', 'NNV AMD']
elif cfg.binary_mode == 1:
idx_label_0 = y_local_temp == 0
idx_label_1 = y_local_temp == 2
cfg.vec_str_labels = ['Normal', 'NV AMD']
elif cfg.binary_mode == 2:
idx_label_0 = y_local_temp == 1
idx_label_1 = y_local_temp == 2
cfg.vec_str_labels = ['NNV AMD', 'NV AMD']
else:
raise ValueError("Unknown mode")
X_angio_label0 = x_angiography_local[idx_label_0, ...]
X_struct_label0 = x_structure_local[idx_label_0, ...]
X_bscan_label0 = x_bscan_local[idx_label_0, ...]
X_bscan3d_label0 = x_bscan3d_local[idx_label_0, ...]
y_label0 = np.zeros_like(y_local[idx_label_0])
X_angio_label1 = x_angiography_local[idx_label_1, ...]
X_struct_label1 = x_structure_local[idx_label_1, ...]
X_bscan_label1 = x_bscan_local[idx_label_1, ...]
X_bscan3d_label1 = x_bscan3d_local[idx_label_1, ...]
y_label1 = np.ones_like(y_local[idx_label_1])
idx_full = np.arange(0, len(vec_str_patients_local), 1)
idx_binary_label0 = idx_full[idx_label_0]
idx_binary_label1 = idx_full[idx_label_1]
idx_binary =
|
np.concatenate([idx_binary_label0, idx_binary_label1])
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Machine Learning Group of the University of Oldenburg.
# Licensed under the Academic Free License version 3.0
from __future__ import division
import numpy as np
from mpi4py import MPI
import evo.utils.parallel as parallel
import evo.utils.tracing as tracing
from evo.models import Model
class BSC(Model):
def __init__(self, D, H, S, to_learn=["W", "pi", "sigma"], comm=MPI.COMM_WORLD):
"""Based on https://github.com/ml-uol/prosper/blob/master/prosper/em/camodels/bsc_et.py::
BSC_ET.__init__
For LICENSING and COPYRIGHT for the respective function in prosper see prosper's license
at: https://github.com/ml-uol/prosper/blob/master/LICENSE.txt
"""
Model.__init__(self, D, H, S, to_learn, comm)
log_tiny = np.finfo(np.float64).min
self.eps_lpj = log_tiny
@tracing.traced
def generate_from_hidden(self, model_params, my_hdata):
"""Based on https://github.com/ml-uol/prosper/blob/master/prosper/em/camodels/bsc_et.py::
BSC_ET.generate_from_hidden
For LICENSING and COPYRIGHT for the respective function in prosper see prosper's license
at: https://github.com/ml-uol/prosper/blob/master/LICENSE.txt
"""
W = model_params["W"].T
sigma = model_params["sigma"]
H_gen, D = W.shape
s = my_hdata["s"]
my_N, _ = s.shape
# Create output arrays, y is data
y = np.zeros((my_N, D))
for n in range(my_N):
# Linear superposition
for h in range(H_gen):
if s[n, h]:
y[n] += W[h]
y_mean = y.copy()
# Add noise according to the model parameters
y += np.random.normal(scale=sigma, size=(my_N, D))
# Build return structure
return {"y": y, "s": s, "y_mean": y_mean}
@tracing.traced
def log_pseudo_joint_permanent_states(self, model_params, my_suff_stat, my_data):
this_y = my_data["this_y"]
this_x_infr = my_data["this_x_infr"]
pre1 = model_params["pre1"]
permanent = my_suff_stat["permanent"]
S_perm = my_suff_stat["S_perm"]
lpj = np.empty((S_perm,))
# all-zero state
if permanent["allzero"]:
lpj[0] = pre1 * (this_y[this_x_infr] ** 2).sum()
lpj = self.lpj_reset_check(lpj, my_suff_stat)
return lpj
@tracing.traced
def log_pseudo_joint(self, model_params, my_suff_stat, my_data):
this_y = my_data["this_y"]
this_x_infr = my_data["this_x_infr"]
W = model_params["W"].T
pre1 = model_params["pre1"]
pil_bar = model_params["pil_bar"]
states = my_suff_stat["this_states"]
state_abs = states.sum(axis=1) # is (curr_S,)
pre_lpjpt = pil_bar * state_abs
Wbar = np.dot(states, W[:, this_x_infr])
lpjpt = pre1 * ((Wbar - this_y[this_x_infr]) ** 2).sum(axis=1)
lpj = lpjpt + pre_lpjpt
return self.lpj_reset_check(lpj, my_suff_stat)
@tracing.traced
def E_step_precompute(self, model_params, my_suff_stat, my_data):
comm = self.comm
D = self.D
H = self.H
pi = model_params["pi"]
sigma = model_params["sigma"]
my_x_infr = my_data["x_infr"]
my_N = my_x_infr.shape[0]
N = comm.allreduce(my_N)
incmpl_data = not my_x_infr.all()
model_params["piH"] = pi * H
model_params["pre1"] = -1.0 / 2.0 / sigma / sigma
model_params["pil_bar"] = np.log(pi / (1.0 - pi))
if incmpl_data:
sum_n_d = comm.allreduce(my_x_infr.sum())
model_params["ljc"] = (
H * np.log(1.0 - pi) - np.log(2 * np.pi * sigma * sigma) * sum_n_d / N / 2
)
else:
model_params["ljc"] = H * np.log(1.0 - pi) - D / 2 * np.log(2 * np.pi * sigma * sigma)
my_suff_stat["reset_lpj_isnan"] = 0
my_suff_stat["reset_lpj_smaller_eps_lpj"] = 0
my_suff_stat["reset_lpj_isinf"] = 0
@tracing.traced
def M_step(self, model_params, my_suff_stat, my_data):
"""M-step: Update Thetas using given K^{n} and respective log-pseudo joints.
:param model_params: Current Thetas
:type model_params: dict
:param my_suff_stat: Storage containing current K^{n} and respective log-pseudo joints
:param my_suff_stat: dict
:param my_data: Local dataset including indices of reliable (non-missing) entries and
entries to be reconstructed
:type my_data: np.ndarray
:return: Updated Thetas^{new}
:type return: dict
Inspired by
https://github.com/ml-uol/prosper/blob/master/prosper/em/camodels/bsc_et.py::BSC_ET.M_step
For LICENSING and COPYRIGHT for the respective function in prosper see prosper's license
at: https://github.com/ml-uol/prosper/blob/master/LICENSE.txt
"""
# Array handling
comm = self.comm
my_x_infr = my_data["x_infr"]
my_N, D = my_x_infr.shape
N = comm.allreduce(my_N)
H = self.H
W = model_params["W"].T
pi = model_params["pi"]
sigma = model_params["sigma"]
lpj = my_suff_stat["lpj"] # is (my_N x (S+H+1))
ss = my_suff_stat["ss"] # is (my_N x S x H)
S_perm = my_suff_stat["S_perm"]
permanent = my_suff_stat["permanent"]
incmpl_data = not my_x_infr.all()
# Check if lpj have been manually adjusted
no_reset_lpj_isnan = comm.allreduce(my_suff_stat["reset_lpj_isnan"])
no_reset_lpj_smaller_eps_lpj = comm.allreduce(my_suff_stat["reset_lpj_smaller_eps_lpj"])
no_reset_lpj_isinf = comm.allreduce(my_suff_stat["reset_lpj_isinf"])
if no_reset_lpj_isnan > 0:
parallel.pprint("no reset_lpj_isnan = %i" % no_reset_lpj_isnan)
if no_reset_lpj_smaller_eps_lpj > 0:
parallel.pprint("no reset_lpj_smaller_eps_lpj = %i" % no_reset_lpj_smaller_eps_lpj)
if no_reset_lpj_isinf > 0:
parallel.pprint("no reset_lpj_isinf = %i" % no_reset_lpj_isinf)
# Some data handling
B = np.minimum(self.B_max - lpj.max(axis=1), self.B_max_shft) # is: (my_N,)
pjc = np.exp(lpj + B[:, None]) # is: (my_N, S+H+1)
my_Wp = np.zeros_like(W) # is (H, D)
my_Wq = np.zeros((H, H)) # is (H, H)
        my_pies = np.zeros(H)  # is (H,)
my_sigma = 0.0
# Check missing-data case
if incmpl_data:
assert "y_reconstructed" in my_data.keys()
my_y = my_data["y_reconstructed"]
else:
my_y = my_data["y"]
# Iterate over all datapoints
tracing.tracepoint("M_step:iterating")
for n in range(my_N):
this_y = my_y[n, :] # is (D,)
this_x_infr = my_x_infr[n, :]
this_pjc = pjc[n, :] # is (S,)
this_ss = ss[n, :, :] # is (S, H)
this_Wp = np.zeros_like(my_Wp) # numerator for current datapoint (H, D)
this_Wq = np.zeros_like(my_Wq) # denominator for current datapoint (H, H)
this_pies = np.zeros((H))
this_sigma = 0.0
# Zero active hidden causes
if permanent["allzero"]:
this_sigma += this_pjc[0] * (this_y[this_x_infr] ** 2).sum()
# Handle hidden states with more than 1 active cause
this_pies += (this_pjc[S_perm:].T * this_ss.T).sum(axis=1)
this_Wp += np.outer((this_pjc[S_perm:].T * this_ss.T).sum(axis=1), this_y)
this_Wq += np.dot(this_pjc[S_perm:].T * this_ss.T, this_ss)
# this_pi += np.inner(this_pjc[S_perm:], this_ss.sum(axis=1))
this_sigma += (
this_pjc[S_perm:]
* ((this_y[this_x_infr] - np.dot(this_ss, W[:, this_x_infr])) ** 2).sum(axis=1)
).sum()
this_pjc_sum = this_pjc.sum()
my_pies += this_pies / this_pjc_sum
my_Wp += this_Wp / this_pjc_sum
my_Wq += this_Wq / this_pjc_sum
my_sigma += this_sigma / this_pjc_sum
# Calculate updated W
if "W" in self.to_learn:
tracing.tracepoint("M_step:update W")
Wp = np.empty_like(my_Wp)
Wq = np.empty_like(my_Wq)
comm.Allreduce([my_Wp, MPI.DOUBLE], [Wp, MPI.DOUBLE])
comm.Allreduce([my_Wq, MPI.DOUBLE], [Wq, MPI.DOUBLE])
# rcond=None is only supported from numpy 1.14 on; use the legacy default otherwise
np_major, np_minor = (int(v) for v in np.__version__.split(".")[:2])
if (np_major, np_minor) >= (1, 14):
    rcond = None
else:
    rcond = -1
try:
W_new = np.linalg.lstsq(Wq, Wp, rcond=rcond)[0]
except np.linalg.LinAlgError:
eps_W = 5e-5
try:
noise = np.random.normal(0, eps_W, H)
noise = np.outer(noise, noise)
Wq_inv = np.linalg.pinv(Wq + noise)
W_new = np.dot(Wq_inv, Wp)
parallel.pprint("Use pinv and additional noise for W update.")
except np.linalg.LinAlgError:
# Sum of the expected values of the second moments was not invertable.
# Skip the update of parameter W but add some noise to it.
W_new = W + (eps_W *
|
np.random.normal(0, 1, [H, D])
|
numpy.random.normal
|
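A minimal standalone sketch of the completed fallback above, with illustrative shapes: when Wq cannot be inverted, W is kept and only perturbed by small zero-mean Gaussian noise drawn with numpy.random.normal at scale eps_W.
import numpy as np
H, D = 8, 16
eps_W = 5e-5
W = np.zeros((H, D))
W_new = W + eps_W * np.random.normal(0, 1, [H, D])  # same shape as W, noise of scale eps_W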
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import os
import numpy as np
import matplotlib.pyplot as plt
import json
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('modelpath', '', 'Path to the model')
flags.DEFINE_string('target', '', 'Target to evaluate')
flags.DEFINE_string('resultsave', '', 'Path to save results')
modelPath = FLAGS.modelpath
if not os.path.exists(modelPath):
raise Exception('Model does not exist')
def load(filename):
npImage = Image.open(filename)
npImage = np.array(npImage).astype('float32')/255
# print(npImage.shape)
npImage = npImage[...,:3]
# print(npImage.shape)
npImage =
|
np.expand_dims(npImage, axis=0)
|
numpy.expand_dims
|
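A minimal sketch of what the completed line does, with illustrative shapes: numpy.expand_dims adds a leading batch axis, so a single (H, W, 3) image becomes (1, H, W, 3) before being passed to the model.
import numpy as np
npImage = np.zeros((224, 224, 3), dtype='float32')
batched = np.expand_dims(npImage, axis=0)
print(batched.shape)  # (1, 224, 224, 3)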
# Built-in
import sys
import os
import warnings
if sys.version[0] == '3':
import inspect
else:
# Python 2 back-porting
import funcsigs as inspect
# Common
import numpy as np
# tofu
try:
import tofu.geom._core as _core
except Exception:
from . import _core
__all__ = ['coords_transform',
'get_nIne1e2', 'get_X12fromflat',
'compute_RaysCones',
'create_config',
'create_CamLOS1D', 'create_CamLOS2D']
_sep = '_'
_dict_lexcept_key = []
_lok = np.arange(0,9)
_lok = np.array([_lok, _lok+10])
_here = os.path.abspath(os.path.dirname(__file__))
_root = _here[:_here.rfind('/tofu')]
_path_testcases = os.path.join(_root,'tofu/geom/inputs')
###########################################################
# COCOS
###########################################################
class CoordinateInputError(Exception):
_cocosref = "<NAME>, <NAME>, "
_cocosref += "Computer Physics Communications 184 (2103) 293-302"
msg = "The provided coords flag should be a str\n"
msg += "It should match a known flag:\n"
msg += " - 'cart' / 'xyz' : cartesian coordinates\n"
msg += " - cocos flag indicating the cocos number (1-8, 11-18)\n"
msg += " Valid cocos flags include:\n"
msg += " '11', '02', '5', '14', ..."
msg += "\n"
msg += "The cocos (COordinates COnvetionS) are descibed in:\n"
msg += " [1] %s"%_cocosref
def __init__(self, msg, errors=None):
# Call the base class constructor with the parameters it
# needs
super(CoordinateInputError, self).__init__(msg + '\n\n' + self.msg)
# Now for your custom code...
self.errors = errors
def _coords_checkformatcoords(coords='11'):
if not type(coords) is str:
msg = "Arg coords must be a str !"
raise CoordinateInputError(msg)
coords = coords.lower()
iint = np.array([ss.isdigit() for ss in coords]).nonzero()[0]
if coords in ['cart','xyz']:
coords = 'xyz'
elif iint.size in [1,2]:
coords = int(''.join([coords[jj] for jj in iint]))
if not coords in _lok.ravel():
msg = 'Not allowed number ({0}) !'.format(coords)
raise CoordinateInputError(msg)
else:
msg = "Not allowed coords ({0}) !".format(coords)
raise CoordinateInputError(msg)
return coords
def _coords_cocos2cart(pts, coords=11):
R = pts[0,:]
if (coords%10)%2==1:
    indphi, indZ, sig = 1, 2, 1.
else:
indphi, indZ , sig= 2, 1, -1.
phi = sig*pts[indphi,:]
X = R*np.cos(phi)
Y = R*np.sin(phi)
Z = pts[indZ,:]
return np.array([X,Y,Z])
def _coords_cart2cocos(pts, coords=11):
R = np.hypot(pts[0,:],pts[1,:])
phi = np.arctan2(pts[1,:],pts[0,:])
Z = pts[2,:]
if (coords%10)%2==1:
indphi, indZ, sig = 1, 2, 1.
else:
indphi, indZ , sig= 2, 1, -1.
pts_out = np.empty((3,pts.shape[1]),dtype=float)
pts_out[0,:] = R
pts_out[indphi,:] = sig*phi
pts_out[indZ,:] = Z
return pts_out
def coords_transform(pts, coords_in='11', coords_out='11'):
coords_in = _coords_checkformatcoords(coords=coords_in)
coords_out = _coords_checkformatcoords(coords=coords_out)
if coords_in==coords_out:
pass
elif coords_in=='xyz':
pts = _coords_cart2cocos(pts, coords_out)
elif coords_out=='xyz':
pts = _coords_cocos2cart(pts, coords_in)
else:
pts = _coords_cocos2cart(pts, coords_in)
pts = _coords_cart2cocos(pts, coords_out)
return pts
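# Illustrative sketch (not part of the module): convert three (R, phi, Z) points given
# in cocos '11' to cartesian 'xyz'; pts must be a (3, N) array as assumed above.
_pts_cocos = np.array([[2.4, 2.4, 2.4],
                       [0., np.pi/4, np.pi/2],
                       [0., 0.1, -0.1]])
_pts_xyz = coords_transform(_pts_cocos, coords_in='11', coords_out='xyz')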
###########################################################
###########################################################
# Useful functions
###########################################################
def get_nIne1e2(P, nIn=None, e1=None, e2=None):
assert np.hypot(P[0],P[1])>1.e-12
phi = np.arctan2(P[1],P[0])
ephi = np.array([-np.sin(phi), np.cos(phi), 0.])
ez = np.array([0.,0.,1.])
if nIn is None:
nIn = -P
nIn = nIn / np.linalg.norm(nIn)
if e1 is None:
if np.abs(np.abs(nIn[2])-1.) < 1.e-12:
e1 = ephi
else:
e1 = np.cross(nIn,ez)
e1 = e1 if np.sum(e1*ephi) > 0. else -e1
e1 = e1 / np.linalg.norm(e1)
if not np.abs(np.sum(nIn*e1))<1.e-12:
msg = "Identified local base does not seem valid!\n"
msg += "nIn = %s\n"%str(nIn)
msg += "e1 = %s\n"%str(e1)
msg += "np.sum(nIn*e1) = sum(%s) = %s"%(nIn*e1, np.sum(nIn*e1))
raise Exception(msg)
if e2 is None:
e2 = np.cross(nIn,e1)
e2 = e2 / np.linalg.norm(e2)
return nIn, e1, e2
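# Illustrative sketch (not part of the module): build an orthonormal local basis at a
# point P off the vertical axis; by default nIn points from P back towards the origin,
# and e1, e2 span the plane normal to nIn.
_P = np.array([2.4, 0., 0.5])
_nIn, _e1, _e2 = get_nIne1e2(_P)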
def get_X12fromflat(X12, x12u=None, nx12=None):
if x12u is None:
x1u, x2u = np.unique(X12[0,:]), np.unique(X12[1,:])
if x1u.size*x2u.size != X12.shape[1]:
tol = np.linalg.norm(np.diff(X12[:,:2],axis=1))/100.
tolmag = int(np.log10(tol))-1
x1u = np.unique(np.round(X12[0,:], -tolmag))
x2u = np.unique(np.round(X12[1,:], -tolmag))
indx1 = np.digitize(X12[0,:], 0.5*(x1u[1:]+x1u[:-1]))
indx2 = np.digitize(X12[1,:], 0.5*(x2u[1:]+x2u[:-1]))
indx1u, indx2u = np.unique(indx1), np.unique(indx2)
x1u = np.unique([np.mean(X12[0,indx1==ii]) for ii in indx1u])
x2u = np.unique([np.mean(X12[1,indx2==ii]) for ii in indx2u])
else:
x1u, x2u = x12u
if nx12 is None:
nx1, nx2 = x1u.size, x2u.size
else:
nx1, nx2 = nx12
Dx12 = (x1u[1]-x1u[0], x2u[1]-x2u[0])
ind = np.zeros((nx1,nx2),dtype=int)
indr = np.array([np.digitize(X12[0,:], 0.5*(x1u[1:]+x1u[:-1])),
np.digitize(X12[1,:], 0.5*(x2u[1:]+x2u[:-1]))])
ind[indr[0,:],indr[1,:]] = np.arange(0,X12.shape[1])
return x1u, x2u, ind, Dx12
###########################################################
###########################################################
# Fast computation of cones with rays
###########################################################
def compute_RaysCones(Ds, us, angs=np.pi/90., nP=40):
# Check inputs
Ddim, udim = Ds.ndim, us.ndim
assert Ddim in [1,2]
assert Ds.shape[0]==3 and Ds.size%3==0
assert udim in [1,2]
assert us.shape[0]==3 and us.size%3==0
assert type(angs) in [int,float,np.int64,np.float64]
if udim==2:
assert Ds.shape==us.shape
if Ddim==1:
Ds = Ds.reshape((3,1))
nD = Ds.shape[1]
# Compute
phi = np.linspace(0.,2.*np.pi, nP)
phi = np.tile(phi,nD)[np.newaxis,:]
if udim==1:
us = us[:,np.newaxis]/np.linalg.norm(us)
us = us.repeat(nD,axis=1)
else:
us = us/np.sqrt(np.sum(us**2,axis=0))[np.newaxis,:]
us = us.repeat(nP, axis=1)
e1 = np.array([us[1,:],-us[0,:],np.zeros((us.shape[1],))])
e2 = np.array([-us[2,:]*e1[1,:], us[2,:]*e1[0,:],
us[0,:]*e1[1,:]-us[1,:]*e1[0,:]])
ub = (us*np.cos(angs)
+ (np.cos(phi)*e1+np.sin(phi)*e2)*np.sin(angs))
Db = Ds.repeat(nP,axis=1)
return Db, ub
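# Illustrative sketch (not part of the module): spread a single ray of origin Ds and
# direction us into a cone of nP rays with half-angle angs; Db and ub are (3, nP)
# arrays of ray origins and unit direction vectors.
_Ds = np.array([2.4, 0., 0.])
_us = np.array([0., 1., 0.])
_Db, _ub = compute_RaysCones(_Ds, _us, angs=np.pi/90., nP=40)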
###########################################################
###########################################################
# Fast computation of poly
###########################################################
def _compute_VesPoly(R=2.4, r=1., elong=0., Dshape=0.,
divlow=True, divup=True, nP=200):
""" Utility to compute three 2D (R,Z) polygons
One represents a vacuum vessel, one an outer bumper, one a baffle
The vessel polygon is centered on (R,0.), with minor radius r
It can have a vertical (>0) or horizontal(<0) elongation in [-1;1]
It can be D-shaped (Dshape in [0.,1.], typically 0.2)
It can be non-convex, with:
* a lower divertor-like shape
* an upper divertor-like shape
The elongation also affects the outer bumper and baffle
Parameters
----------
R: int / float
Major radius used as a center of the vessel
r : int / float
Minor radius of the vessel
elong: int / float
Dimensionless elongation parameter in [-1;1]
Dshape: int / float
Dimensionless parameter for the D-shape (in-out asymmetry) in [0;1]
divlow: bool
Flag indicating whether to include a lower divertor-like shape
divup: bool
Flag indicating whether to include an upper divertor-like shape
nP : int
Parameter specifying approximately the number of points of the vessel
Return
------
poly: np.ndarray
Closed (2,nP) polygon of the vacuum vessel, optionally with divertors
pbump: np.ndarray
Closed (2,N) polygon defining the outer bumper
pbaffle: np.ndarray
Closed (2,N) polygon defining the lower baffle
"""
# Basics (center, theta, unit vectors)
cent = np.r_[R,0.]
theta = np.linspace(-np.pi,np.pi,nP)
poly = np.array([np.cos(theta), np.sin(theta)])
# Divertors
pdivR = np.r_[-0.1,0.,0.1]
pdivZ = np.r_[-0.1,0.,-0.1]
if divlow:
ind = (np.sin(theta)<-0.85).nonzero()[0]
pinsert = np.array([pdivR, -1.+pdivZ])
poly = np.concatenate((poly[:,:ind[0]], pinsert, poly[:,ind[-1]+1:]),
axis=1)
if divup:
theta = np.arctan2(poly[1,:], poly[0,:])
ind = (np.sin(theta)>0.85).nonzero()[0]
pinsert = np.array([pdivR[::-1], 1.-pdivZ])
poly = np.concatenate((poly[:,:ind[0]], pinsert, poly[:,ind[-1]+1:]),
axis=1)
# Modified radius (by elongation and Dshape)
rbis = r*np.hypot(poly[0,:],poly[1,:])
theta = np.arctan2(poly[1,:],poly[0,:])
rbis = rbis*(1+elong*0.15*np.sin(2.*theta-np.pi/2.))
if Dshape>0.:
ind = np.cos(theta)<0.
coef = 1 + Dshape*(np.sin(theta[ind])**2-1.)
rbis[ind] = rbis[ind]*coef
er = np.array([np.cos(theta), np.sin(theta)])
poly = cent[:,np.newaxis] + rbis[np.newaxis,:]*er
# Outer bumper
Dbeta = 2.*np.pi/6.
beta = np.linspace(-Dbeta/2.,Dbeta/2., 20)
pbRin = 0.85*np.array([np.cos(beta), np.sin(beta)])
pbRout = 0.95*np.array([np.cos(beta), np.sin(beta)])[:,::-1]
pinsert = np.array([[0.95,1.05,1.05,0.95],
[0.05,0.05,-0.05,-0.05]])
ind = (np.abs(pbRout[1,:])<0.05).nonzero()[0]
pbump = (pbRin, pbRout[:,:ind[0]], pinsert,
pbRout[:,ind[-1]+1:], pbRin[:,0:1])
pbump = np.concatenate(pbump, axis=1)
theta = np.arctan2(pbump[1,:],pbump[0,:])
er = np.array([np.cos(theta), np.sin(theta)])
rbis = r*(np.hypot(pbump[0,:],pbump[1,:])
*(1.+elong*0.15*np.sin(2.*theta-np.pi/2.)))
pbump = cent[:,np.newaxis] + rbis[np.newaxis,:]*er
# Baffle
offR, offZ = 0.1, -0.85
wR, wZ = 0.2, 0.05
pbaffle = np.array([offR + wR*np.r_[-1,1,1,-1,-1],
offZ + wZ*np.r_[1,1,-1,-1,1]])
theta = np.arctan2(pbaffle[1,:],pbaffle[0,:])
er = np.array([np.cos(theta), np.sin(theta)])
rbis = r*(np.hypot(pbaffle[0,:],pbaffle[1,:])
*(1.+elong*0.15*np.sin(2.*theta-np.pi/2.)))
pbaffle = cent[:,np.newaxis] + rbis[np.newaxis,:]*er
return poly, pbump, pbaffle
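# Illustrative sketch (not part of the module): a slightly elongated, D-shaped vessel
# with both divertor-like indentations; the three outputs are closed (2, N) polygons
# in (R, Z) coordinates.
_poly, _pbump, _pbaffle = _compute_VesPoly(R=2.4, r=1., elong=0.2, Dshape=0.2,
                                           divlow=True, divup=True, nP=200)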
###########################################################
###########################################################
# Fast computation of camera parameters
###########################################################
def _compute_PinholeCam_checkformatinputs(P=None, F=0.1, D12=None, N12=100,
angs=0, nIn=None, VType='Tor', defRY=None, Lim=None):
assert type(VType) is str
VType = VType.lower()
assert VType in ['tor','lin']
if np.sum([angs is None, nIn is None])!=1:
msg = "Either angs xor nIn should be provided !"
raise Exception(msg)
# Pinhole
if P is None:
if defRY is None:
msg = "If P is not provided, a value msut be set for defRY!"
raise Exception(msg)
if VType=='tor':
P = np.array([defRY,0.,0.])
else:
if Lim is None:
msg = "If P is not provided, Lim must be set!"
raise Exception(msg)
Lim = np.array(Lim).ravel()
assert Lim.size==2 and Lim[0]<Lim[1]
P = np.array([np.sum(Lim)/2., defRY, 0.])
else:
P = np.asarray(P, dtype=float).ravel()
assert P.size==3
# Camera inner parameters
assert type(F) in [int, float, np.int64, np.float64]
F = float(F)
if D12 is None:
D12 = F
if type(D12) in [int, float, np.int64, np.float64]:
D12 = np.array([D12,D12],dtype=float)
else:
assert hasattr(D12,'__iter__') and len(D12)==2
D12 = np.asarray(D12).astype(float)
if type(N12) in [int, float, np.int64, np.float64]:
N12 = np.array([N12,N12],dtype=int)
else:
assert hasattr(N12,'__iter__') and len(N12)==2
N12 = np.asarray(N12).astype(int)
# Angles
if angs is None:
assert hasattr(nIn,'__iter__')
nIn = np.asarray(nIn, dtype=float).ravel()
assert nIn.size==3
else:
if type(angs) in [int, float, np.int64, np.float64]:
angs = np.array([angs,angs,angs],dtype=float)
angs = np.asarray(angs).astype(float).ravel()
assert angs.size==3
angs = np.arctan2(np.sin(angs),np.cos(angs))
if VType=='tor':
R = np.hypot(P[0],P[1])
phi = np.arctan2(P[1],P[0])
eR = np.array([
|
np.cos(phi)
|
numpy.cos
|
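A minimal sketch of the completed lines; the continuation beyond np.cos(phi) is an assumption, modelled on the ephi/er constructions used earlier in the module: in the toroidal case the local radial and toroidal unit vectors at the pinhole P follow from its angle phi.
import numpy as np
P = np.array([2.4, 0.3, 0.1])                     # illustrative pinhole position
R = np.hypot(P[0], P[1])                          # major radius of P
phi = np.arctan2(P[1], P[0])
eR = np.array([np.cos(phi), np.sin(phi), 0.])     # radial unit vector
ephi = np.array([-np.sin(phi), np.cos(phi), 0.])  # toroidal unit vector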
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to store, generate, and manipulate material interfaces.
"""
from itertools import product
from typing import Iterator, Optional, Tuple, Union
import numpy as np
from scipy.linalg import polar
from pymatgen.analysis.elasticity.strain import Deformation
from pymatgen.analysis.interfaces.zsl import ZSLGenerator, fast_norm
from pymatgen.core import Structure
from pymatgen.core.interface import Interface, label_termination
from pymatgen.core.surface import SlabGenerator
Vector3D = Tuple[float, float, float]
Matrix3D = Tuple[Vector3D, Vector3D, Vector3D]
Matrix2D = Tuple[Vector3D, Vector3D]
class CoherentInterfaceBuilder:
"""
This class constructs the coherent interfaces between two crystalline slabs
Coherency is defined by matching lattices not sub-planes.
"""
def __init__(
self,
substrate_structure: Structure,
film_structure: Structure,
film_miller: Tuple[int, int, int],
substrate_miller: Tuple[int, int, int],
zslgen: Optional[ZSLGenerator] = None,
):
"""
Args:
substrate_structure: structure of substrate
film_structure: structure of film
film_miller: miller index of the film layer
substrate_miller: miller index for the substrate layer
zslgen: BiDirectionalZSL if you want custom lattice matching tolerances for coherency
"""
# Bulk structures
self.substrate_structure = substrate_structure
self.film_structure = film_structure
self.film_miller = film_miller
self.substrate_miller = substrate_miller
self.zslgen = zslgen or ZSLGenerator(bidirectional=True)
self._find_matches()
self._find_terminations()
def _find_matches(self) -> None:
"""
Finds and stores the ZSL matches
"""
self.zsl_matches = []
film_sg = SlabGenerator(
self.film_structure,
self.film_miller,
min_slab_size=1,
min_vacuum_size=3,
in_unit_planes=True,
center_slab=True,
primitive=True,
reorient_lattice=False, # This is necessary to not screw up the lattice
)
sub_sg = SlabGenerator(
self.substrate_structure,
self.substrate_miller,
min_slab_size=1,
min_vacuum_size=3,
in_unit_planes=True,
center_slab=True,
primitive=True,
reorient_lattice=False, # This is necessary to not screw up the lattice
)
film_slab = film_sg.get_slab(shift=0)
sub_slab = sub_sg.get_slab(shift=0)
film_vectors = film_slab.lattice.matrix
substrate_vectors = sub_slab.lattice.matrix
# Generate all possible interface matches
self.zsl_matches = list(self.zslgen(film_vectors[:2], substrate_vectors[:2], lowest=False))
for match in self.zsl_matches:
xform = get_2d_transform(film_vectors, match.film_vectors)
strain, rot = polar(xform)
assert np.allclose(
strain, np.round(strain)
), "Film lattice vectors changed during ZSL match, check your ZSL Generator parameters"
xform = get_2d_transform(substrate_vectors, match.substrate_vectors)
strain, rot = polar(xform)
assert np.allclose(
strain, strain.astype(int)
), "Substrate lattice vectors changed during ZSL match, check your ZSL Generator parameters"
def _find_terminations(self):
"""
Finds all terminations
"""
film_sg = SlabGenerator(
self.film_structure,
self.film_miller,
min_slab_size=1,
min_vacuum_size=3,
in_unit_planes=True,
center_slab=True,
primitive=True,
reorient_lattice=False, # This is necessary to not screw up the lattice
)
sub_sg = SlabGenerator(
self.substrate_structure,
self.substrate_miller,
min_slab_size=1,
min_vacuum_size=3,
in_unit_planes=True,
center_slab=True,
primitive=True,
reorient_lattice=False, # This is necessary to not screw up the lattice
)
film_slabs = film_sg.get_slabs()
sub_slabs = sub_sg.get_slabs()
film_shifts = [s.shift for s in film_slabs]
film_terminations = [label_termination(s) for s in film_slabs]
sub_shifts = [s.shift for s in sub_slabs]
sub_terminations = [label_termination(s) for s in sub_slabs]
self._terminations = {
(film_label, sub_label): (film_shift, sub_shift)
for (film_label, film_shift), (sub_label, sub_shift) in product(
zip(film_terminations, film_shifts), zip(sub_terminations, sub_shifts)
)
}
self.terminations = list(self._terminations.keys())
def get_interfaces(
self,
termination: Tuple[str, str],
gap: float = 2.0,
vacuum_over_film: float = 20.0,
film_thickness: Union[float, int] = 1,
substrate_thickness: Union[float, int] = 1,
in_layers: bool = True,
) -> Iterator[Interface]:
"""
Generates interface structures given the film and substrate structure
as well as the desired terminations
Args:
termination: termination pair from the self.terminations list
gap: gap between film and substrate
vacuum_over_film: vacuum over the top of the film
film_thickness: the film thickness
substrate_thickness: substrate thickness
in_layers: set the thickness in layer units
"""
film_sg = SlabGenerator(
self.film_structure,
self.film_miller,
min_slab_size=film_thickness,
min_vacuum_size=3,
in_unit_planes=in_layers,
center_slab=True,
primitive=True,
reorient_lattice=False, # This is necessary to not screw up the lattice
)
sub_sg = SlabGenerator(
self.substrate_structure,
self.substrate_miller,
min_slab_size=substrate_thickness,
min_vacuum_size=3,
in_unit_planes=in_layers,
center_slab=True,
primitive=True,
reorient_lattice=False, # This is necessary to not screw up the lattice
)
film_shift, sub_shift = self._terminations[termination]
film_slab = film_sg.get_slab(shift=film_shift)
sub_slab = sub_sg.get_slab(shift=sub_shift)
for match in self.zsl_matches:
# Build film superlattice
super_film_transform = np.round(
from_2d_to_3d(get_2d_transform(film_slab.lattice.matrix[:2], match.film_sl_vectors))
).astype(int)
film_sl_slab = film_slab.copy()
film_sl_slab.make_supercell(super_film_transform)
assert np.allclose(
film_sl_slab.lattice.matrix[2], film_slab.lattice.matrix[2]
), "2D transformation affected C-axis for Film transformation"
assert np.allclose(
film_sl_slab.lattice.matrix[:2], match.film_sl_vectors
), "Transformation didn't make proper supercell for film"
# Build substrate superlattice
super_sub_transform = np.round(
from_2d_to_3d(get_2d_transform(sub_slab.lattice.matrix[:2], match.substrate_sl_vectors))
).astype(int)
sub_sl_slab = sub_slab.copy()
sub_sl_slab.make_supercell(super_sub_transform)
assert np.allclose(
sub_sl_slab.lattice.matrix[2], sub_slab.lattice.matrix[2]
), "2D transformation affected C-axis for Film transformation"
assert np.allclose(
sub_sl_slab.lattice.matrix[:2], match.substrate_sl_vectors
), "Transformation didn't make proper supercell for substrate"
# Add extra info
match_dict = match.as_dict()
interface_properties = {k: match_dict[k] for k in match_dict.keys() if not k.startswith("@")}
dfm = Deformation(match.match_transformation)
strain = dfm.green_lagrange_strain
interface_properties["strain"] = strain
interface_properties["von_mises_strain"] = strain.von_mises_strain
interface_properties["termination"] = termination
interface_properties["film_thickness"] = film_thickness
interface_properties["substrate_thickness"] = substrate_thickness
yield (
Interface.from_slabs(
substrate_slab=sub_sl_slab,
film_slab=film_sl_slab,
gap=gap,
vacuum_over_film=vacuum_over_film,
interface_properties=interface_properties,
)
)
def get_rot_3d_for_2d(film_matrix, sub_matrix) -> np.ndarray:
"""
Finds a transformation matrix that will rotate and strain the film to the substrate while preserving the c-axis
"""
film_matrix = np.array(film_matrix)
film_matrix = film_matrix.tolist()[:2]
film_matrix.append(np.cross(film_matrix[0], film_matrix[1]))
# Generate 3D lattice vectors for substrate super lattice
# Out of plane substrate super lattice has to be same length as
# Film out of plane vector to ensure no extra deformation in that
# direction
sub_matrix = np.array(sub_matrix)
sub_matrix = sub_matrix.tolist()[:2]
temp_sub =
|
np.cross(sub_matrix[0], sub_matrix[1])
|
numpy.cross
|
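A minimal sketch of the completed line, with illustrative lattice values: numpy.cross supplies the out-of-plane vector from the two in-plane substrate vectors, so the 3D transform preserves the c-axis direction.
import numpy as np
sub_matrix = [[3.9, 0.0, 0.0], [0.0, 3.9, 0.0]]
temp_sub = np.cross(sub_matrix[0], sub_matrix[1])  # array([ 0., 0., 15.21])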
import torch.nn as nn
import torch
import os
from .model import Model
from IPython import embed
from collections import defaultdict
import numpy as np
import pickle
import copy
class IterE(Model):
"""`Iteratively Learning Embeddings and Rules for Knowledge Graph Reasoning. (WWW'19)`_ (IterE).
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
.. _Iteratively Learning Embeddings and Rules for Knowledge Graph Reasoning. (WWW'19): https://dl.acm.org/doi/10.1145/3308558.3313612
"""
def __init__(self, args, train_sampler, test_sampler):
super(IterE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
#print(self.args)
#print(train_sampler)
#print('run get_axiom()')
self.train_sampler = train_sampler
self.train_triples_base = copy.deepcopy(train_sampler.train_triples)
self.select_probability = self.args.select_probability
self.max_entialments = self.args.max_entialments
self.axiom_types = self.args.axiom_types
self.axiom_weight = self.args.axiom_weight
self.inject_triple_percent = self.args.inject_triple_percent
self.sparsity = 0.995
self.num_entity = self.args.num_ent
self.relation2id=train_sampler.rel2id
self.train_ids=train_sampler.train_triples
self.valid_ids=train_sampler.valid_triples
self.test_ids=train_sampler.test_triples
#print(len(self.train_ids))
#print(len(self.valid_ids))
#print(len(self.test_ids))
self.train_ids_labels_inject = np.reshape([], [-1, 4])
# generate r_ht, hr_t
print('# generate r_ht, hr_t')
self.r_ht, self.hr_t, self.tr_h, self.hr_t_all, self.tr_h_all = self._generate(self.train_ids, self.valid_ids, self.test_ids)
# generate entity2frequency and entity2sparsity dict
print('# generate entity2frequency and entity2sparsity dict')
self.entity2frequency, self.entity2sparsity = self._entity2frequency()
print('# get_axiom')
self.get_axiom()
#self.rule, self.conf = self.get_rule(self.relation2id)
def _entity2frequency(self):
ent2freq = {ent:0 for ent in range(self.num_entity)}
ent2sparsity = {ent:-1 for ent in range(self.num_entity)}
for h,r,t in self.train_ids:
ent2freq[h] += 1
ent2freq[t] += 1
ent_freq_list = np.asarray([ent2freq[ent] for ent in range(self.num_entity)])
ent_freq_list_sort = np.argsort(ent_freq_list)
max_freq = max(ent2freq.values())
min_freq = min(ent2freq.values())
for ent, freq in ent2freq.items():
sparsity = 1 - (freq-min_freq)/(max_freq - min_freq)
ent2sparsity[ent] = sparsity
return ent2freq, ent2sparsity
def _generate(self, train, valid, test):
r_ht = defaultdict(set)
hr_t = defaultdict(set)
tr_h = defaultdict(set)
hr_t_all = defaultdict(list)
tr_h_all = defaultdict(list)
for (h,r,t) in train:
r_ht[r].add((h,t))
hr_t[(h,r)].add(t)
tr_h[(t,r)].add(h)
hr_t_all[(h,r)].append(t)
tr_h_all[(t,r)].append(h)
for (h,r,t) in test+valid:
hr_t_all[(h,r)].append(t)
tr_h_all[(t, r)].append(h)
return r_ht, hr_t, tr_h, hr_t_all, tr_h_all
def get_axiom(self, ):
self.axiom_dir = os.path.join(self.args.data_path, 'axiom_pool')
self.reflexive_dir, self.symmetric_dir, self.transitive_dir, self.inverse_dir, self.subproperty_dir, self.equivalent_dir, self.inferencechain1, self.inferencechain2, self.inferencechain3, self.inferencechain4 = map(lambda x: os.path.join(self.axiom_dir, x),
['axiom_reflexive.txt',
'axiom_symmetric.txt',
'axiom_transitive.txt',
'axiom_inverse.txt',
'axiom_subProperty.txt',
'axiom_equivalent.txt',
'axiom_inferenceChain1.txt',
'axiom_inferenceChain2.txt',
'axiom_inferenceChain3.txt',
'axiom_inferenceChain4.txt'])
# read and materialize axioms
print('# self._read_axioms()')
self._read_axioms()
print('# self._materialize_axioms()')
self._materialize_axioms()
print('# self._init_valid_axioms()')
self._init_valid_axioms()
def _read_axioms(self):
# for each axiom, the first id is the basic relation
self.axiompool_reflexive = self._read_axiompool_file(self.reflexive_dir)
self.axiompool_symmetric = self._read_axiompool_file(self.symmetric_dir)
self.axiompool_transitive = self._read_axiompool_file(self.transitive_dir)
self.axiompool_inverse = self._read_axiompool_file(self.inverse_dir)
self.axiompool_equivalent = self._read_axiompool_file(self.equivalent_dir)
self.axiompool_subproperty = self._read_axiompool_file(self.subproperty_dir)
self.axiompool_inferencechain1 = self._read_axiompool_file(self.inferencechain1)
self.axiompool_inferencechain2 = self._read_axiompool_file(self.inferencechain2)
self.axiompool_inferencechain3 = self._read_axiompool_file(self.inferencechain3)
self.axiompool_inferencechain4 = self._read_axiompool_file(self.inferencechain4)
self.axiompool = [self.axiompool_reflexive, self.axiompool_symmetric, self.axiompool_transitive,
self.axiompool_inverse, self.axiompool_subproperty, self.axiompool_equivalent,
self.axiompool_inferencechain1,self.axiompool_inferencechain2,
self.axiompool_inferencechain3,self.axiompool_inferencechain4]
def _read_axiompool_file(self, file):
f = open(file, 'r')
axioms = []
for line in f.readlines():
line_list = line.strip().split('\t')
axiom_ids = list(map(lambda x: self.relation2id[x], line_list))
#axiom_ids = self.relation2id[line_list]
axioms.append(axiom_ids)
# for the case the axiom pool is empty
if len(axioms) == 0:
np.reshape(axioms, [-1, 3])
return axioms
# for each axioms in axiom pool
# generate a series of entailments for each axiom
def _materialize_axioms(self, generate=True, dump=True, load=False):
if generate:
self.reflexive2entailment = defaultdict(list)
self.symmetric2entailment = defaultdict(list)
self.transitive2entailment = defaultdict(list)
self.inverse2entailment = defaultdict(list)
self.equivalent2entailment = defaultdict(list)
self.subproperty2entailment = defaultdict(list)
self.inferencechain12entailment = defaultdict(list)
self.inferencechain22entailment = defaultdict(list)
self.inferencechain32entailment = defaultdict(list)
self.inferencechain42entailment = defaultdict(list)
self.reflexive_entailments, self.reflexive_entailments_num = self._materialize_sparse(self.axiompool_reflexive, type='reflexive')
self.symmetric_entailments, self.symmetric_entailments_num = self._materialize_sparse(self.axiompool_symmetric, type='symmetric')
self.transitive_entailments, self.transitive_entailments_num = self._materialize_sparse(self.axiompool_transitive, type='transitive')
self.inverse_entailments, self.inverse_entailments_num = self._materialize_sparse(self.axiompool_inverse, type='inverse')
self.subproperty_entailments, self.subproperty_entailments_num = self._materialize_sparse(self.axiompool_subproperty, type='subproperty')
self.equivalent_entailments, self.equivalent_entailments_num = self._materialize_sparse(self.axiompool_equivalent, type='equivalent')
self.inferencechain1_entailments, self.inferencechain1_entailments_num = self._materialize_sparse(self.axiompool_inferencechain1, type='inferencechain1')
self.inferencechain2_entailments, self.inferencechain2_entailments_num = self._materialize_sparse(self.axiompool_inferencechain2, type='inferencechain2')
self.inferencechain3_entailments, self.inferencechain3_entailments_num = self._materialize_sparse(self.axiompool_inferencechain3, type='inferencechain3')
self.inferencechain4_entailments, self.inferencechain4_entailments_num = self._materialize_sparse(self.axiompool_inferencechain4, type='inferencechain4')
print('reflexive entailments for sparse: ', self.reflexive_entailments_num)
print('symmetric entailments for sparse: ', self.symmetric_entailments_num)
print('transitive entailments for sparse: ', self.transitive_entailments_num)
print('inverse entailments for sparse: ', self.inverse_entailments_num)
print('subproperty entailments for sparse: ', self.subproperty_entailments_num)
print('equivalent entailments for sparse: ', self.equivalent_entailments_num)
print('inferencechain1 entailments for sparse: ', self.inferencechain1_entailments_num)
print('inferencechain2 entailments for sparse: ', self.inferencechain2_entailments_num)
print('inferencechain3 entailments for sparse: ', self.inferencechain3_entailments_num)
print('inferencechain4 entailments for sparse: ', self.inferencechain4_entailments_num)
print("finish generate axioms entailments for sparse")
if dump:
pickle.dump(self.reflexive_entailments, open(os.path.join(self.axiom_dir, 'reflexive_entailments'), 'wb'))
pickle.dump(self.symmetric_entailments, open(os.path.join(self.axiom_dir, 'symmetric_entailments'), 'wb'))
pickle.dump(self.transitive_entailments, open(os.path.join(self.axiom_dir, 'transitive_entailments'), 'wb'))
pickle.dump(self.inverse_entailments, open(os.path.join(self.axiom_dir, 'inverse_entailments'), 'wb'))
pickle.dump(self.subproperty_entailments, open(os.path.join(self.axiom_dir, 'subproperty_entailments'), 'wb'))
#pickle.dump(self.inferencechain_entailments, open(os.path.join(self.axiom_dir, 'inferencechain_entailments'), 'wb'))
pickle.dump(self.equivalent_entailments, open(os.path.join(self.axiom_dir, 'equivalent_entailments'), 'wb'))
pickle.dump(self.inferencechain1_entailments,
open(os.path.join(self.axiom_dir, 'inferencechain1_entailments'), 'wb'))
pickle.dump(self.inferencechain2_entailments,
open(os.path.join(self.axiom_dir, 'inferencechain2_entailments'), 'wb'))
pickle.dump(self.inferencechain3_entailments,
open(os.path.join(self.axiom_dir, 'inferencechain3_entailments'), 'wb'))
pickle.dump(self.inferencechain4_entailments,
open(os.path.join(self.axiom_dir, 'inferencechain4_entailments'), 'wb'))
print("finish dump axioms entialments")
if load:
print("load refexive entailments...")
self.reflexive_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'reflexive_entailments'), 'rb'))
print(self.reflexive_entailments)
print('load symmetric entailments...')
self.symmetric_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'symmetric_entailments'), 'rb'))
print("load transitive entialments... ")
self.transitive_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'transitive_entailments'), 'rb'))
print("load inverse entailments...")
self.inverse_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'inverse_entailments'), 'rb'))
print("load subproperty entailments...")
self.subproperty_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'subproperty_entailments'), 'rb'))
#print("load inferencechain entailments...")
#self.inferencechain_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'inferencechain_entailments'), 'rb'))
print("load equivalent entialments...")
self.equivalent_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'equivalent_entailments'), 'rb'))
print("load inferencechain1 entailments...")
self.inferencechain1_entailments = pickle.load(
open(os.path.join(self.axiom_dir, 'inferencechain1_entailments'), 'rb'))
print("load inferencechain2 entailments...")
self.inferencechain2_entailments = pickle.load(
open(os.path.join(self.axiom_dir, 'inferencechain2_entailments'), 'rb'))
print("load inferencechain3 entailments...")
self.inferencechain3_entailments = pickle.load(
open(os.path.join(self.axiom_dir, 'inferencechain3_entailments'), 'rb'))
print("load inferencechain4 entailments...")
self.inferencechain4_entailments = pickle.load(
open(os.path.join(self.axiom_dir, 'inferencechain4_entailments'), 'rb'))
print("finish load axioms entailments")
def _materialize_sparse(self, axioms, type=None, sparse = False):
inference = []
# axiom2entailment is a dict
# with the all axioms in the axiom pool as keys
# and all the entailments for each axiom as values
axiom_list = axioms
length = len(axioms)
max_entailments = self.max_entialments
num = 0
if length == 0:
if type == 'reflexive':
np.reshape(inference, [-1, 3])
elif type == 'symmetric' or type =='inverse' or type =='equivalent' or type =='subproperty':
np.reshape(inference, [-1, 6])
elif type=='transitive' or type=='inferencechain':
np.reshape(inference, [-1, 9])
else:
raise NotImplementedError
return inference, num
if type == 'reflexive':
for axiom in axiom_list:
axiom_key =tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h,t) in self.r_ht[r]:
# filter out axioms with too many entailments
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 2])
break
if h != t and self.entity2sparsity[h]>self.sparsity:
num += 1
inference_tmp.append([h,r,h])
for entailment in inference_tmp:
self.reflexive2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'symmetric':
#self.symmetric2entailment = defaultdict(list)
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h,t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 2])
break
if (t,h) not in self.r_ht[r] and (self.entity2sparsity[h]>self.sparsity or self.entity2sparsity[t]>self.sparsity):
num += 1
inference_tmp.append([h,r,t,t,r,h])
for entailment in inference_tmp:
self.symmetric2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'transitive':
#self.transitive2entailment = defaultdict(list)
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h,t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
# (t,r,e) exist but (h,r,e) not exist and e!=h
for e in self.hr_t[(t,r)]- self.hr_t[(h,r)]:
if e != h and (self.entity2sparsity[h]>self.sparsity or self.entity2sparsity[e]>self.sparsity):
num += 1
inference_tmp.append([h,r,t,t,r,e,h,r,e])
for entailment in inference_tmp:
self.transitive2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inverse':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r1,r2 = axiom
inference_tmp = []
for (h,t) in self.r_ht[r1]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 6])
break
if (t,h) not in self.r_ht[r2] and (self.entity2sparsity[h]>self.sparsity or self.entity2sparsity[t]>self.sparsity):
num += 1
inference_tmp.append([h,r1,t, t,r2,h])
#self.inverse2entailment[axiom_key].append([h,r1,t, t,r2,h])
for entailment in inference_tmp:
self.inverse2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'equivalent' or type =='subproperty':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r1,r2 = axiom
inference_tmp = []
for (h,t) in self.r_ht[r1]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 6])
break
if (h,t) not in self.r_ht[r2] and (self.entity2sparsity[h]>self.sparsity or self.entity2sparsity[t]>self.sparsity):
num += 1
inference_tmp.append([h,r1,t, h,r2,t])
for entailment in inference_tmp:
self.equivalent2entailment[axiom_key].append(entailment)
self.subproperty2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain1':
self.inferencechain12entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (e, h) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.hr_t[(e, r3)]:
if (h, t) not in self.r_ht[r1] and (
self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([e, r2, h, e, r3, t, h, r1, t])
#self.inferencechain12entailment[axiom_key].append([[e, r2, h, e, r3, t, h, r1, t]])
for entailment in inference_tmp:
self.inferencechain12entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain2':
self.inferencechain22entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (e, h) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.tr_h[(e, r3)]:
if (h, t) not in self.r_ht[r1] and (
self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([e, r2, h, t, r3, e, h, r1, t])
#self.inferencechain22entailment[axiom_key].append([[e, r2, h, t, r3, e, h, r1, t]])
for entailment in inference_tmp:
self.inferencechain22entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain3':
self.inferencechain32entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (h, e) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.hr_t[(e, r3)]:
if (h, t) not in self.r_ht[r1] and (
self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([h, r2, e, e, r3, t, h, r1, t])
for entailment in inference_tmp:
self.inferencechain32entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain4':
self.inferencechain42entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (h, e) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.tr_h[(e, r3)]:
if (h, t) not in self.r_ht[r1] and (
self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([h, r2, e, t, r3, e, h, r1, t])
for entailment in inference_tmp:
self.inferencechain42entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
return inference, num
def _materialize(self, axioms, type=None, sparse=False):
inference = []
# axiom2entailment is a dict
# with the all axioms in the axiom pool as keys
# and all the entailments for each axiom as values
axiom_list = axioms
# print('axiom_list', axiom_list)
length = len(axioms)
max_entailments = 5000
num = 0
if length == 0:
if type == 'reflexive':
np.reshape(inference, [-1, 3])
elif type == 'symmetric' or type == 'inverse' or type == 'equivalent' or type == 'subproperty':
np.reshape(inference, [-1, 6])
elif type == 'transitive' or type == 'inferencechain':
np.reshape(inference, [-1, 9])
else:
raise NotImplementedError
return inference, num
if type == 'reflexive':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h, t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 2])
break
if h != t: #and self.entity2sparsity[h] > self.sparsity:
num += 1
inference_tmp.append([h, r, h])
for entailment in inference_tmp:
self.reflexive2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'symmetric':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h, t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 2])
break
if (t, h) not in self.r_ht[r]: #and (self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[t] > self.sparsity):
num += 1
inference_tmp.append([h, r, t, t, r, h])
for entailment in inference_tmp:
self.symmetric2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'transitive':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h, t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
# (t,r,e) exist but (h,r,e) not exist and e!=h
for e in self.hr_t[(t, r)] - self.hr_t[(h, r)]:
if e != h: #and (self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([h, r, t, t, r, e, h, r, e])
for entailment in inference_tmp:
self.transitive2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inverse':
# self.inverse2entailment = defaultdict(list)
for axiom in axiom_list:
axiom_key = tuple(axiom)
r1, r2 = axiom
inference_tmp = []
for (h, t) in self.r_ht[r1]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 6])
break
if (t, h) not in self.r_ht[r2]: #and (self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[t] > self.sparsity):
num += 1
inference_tmp.append([h, r1, t, t, r2, h])
for entailment in inference_tmp:
self.inverse2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'equivalent' or type == 'subproperty':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r1, r2 = axiom
inference_tmp = []
for (h, t) in self.r_ht[r1]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 6])
break
if (h, t) not in self.r_ht[r2]: #and (self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[t] > self.sparsity):
num += 1
inference_tmp.append([h, r1, t, h, r2, t])
for entailment in inference_tmp:
self.equivalent2entailment[axiom_key].append(entailment)
self.subproperty2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain1':
self.inferencechain12entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (e, h) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.hr_t[(e, r3)]:
if (h, t) not in self.r_ht[r1]:
num += 1
inference_tmp.append([e, r2, h, e, r3, t, h, r1, t])
for entailment in inference_tmp:
self.inferencechain12entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain2':
self.inferencechain22entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (e, h) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.tr_h[(e, r3)]:
if (h, t) not in self.r_ht[r1]:
num += 1
inference_tmp.append([e, r2, h, t, r3, e, h, r1, t])
for entailment in inference_tmp:
self.inferencechain22entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain3':
self.inferencechain32entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (h, e) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.hr_t[(e, r3)]:
if (h, t) not in self.r_ht[r1]:
num += 1
inference_tmp.append([h, r2, e, e, r3, t, h, r1, t])
for entailment in inference_tmp:
self.inferencechain32entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain4':
self.inferencechain42entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
r1, r2, r3 = axiom
inference_tmp = []
for (h, e) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.tr_h[(e, r3)]:
if (h, t) not in self.r_ht[r1]:
num += 1
inference_tmp.append([h, r2, e, t, r3, e, h, r1, t])
for entailment in inference_tmp:
self.inferencechain42entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
return inference, num
def _init_valid_axioms(self):
# init valid axioms
self.valid_reflexive, self.valid_symmetric, self.valid_transitive,\
self.valid_inverse, self.valid_subproperty, self.valid_equivalent,\
self.valid_inferencechain1, self.valid_inferencechain2, \
self.valid_inferencechain3, self.valid_inferencechain4 = [[] for x in range(self.axiom_types)]
# init valid axiom entailments
self.valid_reflexive2entailment, self.valid_symmetric2entailment, self.valid_transitive2entailment, \
self.valid_inverse2entailment, self.valid_subproperty2entailment, self.valid_equivalent2entailment, \
self.valid_inferencechain12entailment, self.valid_inferencechain22entailment, \
self.valid_inferencechain32entailment, self.valid_inferencechain42entailment = [[] for x in range(self.axiom_types)]
# init valid axiom entailments probability
self.valid_reflexive_p, self.valid_symmetric_p, self.valid_transitive_p, \
self.valid_inverse_p, self.valid_subproperty_p, self.valid_equivalent_p, \
self.valid_inferencechain1_p, self.valid_inferencechain2_p,\
self.valid_inferencechain3_p, self.valid_inferencechain4_p= [[] for x in range(self.axiom_types)]
# init valid axiom batchsize
self.reflexive_batchsize = 1
self.symmetric_batchsize = 1
self.transitive_batchsize = 1
self.inverse_batchsize = 1
self.subproperty_batchsize = 1
self.equivalent_batchsize = 1
#self.inferencechain_batchsize = 1
self.inferencechain1_batchsize = 1
self.inferencechain2_batchsize = 1
self.inferencechain3_batchsize = 1
self.inferencechain4_batchsize = 1
# add the new triples from axioms to training triple
def update_train_triples(self, epoch=0, update_per = 10):
"""add the new triples from axioms to training triple
Args:
epoch (int, optional): epoch in training process. Defaults to 0.
update_per (int, optional): Defaults to 10.
Returns:
updated_train_data: training triple after adding the new triples from axioms
"""
reflexive_triples, symmetric_triples, transitive_triples, inverse_triples,\
equivalent_triples, subproperty_triples, inferencechain1_triples, \
inferencechain2_triples, inferencechain3_triples, inferencechain4_triples = [ np.reshape(np.asarray([]), [-1, 3]) for i in range(self.axiom_types)]
reflexive_p, symmetric_p, transitive_p, inverse_p, \
equivalent_p, subproperty_p, inferencechain1_p, \
inferencechain2_p, inferencechain3_p, inferencechain4_p = [np.reshape(np.asarray([]), [-1, 1]) for i in
range(self.axiom_types)]
updated_train_data=None
if epoch >= 5:
print("len(self.valid_reflexive2entailment):", len(self.valid_reflexive2entailment))
print("len(self.valid_symmetric2entailment):", len(self.valid_symmetric2entailment))
print("len(self.valid_transitive2entailment)", len(self.valid_transitive2entailment))
print("len(self.valid_inverse2entailment)", len(self.valid_inverse2entailment))
print("len(self.valid_equivalent2entailment)", len(self.valid_equivalent2entailment))
print("len(self.valid_subproperty2entailment)", len(self.valid_subproperty2entailment))
valid_reflexive2entailment, valid_symmetric2entailment, valid_transitive2entailment,\
valid_inverse2entailment, valid_equivalent2entailment, valid_subproperty2entailment, \
valid_inferencechain12entailment, valid_inferencechain22entailment,\
valid_inferencechain32entailment, valid_inferencechain42entailment = [[] for i in range(10)]
if len(self.valid_reflexive2entailment)>0:
valid_reflexive2entailment = np.reshape(np.asarray(self.valid_reflexive2entailment), [-1, 3])
reflexive_triples = np.asarray(valid_reflexive2entailment)[:, -3:]
reflexive_p = np.reshape(np.asarray(self.valid_reflexive_p),[-1,1])
if len(self.valid_symmetric2entailment) > 0:
valid_symmetric2entailment = np.reshape(np.asarray(self.valid_symmetric2entailment), [-1, 6])
symmetric_triples = np.asarray(valid_symmetric2entailment)[:, -3:]
symmetric_p = np.reshape(np.asarray(self.valid_symmetric_p),[-1,1])
if len(self.valid_transitive2entailment) > 0:
valid_transitive2entailment = np.reshape(np.asarray(self.valid_transitive2entailment), [-1, 9])
transitive_triples = np.asarray(valid_transitive2entailment)[:, -3:]
transitive_p = np.reshape(np.asarray(self.valid_transitive_p), [-1, 1])
if len(self.valid_inverse2entailment) > 0:
valid_inverse2entailment = np.reshape(np.asarray(self.valid_inverse2entailment), [-1, 6])
inverse_triples = np.asarray(valid_inverse2entailment)[:, -3:]
inverse_p = np.reshape(np.asarray(self.valid_inverse_p), [-1, 1])
if len(self.valid_equivalent2entailment) > 0:
valid_equivalent2entailment = np.reshape(np.asarray(self.valid_equivalent2entailment), [-1, 6])
equivalent_triples = np.asarray(valid_equivalent2entailment)[:, -3:]
equivalent_p = np.reshape(np.asarray(self.valid_equivalent_p), [-1, 1])
if len(self.valid_subproperty2entailment) > 0:
valid_subproperty2entailment = np.reshape(np.asarray(self.valid_subproperty2entailment), [-1, 6])
subproperty_triples = np.asarray(valid_subproperty2entailment)[:, -3:]
subproperty_p = np.reshape(np.asarray(self.valid_subproperty_p),[-1,1])
if len(self.valid_inferencechain12entailment) > 0:
valid_inferencechain12entailment = np.reshape(np.asarray(self.valid_inferencechain12entailment), [-1, 9])
inferencechain1_triples = np.asarray(valid_inferencechain12entailment)[:, -3:]
inferencechain1_p = np.reshape(np.asarray(self.valid_inferencechain1_p), [-1, 1])
if len(self.valid_inferencechain22entailment) > 0:
valid_inferencechain22entailment = np.reshape(
|
np.asarray(self.valid_inferencechain22entailment)
|
numpy.asarray
|
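A minimal sketch of the asarray/reshape/slice pattern used above, with illustrative ids: each inference-chain entailment is a flat list of three (h, r, t) triples; numpy.asarray plus a reshape to [-1, 9] stacks them, and the trailing [:, -3:] keeps only the newly inferred triple.
import numpy as np
entailments = [[0, 1, 2, 2, 3, 4, 0, 5, 4]]
arr = np.reshape(np.asarray(entailments), [-1, 9])
new_triples = arr[:, -3:]  # array([[0, 5, 4]])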
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 19 15:43:27 2019
@author: mikeriess
"""
import pandas as pd
import numpy as np
def InitialFormatting(df, maxcases, dateformat):
import pandas as pd
#Work on a subset:
casestoload = df["id"].unique().tolist()[0:maxcases]
df = df.loc[df["id"].isin(casestoload)]
# find cases to drop due to length
print("Cases before dropping len=1:",len(casestoload),"cases",len(df),"rows")
# Make function to apply to groups
def func(sub):
out = None
keepid = min(sub.id)
if len(sub) > 1:
out = keepid
return out
# Make list of cases above length 1
df_grp = df.groupby('id').apply(func)
#Remove NaNs from the list
keepers = df_grp.values
keepers = [i for i in keepers if i]
# Drop cases with only one event:
df = df.loc[df["id"].isin(keepers)]
print("Cases after dropping len=1:",len(keepers),"cases",len(df),"rows")
#Sort the dataframe by time afterwards
df['parsed_date'] = pd.to_datetime(df.time, format = dateformat, exact = True)
##########################################################################
print("Sorting by id, date (chronological order)")
#generate new ID column:
df = df.assign(id=(df['id']).astype('category').cat.codes)
df["id"] = df.id.astype('int32')
# Ensure ID starts at 1
if min(df.id) == 0:
df.id = df.id +1
# Sort the DF based on caseid, and the date of the event
df = df.sort_values(['id',"parsed_date"], ascending=[True, True])
df = df.drop("parsed_date",axis=1)
return df
def GetFileInfo(df):
print("Number of cases in log:",len(df["id"].unique()))
import numpy as np
import pandas as pd
#Get the maximal trace length, for determining prefix length
max_length = np.max(df['id'].value_counts())
print("longest trace is:",max_length)
#Look at the time format:
print("Time format:",df["time"].loc[0])
print("Std. format: %Y-%m-%d %H:%M:%S")
print(df.head())
return max_length
def MakeSplitCriterion(df, trainsize=0.8, mode="event"):
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import time as tm
def datetime_range(start=None, end=None):
span = end - start
for i in range(span.days + 1):
yield start + timedelta(days=i)
#Parse date
df["time_parsed"] = pd.to_datetime(df["time"])
#Get min max dates:
earliest_date = min(df["time_parsed"])
latest_date = max(df["time_parsed"])
#Find the date to divide on:
dates = list(datetime_range(start=earliest_date, end=latest_date))
n_dates = len(dates)
splitpoint = n_dates*trainsize
splitpoint = int(np.round(splitpoint,decimals=0))
dividing_date = dates[splitpoint]
print("=======================================")
print("Log starts at:",earliest_date)
print("Last event starts at:",lastest_date)
print("Train-test split happens at:",dividing_date)
print("=======================================")
if mode=="event":
"""
Here we simply divide by date of the event,
and disregard that a case could be in both train and test set
this way
"""
df["trainset"] = df["time_parsed"] < dividing_date
df["trainset"].value_counts()
split_criterion = df[["id","trainset"]]
split_criterion = split_criterion.rename(columns={'id':'caseid',
'trainset':'trainset'}, inplace=False)
split_criterion = split_criterion.reset_index(drop=True)
split_criterion = split_criterion.drop_duplicates(subset="caseid",keep="first")
print(len(split_criterion["caseid"].unique().tolist()))
print(len(split_criterion))
print(np.sum(df["trainset"]*1))
print("=======================================")
if mode=="case":
"""
Here we remove all cases that are in both train and test set
"""
# For every case, verify if it has both True & False events
# If it has, drop that case ID
# And remember to print it
df["trainset"] = df["time_parsed"] < dividing_date
df["trainset"].value_counts()
split_criterion = df[["id","trainset"]]
split_criterion = split_criterion.rename(columns={'id':'caseid',
'trainset':'trainset'}, inplace=False)
split_criterion = split_criterion.reset_index(drop=True)
#Groupby and get count of every unique value per case id
validation = pd.DataFrame(split_criterion.groupby('caseid').trainset.nunique())
validation["caseid"] = validation.index
#If a caseid has both true and false within it (count == 2),
#it should be dropped.
print("=======================================")
print("Dropping cases that have events in both train + testsets:")
print("=======================================")
print("Cases before dropping:",len(validation["trainset"]))
validation["keep"] = validation["trainset"] == 1
validation = validation.loc[validation["keep"]==True]
print("Cases after dropping:",len(validation["trainset"]))
#list of caseids to keep
ids_keep = validation["caseid"]
#drop those thet should not be kept
print("Total events before:",len(split_criterion))
split_criterion = split_criterion.loc[split_criterion["caseid"].isin(ids_keep)]
print("Total events after:",len(split_criterion))
split_criterion = split_criterion.drop_duplicates(subset="caseid",keep="first")
print("=======================================")
print(len(split_criterion))
print(np.sum(split_criterion["trainset"]*1))
return split_criterion
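# Illustrative usage sketch with a tiny synthetic log; the column names mirror those
# assumed above ("id", "time", "event") and the values are made up.
_df_demo = pd.DataFrame({"id": [1, 1, 2, 2],
                         "time": ["2019-01-01 10:00:00", "2019-01-02 11:00:00",
                                  "2019-03-01 09:00:00", "2019-03-02 09:30:00"],
                         "event": ["A", "B", "A", "C"]})
_split_demo = MakeSplitCriterion(_df_demo, trainsize=0.8, mode="event")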
def GenerateTrainData(df,
category_cols=[],
numeric_cols=[],
dateformat = "%Y-%m-%d %H:%M:%S",
droplastev=True,
drop_end_target=True,
get_activity_target=True,
get_case_features = True,
dummify_time_features = True,
max_prefix_length = 2,
window_position="last_k"):
#Make copy of df
data = df
#Subset only relevant variables
df = df[["id","time","event"]+category_cols+numeric_cols]
import time as tm
from datetime import datetime
import pandas as pd
import time
# Make new case ids: ##############################
cases = data["id"].unique().tolist()
newcaseids = list(range(0,len(cases)))
dictdf = pd.DataFrame([cases,newcaseids]).T
dictdf.columns =["id","newid"]
newdata = pd.merge(left=data,right=dictdf,on="id")
newdata.rename(columns={'id':'dropme',
'newid':'id'},
inplace=False).drop("dropme",axis=1)
# List all cases by their new id:
cases = data["id"].unique().tolist()
# Make new event ids: ##############################
evids = []
for i in cases:
subset = data.loc[data["id"] == i]
evids = evids + list(range(0,len(subset)))
evids = [x+1 for x in evids]  # shift event ids so they start at 1
#set the new eventids
data["eventid"] = evids
#make a counter to keep status
num_cases = len(cases)
# Generate features case by case
for i in cases:
#iteration = iteration +1
print("case:",i, "of",num_cases)
#Look only at one caseid at a time
subset = data.loc[data["id"] == i]
subset.index = subset.eventid
"""
#######################################################################
PREFIX:
#######################################################################
"""
index1 = 0
#determine whether to start in the beginning or end of trace
if window_position == "last_k":
#if trace is smaller than desired prefix, just pick the full trace
if max_prefix_length > len(subset):
start = 1 #0
stop = len(subset) - index1 #
#If the max prefix length is smaller than the actual trace,
#take the K last events (sliding window approach)
if max_prefix_length < len(subset):
start = len(subset) - max_prefix_length
stop = len(subset) - index1
#If max prefix is identical to trace len, start from one
if max_prefix_length == len(subset):
start = 1 #0
stop = len(subset) - index1
if window_position == "first_k":
#if trace is smaller than desired prefix len, just pick the full trace
if max_prefix_length > len(subset):
start = 1 #0
stop = len(subset) - index1 #
#If the max prefix length is smaller than the actual trace,
#take the K FIRST events (sliding window approach)
if max_prefix_length < len(subset):
start = 1
stop = max_prefix_length - index1
#If max prefix is identical to trace len, start from one
if max_prefix_length == len(subset):
start = 1 #0
stop = len(subset) - index1
print("start",start,"stop",stop)
#Prefix capability: Subset k last events from trace
subset = subset.loc[start:stop]
#Make sure the data to be dummified also follows prefix convention
if i == 1:
datasub = subset
print("len subset:",len(subset))
print("len datasub:",len(datasub))
if i > 1:
datasub = pd.concat([datasub, subset],axis=0)
"""
#######################################################################
PREFIX:
#######################################################################
"""
#Get list of events
eventlist = subset.eventid.tolist()
#store the case id
caseid = str(i)
#for every event:
for event in eventlist:
#print(event)
#Generate an eventID
event_number = event+1
#get the event for later reference
event_activity = subset["event"].loc[event]
#For the tax-approach, get next event
if event != stop: #len(subset)-1: #if its the last event
next_activity = subset["event"].loc[event+1]
if event == stop: #len(subset)-1: #if its the last event
next_activity = "END"
"""
#######################################################################
Beginning of time features:
"""
#first event
starttime = datetime.fromtimestamp(tm.mktime(tm.strptime(subset["time"].loc[start], dateformat)))
#time in secs since midnight
t = tm.strptime(subset["time"].loc[event], dateformat) #Time now
midnight = datetime.fromtimestamp(tm.mktime(t)).replace(hour=0, minute=0, second=0, microsecond=0) #Midnight
timesincemidnight = (datetime.fromtimestamp(time.mktime(t)) - midnight).total_seconds()
#monday = 1
dayofweek = datetime.fromtimestamp(tm.mktime(t)).weekday()+1
#hour of day
hour = datetime.fromtimestamp(tm.mktime(t)).hour
#Time since start in seconds
timesincestart = (datetime.fromtimestamp(time.mktime(t)) - starttime).total_seconds()
#Time since last event in seconds
if event ==start:
t_last = tm.strptime(subset["time"].loc[event], dateformat) #Time last event: now
timesincelastev = 0
if event !=start:
t_last = tm.strptime(subset["time"].loc[event-1], dateformat) #Time last event
timesincelastev = (datetime.fromtimestamp(time.mktime(t)) - datetime.fromtimestamp(time.mktime(t_last))).total_seconds()
#Time until finishtime
t_finish = tm.strptime(subset["time"].loc[stop], dateformat) #Time last event
timetofinish = (datetime.fromtimestamp(time.mktime(t_finish)) - datetime.fromtimestamp(time.mktime(t))).total_seconds()
#Time to next event:
if event == stop:#len(subset)-1: #if its the last event
t_nextev = tm.strptime(subset["time"].loc[event], dateformat) #Time last event: now
timetonextev = 0
if event != stop:#len(subset)-1: #if not last event
t_nextev = tm.strptime(subset["time"].loc[event+1], dateformat) #Time last event
timetonextev = (datetime.fromtimestamp(time.mktime(t_nextev))-datetime.fromtimestamp(time.mktime(t))).total_seconds()
"""
#######################################################################
End of time features for each event
"""
#Make a marker for dropping last step where remaining time (y) = 0
drop = 0
#Mark if it is the last activity:
if event == stop:#len(subset)-1:
drop = 1
#Actual:
results = [caseid,
event_number,
event_activity, #event
timesincemidnight,
dayofweek,
hour,
timesincestart,
timesincelastev,
timetofinish,
timetonextev,
next_activity,
drop]
if i == 1 and event == start: #First time
out = pd.DataFrame(results).T
else:
res_i = pd.DataFrame(results).T
out = out.append(res_i)
#Rename all static vars
cols = ['caseid',
'event_number',
'event_activity',
'timesincemidnight',
'dayofweek',
'hourofday',
'timesincestart',
'timesincelastev',
'y_timetofinish',
'y_timetonextev',
'next_activity',
'drop']
out.columns = cols
print("============================")
print("Post-processing:")
print("============================")
#####################################
# One-hot encoding
#store original labels:
#convert event into numerical codes
out['event_activity'] = out['event_activity'].astype('category')
out['event_activity'] = out['event_activity'].cat.codes
out['event_activity'] = out['event_activity']+1
#do the same for the subset of categorical features
datasub['event_activity'] = datasub['event'].astype('category')
datasub['event_activity'] = datasub['event_activity'].cat.codes
datasub['event_activity'] = datasub['event_activity']+1
if get_activity_target == True:
#dummify next event variable
y_a = pd.get_dummies(out["next_activity"], prefix="y_a_t1")
y_a = y_a.reset_index(drop=True)
#generate list of original varnames
y_a_varnames = y_a.columns.tolist()
#do it all again, but with renamed activity names
#convert event into numerical codes
out['next_activity'] = out['next_activity'].astype('category')
out['next_activity'] = out['next_activity'].cat.codes
out['next_activity'] = out['next_activity']+1
#dummify next event variable
y_a = pd.get_dummies(out["next_activity"], prefix="y_a_t1")
y_a = y_a.reset_index(drop=True)
#generate list of new original varnames
y_a_new_varnames = y_a.columns.tolist()
#add it into the output table
out = out.reset_index(drop=True)
out = pd.concat([out, y_a], axis=1)
#No matter what, we always want the activity to be a feature:
Dummies = pd.get_dummies(out["event_activity"].astype('str'))
Dummies = Dummies.reset_index(drop=True)
Dummies = Dummies.add_prefix('ev_a_t0_')
out = out.reset_index(drop=True)
dummycols = Dummies.columns.tolist()
out = pd.concat([out,Dummies],axis=1)
out = out.drop("event_activity",axis=1)
if get_case_features == True:
if len(category_cols) > 0:
dummylist = category_cols
print("\nDummification of",dummylist)
Dummies = pd.get_dummies(datasub[dummylist])
Dummies = Dummies.reset_index(drop=True)
out = out.reset_index(drop=True)
dummycols = Dummies.columns.tolist()
out = pd.concat([out,Dummies],axis=1)
if len(numeric_cols) > 0:
#add numerical features:
print("Adding numerical features:",numeric_cols)
numerics = datasub[numeric_cols]
numerics = numerics.reset_index(drop=True)
numerics = numerics.add_prefix('num_')
out = out.reset_index(drop=True)
out = pd.concat([out,numerics],axis=1)
if dummify_time_features == True:
features = ["dayofweek","hourofday"]
print("Dummification of time features",features)
sysdummies = out[features]
sysdummies = pd.get_dummies(sysdummies,prefix="t_")
sysdummies = sysdummies.reset_index(drop=True)
out = out.drop(features,axis=1)
out = out.reset_index(drop=True)
out = pd.concat([out, sysdummies],axis=1)
#Remove last event in each case
if droplastev==True:
print("\ndropping last event from each case")
print("before:",len(out))
out = out.loc[out["drop"] != 1]
out = out.drop("drop",axis=1)
print("after:",len(out))
print("data in X is the",max_prefix_length,"last events, excluding the final event")
#####################################
# Separate outputs
#####################################
print("\ndropping vars:")
# Generate the next_activity target vector
y_a = out[y_a_new_varnames]
y_a["caseid"] = out["caseid"]
y_a["event_number"] = out["event_number"]
if drop_end_target==True:
""" NOT SURE IF THIS SHOULD BE DONE FOR THE TAX MODEL
as this would signal the end of a trace..
"""
dropme = ['y_a_t1_1']#before it was: ["y_a_t1_END"]
print("dropping last event category from y_a:",dropme)
y_a = y_a.drop(dropme,axis=1) #drop indicator that it is the last event
# Generate the time to next activity target vector
y_t = out["y_timetonextev"]
#y_t["caseid"] = out["caseid"]
#y_t["event"] = out["event"]
y = out["y_timetofinish"]
#y["caseid"] = out["caseid"]
#y["event"] = out["event"]
#Drop everything that is not for the model to see during training
drops = ["y_timetofinish","y_timetonextev","next_activity"] + y_a_new_varnames
print("dropping vars from X: ",drops)
#remove next activity maker to avoid peeking into the future
X = out.drop(drops,axis=1)
#Reset indexes:
X = X.reset_index(drop=True)
y_a = y_a.reset_index(drop=True)
y_t = y_t.reset_index(drop=True)
y = y.reset_index(drop=True)
#####################################
# Output stuff
"""
datasub #input data used, with desired prefix
X # X features
y_a # y next event type
y_t # y time to next event
y # y remaining time at time t
cases # case ids for generating stats and back-linking results
y_a_varnames # original varnames for y_a before renaming
"""
return X, y, y_a, y_t, cases, y_a_varnames
def PadInputs(caseids, df, max_prefix_len=3, standardize=True):
import pandas as pd
import numpy as np
import time
#copy the dataframe
res = df
maxlen = max_prefix_len
#Make an empty placeholder of the dataset
dataset = pd.DataFrame(df.loc[0]).T
dataset["SEQID"] = 0 #placeholder
dataset = dataset.drop(0, axis=0)
#Make a counter
count = 0
allcases = len(caseids)
timeend = time.time()
#loop through the cases
for i in caseids:
count = count +1
#Get only events from case i
subset = res.loc[res["caseid"] == str(i)]
events = subset["event_number"].unique().tolist() #event
cols = subset.columns
#time display
timestart = time.time()
timetaken=np.round((timestart-timeend),decimals=3)
timeleft=np.round(((allcases-count)*timetaken)/60,decimals=2)
print("Case:",count,"of ",allcases," events:",len(events),
"-",timetaken,"s. per case, est.",timeleft,"min. left")
#for row in subset.itertuples():
for j in events:
#print(row)
#j = int(row.event_number)
"""row = subset.loc[subset["event_number"] == j]
#if it is touple form, try 2nd colum/datapoint
j = int(row.event_number)"""
##################
# Changing this to the event number, rather than type
#j = int(row.eventno)
##################
#Get current timestep, and all earlier timesteps
EV = subset.loc[subset["event_number"] < j+1]
#Figure out how many rows to pad
rowstoadd = maxlen - len(EV)
### Padding: pre-event-padding ###
zeros = np.zeros((rowstoadd, EV.shape[1]))
zeros = pd.DataFrame(zeros, columns=cols)
#Add the zeros before the actual data
EV = pd.concat([zeros, EV], ignore_index=True, axis=0)
#Set an ID for the sequence
EV["SEQID"] = str(i)+"_"+str(j)
EV["caseid"] = str(i)
#Add the sequence to the dataset
dataset = dataset.append(EV)
timeend = time.time()
print("\n\nOutput length:",len(dataset))
return dataset
def CaseData(df):
print("Generating case data")
# step 0: Get case aggregate stats:
import pandas as pd
CaseData = pd.DataFrame(df['id'].value_counts())
CaseData.rename(columns={"id":"num_events"}, inplace=True)
CaseData["caseid"] = CaseData.index
CaseData.sort_values('caseid', inplace=True)
distinct_events = df.groupby('id').event.nunique()
CaseData["distinct_events"] = distinct_events
mindate = df[df.groupby('id').time.transform('min') == df['time']]
mindate = mindate.drop_duplicates(subset="id",keep="first")[["id","time"]]
mindate.rename(columns={"time":"start"}, inplace=True)
maxdate = df[df.groupby('id').time.transform('max') == df['time']]
maxdate = maxdate.drop_duplicates(subset="id",keep="first")[["id","time"]]
maxdate.rename(columns={"time":"stop"}, inplace=True)
Dates = pd.merge(left=mindate,right=maxdate, on="id")
Dates["start"] = pd.to_datetime(Dates["start"])
Dates["stop"] = pd.to_datetime(Dates["stop"])
import datetime as dt
Dates["caseduration_days"] = (Dates['stop'] - Dates['start']).dt.days
Dates["caseduration_seconds"] = (Dates['stop'] - Dates['start']).dt.seconds
Dates.rename(columns={"id":"caseid"}, inplace=True)
CaseData = pd.merge(left=CaseData,right=Dates, on="caseid")
print("done")
return CaseData
def GetCaseStats(df, padded_df, CaseData, y_t, y_a, y, prefixwindow=0, dateformat="%Y-%m-%d %H:%M:%S", drop_last_ev=True):
"""
What we want is a table that can be linked with the
observations in the training data.
This table has to be ordered, and then permuted the
exact same way as the X/y output of this pipeline.
Table:
- SEQID (level)
- Input table (prefixes):
- Event number (same as number of events in curr. SEQID)
- number of events in parent case (trace length)
- (other interesting attributes could be added here)
- Target values
- y
- y_t
- y_a
- Dataset level (aggregated):
- number of prefixes in dataset
- number of unique cases
- number of events
- average number of events per case
- (other stats from the survey paper)
EVERYTHING SHOULD BE POSSIBLE TO IDENTIFY
=> Read Verenich again on useful stats
=> pick the most interesting ones
"""
# step 1: get aggregate numbers by SEQID so that
# for each SEQID, there is an Event_num, Num_events (for the case)
#Get all SEQIDs
SEQIDS = padded_df["SEQID"].unique().tolist()
SEQIDS
allseqs = len(SEQIDS)
#logic to build output table
counter = 0
for i in SEQIDS:
print("Making casestats for SEQ:",counter,"of",allseqs)
counter = counter +1
prefix = padded_df.loc[padded_df["SEQID"]==i]
SEQID = i
#get case number and prefix number
caseno = int(prefix["SEQID"].loc[0].split("_")[0])
eventno = int(prefix["SEQID"].loc[0].split("_")[1])
if drop_last_ev == False:
#Shift the index down by one when all events are kept (last event not dropped)
eventno = eventno - 1
#Get event-specific data:
case = df.loc[df["id"]==caseno]
event = case.loc[case["event_number"]==eventno]
eventtime = event["time_parsed"].dt.strftime(dateformat).tolist()[0]
#get case stats for the current sequence/prefix:
casestats = CaseData.loc[CaseData["caseid"]==caseno]
casestats["prefix_date"] = eventtime
casestats["prefixes"] = casestats["num_events"]-1
casestats["prefixwindow"] = prefixwindow
casestats["prefix_number"] = eventno
casestats["SEQID"] = SEQID
#select what is interesting:
casestats = casestats[["SEQID","caseid","num_events",
"prefix_number","prefixes","prefixwindow","prefix_date",
"distinct_events","caseduration_days"]]
if counter == 1:
output = casestats
if counter > 1:
output = pd.concat([output,casestats],axis=0)
# step 2: Match the new table with target variables
#"""
output["y"] = y.tolist()
output["y_t"] = y_t.tolist()
output = pd.concat([output.reset_index(drop=True),
y_a.reset_index(drop=True).drop("caseid",axis=1)], axis=1)
return output
def SplitAndReshape(df, y_a, y_t, y, split_criterion, prefixlength, standardize=False):
import pandas as pd
import numpy as np
padded_df = df
#prepare for join
x = padded_df.reset_index(drop=True)
x["caseid"] = x["caseid"].astype('int')
y_a = y_a.reset_index(drop=True)
y_a["caseid"] = y_a["caseid"].astype('int')
y_t = pd.concat([y_a["caseid"], y_t], axis=1)
y_t["caseid"] = y_t["caseid"].astype('int')
y_t = y_t.reset_index(drop=True)
y = pd.concat([y_a["caseid"], y], axis=1)
y["caseid"] = y["caseid"].astype('int')
y = y.reset_index(drop=True)
split = split_criterion.reset_index(drop=True)
split["caseid"] = split["caseid"].astype('int')
####################################################
#Add the splitting column (True = belongs to the train set):
print(len(x))
X = pd.merge(left=x.reset_index(drop=True),
right=split.reset_index(drop=True),
how='left',
on = 'caseid')
print(len(X))
print(len(y_a))
y_a = pd.merge(left=y_a.reset_index(drop=True),
right=split.reset_index(drop=True),
how='left',
on = 'caseid')
print(len(y_a))
print(len(y_t))
y_t = pd.merge(left=y_t.reset_index(drop=True),
right=split.reset_index(drop=True),
how='left',
on = 'caseid')
print(len(y_t))
print(len(y))
y = pd.merge(left=y.reset_index(drop=True),
right=split.reset_index(drop=True),
how='left',
on = 'caseid')
print(len(y))
####################################################
#Subset based on the date divider, made in beginning:
X_train = X.loc[X["trainset"]==True]
X_test = X.loc[X["trainset"]==False]
y_a_train = y_a.loc[y_a["trainset"]==True]
y_a_test = y_a.loc[y_a["trainset"]==False]
y_t_train = y_t.loc[y_t["trainset"]==True]
y_t_test = y_t.loc[y_t["trainset"]==False]
y_train = y.loc[y["trainset"]==True]
y_test = y.loc[y["trainset"]==False]
#Drop system variables
X_train = X_train.drop(["caseid","SEQID","trainset"],axis=1)
X_test = X_test.drop(["caseid","SEQID","trainset"],axis=1)
y_a_train = y_a_train.drop(["caseid","trainset","event_number"], axis=1) #removing the ,"event_number" again
y_a_test = y_a_test.drop(["caseid","trainset","event_number"], axis=1)
y_t_train = y_t_train.drop(["caseid","trainset"], axis=1)
y_t_test = y_t_test.drop(["caseid","trainset"], axis=1)
y_train = y_train.drop(["caseid","trainset"], axis=1)
y_test = y_test.drop(["caseid","trainset"], axis=1)
#############################
X_train = X_train.values
X_test = X_test.values
y_t_train = y_t_train.values
y_t_test = y_t_test.values
y_a_train = y_a_train.values
y_a_test = y_a_test.values
y_train = y_train.values
y_test = y_test.values
#Normalize to mean 0, sd 1
if standardize == True:
#Standardize
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
"""
This is possibly unconventional, but for simplicity,
everything is just normalized to standard scores
"""
#Fit and transform the train set:
X_train = sc.fit_transform(X_train)
#Fit and transform the TEST SET separately, as noted above:
X_test = sc.fit_transform(X_test)
#############################################################
#Reshape:
#time, n, k
timesteps = prefixlength
observations = y_train.shape[0] #int(X.shape[0]/prefixlength)
k = X_train.shape[1]
#Reshape the data
X_train = X_train.reshape(observations, timesteps, k)
X_test = X_test.reshape(y_test.shape[0], timesteps, X_test.shape[1])
print("Trainset size (with prefixes of ",prefixlength,"):",y_train.shape[0])
print("Testset size (with prefixes of ",prefixlength,"):",y_test.shape[0])
print("==========================================")
#Check the shapes
print("X: observations, timesteps, vars")
print(X_train.shape)
print("y_train: observations, labels")
print(y_train.shape)
print("y_t_train: observations, labels")
print(y_t_train.shape)
print("y_a_train: observations, labels")
print(y_a_train.shape)
return X_train, X_test, y_t_train, y_t_test, y_a_train, y_a_test, y_train, y_test
"""
GENR the experiments:
"""
def fullfact_corrected(levels):
import numpy as np
import pandas as pd
"""
Create a general full-factorial design
Parameters
----------
levels : array-like
An array of integers that indicate the number of levels of each input
design factor.
Returns
-------
mat : 2d-array
The design matrix with coded levels 0 to k-1 for a k-level factor
Example
-------
::
>>> fullfact([2, 4, 3])
array([[ 0., 0., 0.],
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 1., 1., 0.],
[ 0., 2., 0.],
[ 1., 2., 0.],
[ 0., 3., 0.],
[ 1., 3., 0.],
[ 0., 0., 1.],
[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 1., 1.],
[ 0., 2., 1.],
[ 1., 2., 1.],
[ 0., 3., 1.],
[ 1., 3., 1.],
[ 0., 0., 2.],
[ 1., 0., 2.],
[ 0., 1., 2.],
[ 1., 1., 2.],
[ 0., 2., 2.],
[ 1., 2., 2.],
[ 0., 3., 2.],
[ 1., 3., 2.]])
"""
n = len(levels) # number of factors
nb_lines = np.prod(levels) # number of trial conditions
H =
|
np.zeros((nb_lines, n))
|
numpy.zeros
|
import torch
from torch.utils.data import Dataset
import torch.nn.functional as F
import numpy as np
import pandas as pd
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import torch.nn as nn
import random
class DatasetIEMOCAP(Dataset):
def __init__(self, classes, FaceR, AudioR, TextR, method='avg', mode='train', transform=None):
super(DatasetIEMOCAP, self).__init__()
self.Data = {}
self.DataKeys = []
# self.Face = True
# self.Audio = True
# self.Text = True
self.Transform = transform
self.Classes = classes
self.Mode = mode
self.Method = method
self.loadData(FaceR, AudioR, TextR)
def loadData(self, face_results, audio_results, text_results):
iterable_keys = []
if face_results is not None:
LFks = list(face_results.keys())
iterable_keys = LFks
else:
LFks = []
if audio_results is not None:
LAks = list(audio_results.keys())
iterable_keys = LAks if len(LFks) < len(LAks) else LFks
else:
LAks = []
if text_results is not None:
LTks = list(text_results.keys())
iterable_keys = LTks if len(LAks) < len(LTks) else LAks
else:
LTks = []
for k in iterable_keys:
if k in LFks:
FD = face_results[k][0]
else:
FD = None
if k in LAks:
AD = audio_results[k][0]
else:
AD = None
if k in LTks:
TD = text_results[k][0]
else:
TD = None
self.Data[k] = (text_results[k][1], FD, AD, TD)
self.DataKeys = list(self.Data.keys())
def __len__(self):
return len(self.DataKeys)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
avs = np.ones(3)
label = self.Data[self.DataKeys[idx]][0]
face = self.Data[self.DataKeys[idx]][1]
if face is None:
avs[0] = 0.
face = np.zeros(self.Classes['number'])
audio = self.Data[self.DataKeys[idx]][2]
if audio is None:
avs[1] = 0.
audio = np.zeros(self.Classes['number'])
text = self.Data[self.DataKeys[idx]][3]
if text is None:
avs[2] = 0.
text = np.zeros(self.Classes['number'])
sample = {'face': face,
'audio': audio,
'text': text,
'label': label,
'availabilities':avs,
'name': self.DataKeys[idx]}
if self.Transform:
sample = self.Transform(sample)
return sample
def make_shuffle(self):
random.shuffle(self.DataKeys)
class EmbraceNet(nn.Module):
def __init__(self, device, input_size_list, embracement_size=256, bypass_docking=False):
super(EmbraceNet, self).__init__()
self.device = device
self.input_size_list = input_size_list
self.embracement_size = embracement_size
self.bypass_docking = bypass_docking
if (not bypass_docking):
for i, input_size in enumerate(input_size_list):
setattr(self, 'docking_%d' % (i), nn.Linear(input_size, embracement_size))
def forward(self, input_list, availabilities=None, selection_probabilities=None):
# check input data
assert len(input_list) == len(self.input_size_list)
num_modalities = len(input_list)
batch_size = input_list[0].shape[0]
# docking layer
docking_output_list = []
if (self.bypass_docking):
docking_output_list = input_list
else:
for i, input_data in enumerate(input_list):
# print(i)
# print(input_data)
x = getattr(self, 'docking_%d' % (i))(input_data)
# print('pass')
x = nn.functional.relu(x)
docking_output_list.append(x)
# check availabilities
if (availabilities is None):
availabilities = torch.ones(batch_size, len(input_list), dtype=torch.float, device=self.device)
else:
availabilities = availabilities.float()
# adjust selection probabilities
if (selection_probabilities is None):
selection_probabilities = torch.ones(batch_size, len(input_list), dtype=torch.float, device=self.device)
selection_probabilities = torch.mul(selection_probabilities, availabilities)
probability_sum = torch.sum(selection_probabilities, dim=-1, keepdim=True)
selection_probabilities = torch.div(selection_probabilities, probability_sum)
# stack docking outputs
docking_output_stack = torch.stack(docking_output_list, dim=-1) # [batch_size, embracement_size, num_modalities]
# embrace
modality_indices = torch.multinomial(selection_probabilities, num_samples=self.embracement_size, replacement=True) # [batch_size, embracement_size]
modality_toggles = nn.functional.one_hot(modality_indices, num_classes=num_modalities).float() # [batch_size, embracement_size, num_modalities]
embracement_output_stack = torch.mul(docking_output_stack, modality_toggles)
embracement_output = torch.sum(embracement_output_stack, dim=-1) # [batch_size, embracement_size]
return embracement_output
class Wrapper(nn.Module):
def __init__(self, device, n_classes=6, size_list=[6,6,6],
embracesize=100, bypass_docking=False):
super(Wrapper, self).__init__()
self.NClasses = n_classes
self.Embrace = EmbraceNet(device=device,
input_size_list=size_list,
embracement_size=embracesize,
bypass_docking=bypass_docking)
self.classifier = False
if embracesize != n_classes:
self.classifier = True
# setattr(self, 'docking_%d' % (i), nn.Linear(input_size, embracement_size))
self.clf = nn.Sequential(nn.Linear(embracesize, n_classes),
nn.Softmax(dim=-1))
def forward(self, face, audio, text, avs):
out = self.Embrace([face, audio, text], availabilities=avs)
if self.classifier:
out = self.clf(out)
return out
class FusionTransformer(object):
def __init__(self, modename):
self.mode = modename
def __call__(self, sample):
facedata, audiodata, textdata = sample['face'], sample['audio'], sample['text']
label, avs, name = sample['label'], sample['availabilities'], sample['name']
# facedata = torch.flatten(torch.from_numpy(facedata))
# audiodata = F.softmax(torch.from_numpy(audiodata),dim=-1)
facedata = torch.from_numpy(facedata)
audiodata = torch.from_numpy(audiodata)
textdata = torch.from_numpy(textdata)
avs = torch.from_numpy(avs)
label =
|
np.asarray(label)
|
numpy.asarray
|
import numpy as np
from numpy.testing import assert_allclose, assert_, assert_raises
from nose import SkipTest
from .. import LombScargle, LombScargleAstroML, LombScargleFast
def _generate_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.RandomState(rseed)
t = 20 * period * rng.rand(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.rand(N))
y += dy * rng.randn(N)
return t, y, dy
def test_periodogram_auto(N=100, period=1):
t, y, dy = _generate_data(N, period)
period, score = LombScargle().fit(t, y, dy).periodogram_auto()
def check_model(Model):
p, s = Model().fit(t, y, dy).periodogram_auto()
assert_allclose(p, period)
assert_allclose(s, score, atol=1E-2)
for Model in [LombScargle, LombScargleAstroML, LombScargleFast]:
yield check_model, Model
def test_lomb_scargle_std_vs_centered(N=100, period=1):
"""Test whether the standard and generalized lomb-scargle
give close to the same results for non-centered data"""
t, y, dy = _generate_data(N, period)
periods = np.linspace(period - 0.5, period + 0.5, 100)
def check_model(Model):
P1 = Model(fit_offset=True).fit(t, y, dy).score(periods)
P2 = Model(fit_offset=False).fit(t, y, dy).score(periods)
rms = np.sqrt(np.mean((P1 - P2) ** 2))
assert_(rms < 0.005)
for Model in [LombScargle, LombScargleAstroML]:
yield check_model, Model
def test_dy_scalar(N=100, period=1):
t, y, dy = _generate_data(N, period)
# Make dy array all the same
dy[:] = dy.mean()
def check_model(Model):
assert_allclose(Model().fit(t, y, dy).periodogram_auto(),
Model().fit(t, y, dy[0]).periodogram_auto())
for Model in [LombScargle, LombScargleAstroML, LombScargleFast]:
yield check_model, Model
def test_vs_astroML(N=100, period=1):
t, y, dy = _generate_data(N, period)
periods = np.linspace(period - 0.5, period + 0.5, 100)
def compare_models(model1, model2):
P = [model.fit(t, y, dy).score(periods)
for model in (model1, model2)]
assert_allclose(P[0], P[1])
# standard lomb-scargle
for fit_offset in [True, False]:
yield (compare_models,
LombScargle(fit_offset=fit_offset),
LombScargleAstroML(fit_offset=fit_offset))
yield (compare_models,
LombScargleAstroML(fit_offset=fit_offset),
LombScargleAstroML(fit_offset=fit_offset, slow_version=True))
# Sanity check: make sure they work without centering data
yield (compare_models,
LombScargleAstroML(center_data=False),
LombScargle(center_data=False))
def test_construct_X(N=100, period=1):
"""
Check whether the X array is constructed correctly
"""
t, y, dy = _generate_data(N, period)
X = [LombScargle(Nterms=N, fit_offset=False).fit(t, y, dy)
._construct_X(period) for N in [1, 2, 3]]
Y = [LombScargle(Nterms=N, fit_offset=True).fit(t, y, dy)
._construct_X(period) for N in [0, 1, 2, 3]]
for i in range(3):
assert_allclose(X[i], Y[i + 1][:, 1:])
for i in range(4):
|
assert_allclose(Y[i][:, 0], 1 / dy)
|
numpy.testing.assert_allclose
|
"""@package vcu_constraints
Constraints for the updated Voce-Chaboche model for limited information opt.
"""
import numpy as np
from numdifftools import nd_algopy as nda
def g3_upper(x, constants, variables):
""" Constraint on the maximum ratio of stress at saturation to initial yield stress.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
max_hardening_to_yield = constants['rho_yield_sup']
n_backstresses = int((len(x) - 6) / 2)
sy0 = x[1]
q_inf = x[2]
d_inf = x[4]
sum_ck_gammak = 0.
for i in range(n_backstresses):
c_ind = 6 + 2 * i
gamma_ind = 7 + 2 * i
sum_ck_gammak += x[c_ind] / x[gamma_ind]
return (sy0 + q_inf - d_inf + sum_ck_gammak) / sy0 - max_hardening_to_yield
def g3_lower(x, constants, variables):
""" Constraint on the minimum ratio of stress at saturation to initial yield stress.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
min_hardening_to_yield = constants['rho_yield_inf']
n_backstresses = int((len(x) - 6) / 2)
sy0 = x[1]
q_inf = x[2]
d_inf = x[4]
sum_ck_gammak = 0.
for i in range(n_backstresses):
c_ind = 6 + 2 * i
gamma_ind = 7 + 2 * i
sum_ck_gammak += x[c_ind] / x[gamma_ind]
return -(sy0 + q_inf - d_inf + sum_ck_gammak) / sy0 + min_hardening_to_yield
def g4_upper(x, constants, variables):
""" Constraint on the maximum ratio of isotropic to combined isotropic/kinematic hardening at saturation.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
iso_kin_ratio_max = constants['rho_iso_sup']
q_inf = x[2]
n_backstresses = int((len(x) - 6) / 2)
sum_ck_gammak = 0.
for i in range(n_backstresses):
c_ind = 6 + 2 * i
gamma_ind = 7 + 2 * i
sum_ck_gammak += x[c_ind] / x[gamma_ind]
return q_inf / (q_inf + sum_ck_gammak) - iso_kin_ratio_max
def g4_lower(x, constants, variables):
""" Constraint on the minimum ratio of isotropic to combined isotropic/kinematic hardening at saturation.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
iso_kin_ratio_min = constants['rho_iso_inf']
q_inf = x[2]
n_backstresses = int((len(x) - 6) / 2)
sum_ck_gammak = 0.
for i in range(n_backstresses):
c_ind = 6 + 2 * i
gamma_ind = 7 + 2 * i
sum_ck_gammak += x[c_ind] / x[gamma_ind]
return -q_inf / (q_inf + sum_ck_gammak) + iso_kin_ratio_min
def g5_lower(x, constants, variables):
""" Constraint on the lower bound ratio of gamma_1 to b for the updated VC model.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
b = x[3]
gamma1 = x[7]
gamma_b_ratio_min = constants['rho_gamma_b_inf']
return -gamma1 / b + gamma_b_ratio_min
def g5_upper(x, constants, variables):
""" Constraint on the upper bound ratio of gamma_1 to b for the updated VC model.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
b = x[3]
gamma1 = x[7]
gamma_b_ratio_max = constants['rho_gamma_b_sup']
return gamma1 / b - gamma_b_ratio_max
def g6_lower(x, constants, variables):
""" Constraint on the lower bound ratio of gamma_1 to gamma_2 for the updated VC model.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
gamma_1 is always x[7] and gamma_2 is always x[9].
"""
gamma1 = x[7]
gamma2 = x[9]
gamma_1_2_ratio_min = constants['rho_gamma_12_inf']
return -gamma1 / gamma2 + gamma_1_2_ratio_min
def g6_upper(x, constants, variables):
""" Constraint on the upper bound ratio of gamma_1 to gamma_2 for the updated VC model.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
gamma_1 is always x[7] and gamma_2 is always x[9].
"""
gamma1 = x[7]
gamma2 = x[9]
gamma_1_2_ratio_max = constants['rho_gamma_12_sup']
return gamma1 / gamma2 - gamma_1_2_ratio_max
def g7_lower(x, constants, variables):
""" Constraint on the lower bound ratio of D_inf to the total hardening for the updated VC model.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
q_inf = x[2]
d_inf = x[4]
n_backstresses = int((len(x) - 6) / 2)
sum_c_gamma = 0.
for i in range(n_backstresses):
sum_c_gamma += x[6 + 2 * i] / x[7 + 2 * i]
d_ratio_min = constants['rho_d_inf']
return -d_inf / (q_inf + sum_c_gamma) + d_ratio_min
def g7_upper(x, constants, variables):
""" Constraint on the upper bound ratio of D_inf to the total hardening for the updated VC model.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
q_inf = x[2]
d_inf = x[4]
n_backstresses = int((len(x) - 6) / 2)
sum_c_gamma = 0.
for i in range(n_backstresses):
sum_c_gamma += x[6 + 2 * i] / x[7 + 2 * i]
d_ratio_max = constants['rho_d_sup']
return d_inf / (q_inf + sum_c_gamma) - d_ratio_max
""" Gradients and Hessians of the above constraints. """
def g3_lower_gradient(x, constants, variables):
fun_wrapper = lambda x1: g3_lower(x1, constants, variables)
grad_fun = nda.Gradient(fun_wrapper)
grad = grad_fun(x)
return np.reshape(grad, (-1, 1))
def g3_upper_gradient(x, constants, variables):
fun_wrapper = lambda x1: g3_upper(x1, constants, variables)
grad_fun = nda.Gradient(fun_wrapper)
grad = grad_fun(x)
return np.reshape(grad, (-1, 1))
def g4_lower_gradient(x, constants, variables):
fun_wrapper = lambda x1: g4_lower(x1, constants, variables)
grad_fun = nda.Gradient(fun_wrapper)
grad = grad_fun(x)
return
|
np.reshape(grad, (-1, 1))
|
numpy.reshape
|
import glob, os, sys
import numpy as np
import scipy.integrate as integrate
import scipy.ndimage
import matplotlib.pylab as plt
from scipy.interpolate import griddata
def integra(x,y,method='simps'):
fullHyst = x[-1] == x[0]
if fullHyst:
middle=int(np.round(x.size/2))
top=int(np.round(x.size))
if method=='simps':
branchup=integrate.simps(y[0:middle],x[0:middle])
branchdown=integrate.simps(y[middle:top],x[middle:top])
else:
branchup=integrate.trapz(y[0:middle],x[0:middle])
branchdown=integrate.trapz(y[middle:top],x[middle:top])
result=(-branchdown-branchup)/2
else:
if method=='simps':
branchdown=integrate.simps(y,x)
branchup=integrate.simps(-np.flipud(y),-np.flipud(x))
else:
branchdown=integrate.trapz(y,x)
branchup=integrate.trapz(-np.flipud(y),-np.flipud(x))
result=(-branchdown+branchup)/2
return result
class Dist:
"""
This class load the data given a filename
and gives the possibility to generate a plot with the uploaded data
"""
def __init__(self, filename, is_avoid_zeros=True):
# It is better to make general x,y arrays
if not os.path.isfile(filename):
filename='%s%s' %(filename.split(".dat",1)[0],".DAT")
if not os.path.isfile(filename):
print("%s file do not exists" % (filename))
self.x, self.y=[0,0]
else:
self.x, self.y = np.loadtxt(filename, comments="#", unpack=True, usecols=(0,1))
if is_avoid_zeros:
s_len = len(self.x)
self.x, self.y = self.avoid_zeros()
print("%i lines deleted" % (s_len - len(self.x)))
self.x, self.y = self.avoid_rep()
else:
self.x, self.y =
|
np.loadtxt(filename, comments="#", unpack=True, usecols=(0,1))
|
numpy.loadtxt
|
# -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library (CIL) developed by CCPi
# (Collaborative Computational Project in Tomographic Imaging), with
# substantial contributions by UKRI-STFC and University of Manchester.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy
import warnings
from functools import reduce
from numbers import Number
import ctypes, platform
from ctypes import util
import math
from cil.utilities.multiprocessing import NUM_THREADS
# check for the extension
if platform.system() == 'Linux':
dll = 'libcilacc.so'
elif platform.system() == 'Windows':
dll_file = 'cilacc.dll'
dll = util.find_library(dll_file)
elif platform.system() == 'Darwin':
dll = 'libcilacc.dylib'
else:
raise ValueError('Not supported platform, ', platform.system())
cilacc = ctypes.cdll.LoadLibrary(dll)
def find_key(dic, val):
"""return the key of dictionary dic given the value"""
return [k for k, v in dic.items() if v == val][0]
def message(cls, msg, *args):
msg = "{0}: " + msg
for i in range(len(args)):
msg += " {%d}" %(i+1)
args = list(args)
args.insert(0, cls.__name__ )
return msg.format(*args )
class ImageGeometry(object):
RANDOM = 'random'
RANDOM_INT = 'random_int'
CHANNEL = 'channel'
VERTICAL = 'vertical'
HORIZONTAL_X = 'horizontal_x'
HORIZONTAL_Y = 'horizontal_y'
@property
def shape(self):
shape_dict = {ImageGeometry.CHANNEL: self.channels,
ImageGeometry.VERTICAL: self.voxel_num_z,
ImageGeometry.HORIZONTAL_Y: self.voxel_num_y,
ImageGeometry.HORIZONTAL_X: self.voxel_num_x}
shape = []
for label in self.dimension_labels:
shape.append(shape_dict[label])
return tuple(shape)
@shape.setter
def shape(self, val):
print("Deprecated - shape will be set automatically")
@property
def spacing(self):
spacing_dict = {ImageGeometry.CHANNEL: self.channel_spacing,
ImageGeometry.VERTICAL: self.voxel_size_z,
ImageGeometry.HORIZONTAL_Y: self.voxel_size_y,
ImageGeometry.HORIZONTAL_X: self.voxel_size_x}
spacing = []
for label in self.dimension_labels:
spacing.append(spacing_dict[label])
return tuple(spacing)
@property
def length(self):
return len(self.dimension_labels)
@property
def dimension_labels(self):
labels_default = DataOrder.CIL_IG_LABELS
shape_default = [ self.channels - 1, #channels default is 1
self.voxel_num_z,
self.voxel_num_y,
self.voxel_num_x]
try:
labels = list(self.__dimension_labels)
except AttributeError:
labels = labels_default.copy()
for i, x in enumerate(shape_default):
if x == 0:
try:
labels.remove(labels_default[i])
except ValueError:
pass #if not in custom list carry on
return tuple(labels)
@dimension_labels.setter
def dimension_labels(self, val):
self.set_labels(val)
def set_labels(self, labels):
labels_default = DataOrder.CIL_IG_LABELS
#check input and store. This value is not used directly
if labels is not None:
for x in labels:
if x not in labels_default:
raise ValueError('Requested axes are not possible. Accepted label names {},\ngot {}'\
.format(labels_default,labels))
self.__dimension_labels = tuple(labels)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.voxel_num_x == other.voxel_num_x \
and self.voxel_num_y == other.voxel_num_y \
and self.voxel_num_z == other.voxel_num_z \
and self.voxel_size_x == other.voxel_size_x \
and self.voxel_size_y == other.voxel_size_y \
and self.voxel_size_z == other.voxel_size_z \
and self.center_x == other.center_x \
and self.center_y == other.center_y \
and self.center_z == other.center_z \
and self.channels == other.channels \
and self.channel_spacing == other.channel_spacing \
and self.dimension_labels == other.dimension_labels:
return True
return False
@property
def dtype(self):
return self.__dtype
@dtype.setter
def dtype(self, val):
self.__dtype = val
def __init__(self,
voxel_num_x=0,
voxel_num_y=0,
voxel_num_z=0,
voxel_size_x=1,
voxel_size_y=1,
voxel_size_z=1,
center_x=0,
center_y=0,
center_z=0,
channels=1,
**kwargs):
self.voxel_num_x = int(voxel_num_x)
self.voxel_num_y = int(voxel_num_y)
self.voxel_num_z = int(voxel_num_z)
self.voxel_size_x = float(voxel_size_x)
self.voxel_size_y = float(voxel_size_y)
self.voxel_size_z = float(voxel_size_z)
self.center_x = center_x
self.center_y = center_y
self.center_z = center_z
self.channels = channels
self.channel_labels = None
self.channel_spacing = 1.0
self.dimension_labels = kwargs.get('dimension_labels', None)
self.dtype = kwargs.get('dtype', numpy.float32)
def subset(self, dimensions=None, **kw):
'''Returns a new sliced and/or reshaped ImageGeometry'''
if not kw.get('suppress_warning', False):
warnings.warn('Subset has been deprecated and will be removed in following version. Use reorder() and get_slice() instead',
DeprecationWarning)
if dimensions is None:
return self.get_slice(**kw)
else:
if len(dimensions) != len(self.dimension_labels):
raise ValueError('The axes list for subset must contain the dimension_labels {0} got {1}'.format(self.dimension_labels, dimensions))
temp = self.copy()
temp.set_labels(dimensions)
return temp
def get_slice(self,channel=None, vertical=None, horizontal_x=None, horizontal_y=None):
'''
Returns a new ImageGeometry of a single slice of in the requested direction.
'''
geometry_new = self.copy()
if channel is not None:
geometry_new.channels = 1
try:
geometry_new.channel_labels = [self.channel_labels[channel]]
except:
geometry_new.channel_labels = None
if vertical is not None:
geometry_new.voxel_num_z = 0
if horizontal_y is not None:
geometry_new.voxel_num_y = 0
if horizontal_x is not None:
geometry_new.voxel_num_x = 0
return geometry_new
def get_order_by_label(self, dimension_labels, default_dimension_labels):
order = []
for i, el in enumerate(default_dimension_labels):
for j, ek in enumerate(dimension_labels):
if el == ek:
order.append(j)
break
return order
def get_min_x(self):
return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x
def get_max_x(self):
return self.center_x + 0.5*self.voxel_num_x*self.voxel_size_x
def get_min_y(self):
return self.center_y - 0.5*self.voxel_num_y*self.voxel_size_y
def get_max_y(self):
return self.center_y + 0.5*self.voxel_num_y*self.voxel_size_y
def get_min_z(self):
if not self.voxel_num_z == 0:
return self.center_z - 0.5*self.voxel_num_z*self.voxel_size_z
else:
return 0
def get_max_z(self):
if not self.voxel_num_z == 0:
return self.center_z + 0.5*self.voxel_num_z*self.voxel_size_z
else:
return 0
def clone(self):
'''returns a copy of the ImageGeometry'''
return copy.deepcopy(self)
def copy(self):
'''alias of clone'''
return self.clone()
def __str__ (self):
repres = ""
repres += "Number of channels: {0}\n".format(self.channels)
repres += "channel_spacing: {0}\n".format(self.channel_spacing)
if self.voxel_num_z > 0:
repres += "voxel_num : x{0},y{1},z{2}\n".format(self.voxel_num_x, self.voxel_num_y, self.voxel_num_z)
repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z)
repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z)
else:
repres += "voxel_num : x{0},y{1}\n".format(self.voxel_num_x, self.voxel_num_y)
repres += "voxel_size : x{0},y{1}\n".format(self.voxel_size_x, self.voxel_size_y)
repres += "center : x{0},y{1}\n".format(self.center_x, self.center_y)
return repres
def allocate(self, value=0, **kwargs):
'''allocates an ImageData according to the size expressed in the instance
:param value: accepts numbers to allocate a uniform array, or a string such as 'random' or 'random_int' to create a random array, or None.
:type value: number or string, default None allocates empty memory block, default 0
:param dtype: numerical type to allocate
:type dtype: numpy type, default numpy.float32
'''
dtype = kwargs.get('dtype', self.dtype)
if kwargs.get('dimension_labels', None) is not None:
raise ValueError("Deprecated: 'dimension_labels' cannot be set with 'allocate()'. Use 'geometry.set_labels()' to modify the geometry before using allocate.")
out = ImageData(geometry=self.copy(),
dtype=dtype,
suppress_warning=True)
if isinstance(value, Number):
# it's created empty, so we make it 0
out.array.fill(value)
else:
if value == ImageGeometry.RANDOM:
seed = kwargs.get('seed', None)
if seed is not None:
numpy.random.seed(seed)
if dtype in [ numpy.complex , numpy.complex64 , numpy.complex128 ] :
r = numpy.random.random_sample(self.shape) + 1j * numpy.random.random_sample(self.shape)
out.fill(r)
else:
out.fill(numpy.random.random_sample(self.shape))
elif value == ImageGeometry.RANDOM_INT:
seed = kwargs.get('seed', None)
if seed is not None:
numpy.random.seed(seed)
max_value = kwargs.get('max_value', 100)
r = numpy.random.randint(max_value,size=self.shape, dtype=numpy.int32)
out.fill(numpy.asarray(r, dtype=self.dtype))
elif value is None:
pass
else:
raise ValueError('Value {} unknown'.format(value))
return out
class ComponentDescription(object):
r'''This class enables the creation of vectors and unit vectors used to describe the components of a tomography system
'''
def __init__ (self, dof):
self.__dof = dof
@staticmethod
def CreateVector(val):
try:
vec = numpy.asarray(val, dtype=numpy.float32).reshape(len(val))
except:
raise ValueError("Can't convert to numpy array")
return vec
@staticmethod
def CreateUnitVector(val):
vec = ComponentDescription.CreateVector(val)
dot_product = vec.dot(vec)
if abs(dot_product)>1e-8:
vec = (vec/numpy.sqrt(dot_product))
else:
raise ValueError("Can't return a unit vector of a zero magnitude vector")
return vec
def length_check(self, val):
try:
val_length = len(val)
except:
raise ValueError("Vectors for {0}D geometries must have length = {0}. Got {1}".format(self.__dof,val))
if val_length != self.__dof:
raise ValueError("Vectors for {0}D geometries must have length = {0}. Got {1}".format(self.__dof,val))
class PositionVector(ComponentDescription):
r'''This class creates a component of a tomography system with a position attribute
'''
@property
def position(self):
try:
return self.__position
except:
raise AttributeError
@position.setter
def position(self, val):
self.length_check(val)
self.__position = ComponentDescription.CreateVector(val)
class DirectionVector(ComponentDescription):
r'''This class creates a component of a tomography system with a direction attribute
'''
@property
def direction(self):
try:
return self.__direction
except:
raise AttributeError
@direction.setter
def direction(self, val):
self.length_check(val)
self.__direction = ComponentDescription.CreateUnitVector(val)
class PositionDirectionVector(PositionVector, DirectionVector):
r'''This class creates a component of a tomography system with position and direction attributes
'''
pass
class Detector1D(PositionVector):
r'''This class creates a component of a tomography system with position and direction_x attributes used for 1D panels
'''
@property
def direction_x(self):
try:
return self.__direction_x
except:
raise AttributeError
@direction_x.setter
def direction_x(self, val):
self.length_check(val)
self.__direction_x = ComponentDescription.CreateUnitVector(val)
class Detector2D(PositionVector):
r'''This class creates a component of a tomography system with position, direction_x and direction_y attributes used for 2D panels
'''
@property
def direction_x(self):
try:
return self.__direction_x
except:
raise AttributeError
@property
def direction_y(self):
try:
return self.__direction_y
except:
raise AttributeError
def set_direction(self, x, y):
self.length_check(x)
x = ComponentDescription.CreateUnitVector(x)
self.length_check(y)
y = ComponentDescription.CreateUnitVector(y)
dot_product = x.dot(y)
if not numpy.isclose(dot_product, 0):
raise ValueError("vectors detector.direction_x and detector.direction_y must be orthogonal")
self.__direction_y = y
self.__direction_x = x
class SystemConfiguration(object):
r'''This is a generic class to hold the description of a tomography system
'''
SYSTEM_SIMPLE = 'simple'
SYSTEM_OFFSET = 'offset'
SYSTEM_ADVANCED = 'advanced'
@property
def dimension(self):
if self._dimension == 2:
return '2D'
else:
return '3D'
@dimension.setter
def dimension(self,val):
if val != 2 and val != 3:
raise ValueError('Can set up 2D and 3D systems only. got {0}D'.format(val))
else:
self._dimension = val
@property
def geometry(self):
return self.__geometry
@geometry.setter
def geometry(self,val):
if val != AcquisitionGeometry.CONE and val != AcquisitionGeometry.PARALLEL:
raise ValueError('geom_type = {} not recognised please specify \'cone\' or \'parallel\''.format(val))
else:
self.__geometry = val
def __init__(self, dof, geometry):
"""Initialises the system component attributes for the acquisition type
"""
self.dimension = dof
self.geometry = geometry
if geometry == AcquisitionGeometry.PARALLEL:
self.ray = DirectionVector(dof)
else:
self.source = PositionVector(dof)
if dof == 2:
self.detector = Detector1D(dof)
self.rotation_axis = PositionVector(dof)
else:
self.detector = Detector2D(dof)
self.rotation_axis = PositionDirectionVector(dof)
def __str__(self):
"""Implements the string representation of the system configuration
"""
raise NotImplementedError
def __eq__(self, other):
"""Implements the equality check of the system configuration
"""
raise NotImplementedError
def update_reference_frame(self):
"""Returns the components of the system in the reference frame of the rotation axis at position 0
"""
raise NotImplementedError
def get_centre_slice(self):
"""Returns the 2D system configuration corersponding to the centre slice
"""
raise NotImplementedError
def calculate_magnification(self):
r'''Calculates the magnification of the system using the source to rotate axis distance
and the source to detector distance along the same direction.
:return: returns [dist_source_center, dist_center_detector, magnification], [0] distance from the source to the rotate axis, [1] distance from the rotate axis to the detector, [2] magnification of the system
:rtype: list
'''
raise NotImplementedError
def system_description(self):
r'''Returns `simple` if the geometry matches the default definitions with no offsets or rotations,
\nReturns `offset` if the geometry matches the default definitions with centre-of-rotation or detector offsets
\nReturns `advanced` if the geometry has a rotated or tilted rotation axis or detector, and can also have offsets
'''
raise NotImplementedError
def copy(self):
'''returns a copy of SystemConfiguration'''
return copy.deepcopy(self)
class Parallel2D(SystemConfiguration):
r'''This class creates the SystemConfiguration of a parallel beam 2D tomographic system
:param ray_direction: A 2D vector describing the x-ray direction (x,y)
:type ray_direction: list, tuple, ndarray
:param detector_pos: A 2D vector describing the position of the centre of the detector (x,y)
:type detector_pos: list, tuple, ndarray
:param detector_direction_x: A 2D vector describing the direction of the detector_x (x,y)
:type detector_direction_x: list, tuple, ndarray
:param rotation_axis_pos: A 2D vector describing the position of the axis of rotation (x,y)
:type rotation_axis_pos: list, tuple, ndarray
'''
def __init__ (self, ray_direction, detector_pos, detector_direction_x, rotation_axis_pos):
"""Constructor method
"""
super(Parallel2D, self).__init__(dof=2, geometry = 'parallel')
#source
self.ray.direction = ray_direction
#detector
self.detector.position = detector_pos
self.detector.direction_x = detector_direction_x
#rotate axis
self.rotation_axis.position = rotation_axis_pos
def update_reference_frame(self):
r'''Transforms the system origin to the rotate axis
'''
self.detector.position -= self.rotation_axis.position
self.rotation_axis.position = [0,0]
def align_reference_frame(self):
r'''Transforms the system origin to the rotate axis and aligns the ray along the positive Y direction
'''
self.update_reference_frame()
ray_vec = -self.ray.direction
axis_rotation = numpy.eye(2)
if numpy.allclose(ray_vec,[0,-1]):
pass
elif numpy.allclose(ray_vec,[0,1]):
axis_rotation[0][0] = -1
axis_rotation[1][1] = -1
else:
theta = math.atan2(ray_vec[0],-ray_vec[1])
axis_rotation[0][0] = axis_rotation[1][1] = math.cos(theta)
axis_rotation[0][1] = math.sin(theta)
axis_rotation[1][0] = -math.sin(theta)
rotation_matrix = numpy.matrix(axis_rotation)
self.ray.direction = rotation_matrix.dot(self.ray.direction.reshape(2,1))
self.detector.position = rotation_matrix.dot(self.detector.position.reshape(2,1))
self.detector.direction_x = rotation_matrix.dot(self.detector.direction_x.reshape(2,1))
def system_description(self):
r'''Returns `simple` if the geometry matches the default definitions with no offsets or rotations,
\nReturns `offset` if the geometry matches the default definitions with centre-of-rotation or detector offsets
\nReturns `advanced` if the geometry has a rotated or tilted rotation axis or detector, and can also have offsets
'''
new = self.copy()
new.align_reference_frame()
try:
det_unit = ComponentDescription.CreateUnitVector(new.detector.position)
except ValueError: #pass test if detector is on origin
det_unit = [0,1]
if not numpy.allclose(new.ray.direction,[0,1]) or\
not numpy.allclose(new.detector.direction_x,[1,0]):
return SystemConfiguration.SYSTEM_ADVANCED
elif not numpy.allclose(det_unit,[0,1]):
return SystemConfiguration.SYSTEM_OFFSET
else:
return SystemConfiguration.SYSTEM_SIMPLE
def __str__(self):
def csv(val):
return numpy.array2string(val, separator=', ')
repres = "2D Parallel-beam tomography\n"
repres += "System configuration:\n"
repres += "\tRay direction: {0}\n".format(csv(self.ray.direction))
repres += "\tRotation axis position: {0}\n".format(csv(self.rotation_axis.position))
repres += "\tDetector position: {0}\n".format(csv(self.detector.position))
repres += "\tDetector direction x: {0}\n".format(csv(self.detector.direction_x))
return repres
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if numpy.allclose(self.ray.direction, other.ray.direction) \
and numpy.allclose(self.detector.position, other.detector.position)\
and numpy.allclose(self.detector.direction_x, other.detector.direction_x)\
and numpy.allclose(self.rotation_axis.position, other.rotation_axis.position):
return True
return False
def get_centre_slice(self):
return self
def calculate_magnification(self):
return [None, None, 1.0]
class Parallel3D(SystemConfiguration):
r'''This class creates the SystemConfiguration of a parallel beam 3D tomographic system
:param ray_direction: A 3D vector describing the x-ray direction (x,y,z)
:type ray_direction: list, tuple, ndarray
:param detector_pos: A 3D vector describing the position of the centre of the detector (x,y,z)
:type detector_pos: list, tuple, ndarray
:param detector_direction_x: A 3D vector describing the direction of the detector_x (x,y,z)
:type detector_direction_x: list, tuple, ndarray
:param detector_direction_y: A 3D vector describing the direction of the detector_y (x,y,z)
:type detector_direction_y: list, tuple, ndarray
:param rotation_axis_pos: A 3D vector describing the position of the axis of rotation (x,y,z)
:type rotation_axis_pos: list, tuple, ndarray
:param rotation_axis_direction: A 3D vector describing the direction of the axis of rotation (x,y,z)
:type rotation_axis_direction: list, tuple, ndarray
'''
def __init__ (self, ray_direction, detector_pos, detector_direction_x, detector_direction_y, rotation_axis_pos, rotation_axis_direction):
"""Constructor method
"""
super(Parallel3D, self).__init__(dof=3, geometry = 'parallel')
#source
self.ray.direction = ray_direction
#detector
self.detector.position = detector_pos
self.detector.set_direction(detector_direction_x, detector_direction_y)
#rotation axis
self.rotation_axis.position = rotation_axis_pos
self.rotation_axis.direction = rotation_axis_direction
def update_reference_frame(self):
r'''Transforms the system origin to the rotation axis, with the z direction aligned to the rotation axis direction
'''
#shift detector
self.detector.position = (self.detector.position - self.rotation_axis.position)
self.rotation_axis.position = [0,0,0]
#calculate rotation matrix to align rotation axis direction with z
a = self.rotation_axis.direction
if numpy.allclose(a,[0,0,1]):
return
elif numpy.allclose(a,[0,0,-1]):
axis_rotation =
|
numpy.eye(3)
|
numpy.eye
|
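# Illustrative sketch (not part of the dataset row above): rebuild the 2D alignment
# rotation used in align_reference_frame and confirm it maps an arbitrary unit ray
# direction onto the default [0, -1] axis. The example vector below is made up.
import math
import numpy

ray_vec = numpy.array([0.3, 0.7])
ray_vec = ray_vec / numpy.sqrt((ray_vec ** 2).sum())

theta = math.atan2(ray_vec[0], -ray_vec[1])
rotation_matrix = numpy.array([[math.cos(theta), math.sin(theta)], [-math.sin(theta), math.cos(theta)]])

aligned = rotation_matrix.dot(ray_vec.reshape(2, 1)).ravel()
print(aligned)  # approximately [0, -1]
assert numpy.allclose(aligned, [0, -1])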
"""
Three inference algorithms: REVEAL, best-fit extension, and decision tree.
Reference:
[1] <NAME>, <NAME>, <NAME> (2010). BoolNet -- an R package for generation, reconstruction
and analysis of Boolean networks. Bioinformatics 26(10):1378-1380.
"""
import itertools
from sklearn import tree
import numpy as np
from gene_network import Genes
from sim import state_to_index
def _entropy(X: np.ndarray):
"""
Compute the entropy of an array X, where each row is a sample.
:param X: a 1d or 2d array
:return: the empirical entropy of the samples (in nats)
"""
if X.ndim == 1:
data = X
else: # 2d
data = np.ascontiguousarray(X).view(np.dtype((np.void, X.dtype.itemsize * X.shape[1])))
u, counts = np.unique(data, return_counts=True)
p = counts / counts.sum() # probability of each occurrence
return -(p * np.log(p)).sum()
def _removeInconsistency(X, y):
"""
Resolve conflicting samples by majority vote: assign a single label to each input pattern so that the training set is consistent and admits a Boolean-function extension
:param X: input of the training set
:param y: output of the training set
:return: None; y is modified in place so that conflicting labels become consistent
"""
index_table = [0] * (2 ** len(Genes))
for i in range(len(y)):
index = state_to_index(X[i, :])
if y[i] == 1:
index_table[index] += 1
else:
index_table[index] -= 1
for i in range(len(y)):
index = state_to_index(X[i, :])
if index_table[index] >= 0:
y[i] = 1
else:
y[i] = 0
def reveal(X, y):
"""
REVEAL algorithm, infer the regulators for a gene given the training set
:param X: input in 2d array, where each row represents a network state
:param y: output in a vector, where each element means the state of a gene
:return: a set containing the regulators, or None if no consistent regulator set is found
"""
y = np.copy(y)
_removeInconsistency(X, y)
for k in range(1, len(Genes) + 1):
for c in itertools.combinations(Genes, k): # for each combination
iX = X[:, c]
hx = _entropy(iX)
hxy = _entropy(np.hstack((iX, y.reshape((y.size, 1)))))
if hx == hxy:
return set(c)
return None
def best_fit(X, y, candidate_genes=Genes):
"""
Best-fit extension algorithm, infer the regulators for a gene given the training set
:param X: input in 2d array, where each row represents a network state
:param y: output in a vector, where each element means the state of a gene
:param candidate_genes: the candidates for regulator test
:return: a set containing the regulators with minimal best-fit error
"""
min_error = len(y) + 1
min_c = None # the regulator list corresponding to the minimum classification error
for k in range(1, len(candidate_genes) + 1):
for c in itertools.combinations(candidate_genes, k):
count_0 = [0] * (2 ** k)
count_1 = [0] * (2 ** k)
data = X[:, c] # enum can be used as numbers
for i in range(data.shape[0]):
index = state_to_index(data[i, :])
if y[i] == 0:
count_0[index] += 1 # default weight is 1
else:
count_1[index] += 1
# now for this regulator combination c, we choose its "true" label for each input to minimize
# misclassification errors
error = 0
for i in range(len(count_0)):
if count_0[i] > count_1[i]: # true label is 0, count_1 is misclassified by our function
error += count_1[i]
else:
error += count_0[i]
if error < min_error:
min_error = error
min_c = c
if min_error == 0: # choose the one with minimum k, once min error is 0, terminate enumeration
return set(min_c)
return set(min_c)
def decision_tree_infer(X, y, importance_threshold=0):
"""
Decision tree for Boolean network inference (DTBNI), infer the regulators for a gene given the training set
:param X: input in 2d array, where each row represents a network state
:param y: output in a vector, where each element means the state of a gene
:param importance_threshold: criterion for regulator selection; 0 means choose the features with non-zero importance
:return: a list containing the regulators
"""
y =
|
np.copy(y)
|
numpy.copy
|
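# Illustrative sketch (separate from the row above): the REVEAL criterion accepts a
# candidate regulator set X for a target y exactly when H(X) == H(X, y). The toy data
# below (an AND gate) is made up; numpy's np.unique over rows stands in for the
# module-specific _entropy/state_to_index helpers.
import numpy as np

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 0, 0, 1])  # y = AND(x0, x1)

def entropy_of_rows(arr):
    # empirical entropy over distinct rows (natural log, as in _entropy above)
    _, counts = np.unique(arr, axis=0, return_counts=True)
    p = counts / counts.sum()
    return -(p * np.log(p)).sum()

hx = entropy_of_rows(X)
hxy = entropy_of_rows(np.hstack((X, y.reshape(-1, 1))))
assert np.isclose(hx, hxy)  # X fully determines y, so {x0, x1} is accepted as regulators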
import networkx as nx
import json
import random
import numpy as np
import operator as op
import math
import operator
class NetWorkHelper():
def __init__(self, strat_dict, G, targets=None):
self.node_count = nx.number_of_nodes(G)
self.red_budget = strat_dict['red_budget']
self.black_budget = strat_dict['black_budget']
self.red_dist = self.node_count * [0]
self.black_dist = self.node_count * [0]
self.red_strat = strat_dict['red_strat']
self.black_strat = strat_dict['black_strat']
self.strat_dict = strat_dict
self.G = G
self.targets = targets
# TODO: Add distribution of red/black balls in network toggle
def create_network(self, initial_condition):
G = self.G
# Initializes urn dictionary
urns = {}
prev_exposure = []
# Set initial condition of urn
# TODO: Add more distributions
if initial_condition['dist'] == 'random':
# Creates random distribution of starting red and black balls between all nodes
red_dist = self.more_random_initial_dist(initial_condition['red'])
black_dist = self.more_random_initial_dist(initial_condition['black'])
elif initial_condition['dist'] == 'equal':
red_dist = self.equally_divide(initial_condition['red'])
black_dist = self.equally_divide(initial_condition['black'])
# Add distributions to urn
for key in G.nodes.keys():
urns[key] = {'red': red_dist[key], 'black': black_dist[key]}
# Adds unique urn to each node in network
nx.set_node_attributes(G, name="urns", values=urns)
nx.set_node_attributes(G, name="prev_draw", values=-1)
nx.set_node_attributes(G, name="prev_exposure", values=prev_exposure) # not being used
# nx.set_node_attributes(G, name="prev_deltar", values=0)
# nx.set_node_attributes(G, name="prev_deltab", values=0)
nx.set_node_attributes(G, name="entropy", values=[])
# #Set the initial budget distributions
# G.node[0]['prev_deltar'] = self.red_budget
# G.node[0]['prev_deltab'] = self.black_budget
# self.black_dist = self.equally_divide(self.black_budget)
# self.red_dist = self.equally_divide(self.red_budget)
#Set initial exposure rate
self.G = G
self.set_prev_exposure()
# construct superurn
for node in G.node.items():
self.construct_super_urn(node)
return self.G
def run_time_step(self):
if self.black_strat == 'uniform':
curing_dist = self.equally_divide(self.black_budget)
if self.black_strat == 'random':
curing_dist = self.constrained_sample_sum_pos(self.black_budget)
if self.black_strat == 'gradient':
curing_dist = self.black_gradient_descent()
if self.black_strat == 'centrality_ratio':
curing_dist = self.black_centrality_ratio_strat()
if self.black_strat == 'regression':
curing_dist = self.run_regression()
if self.black_strat == 'follow_bot':
curing_dist = self.follow_bot()
if self.black_strat == 'threshold':
curing_dist = self.threshold()
if self.black_strat == 'centrality_threshold':
curing_dist = self.centrality_threshold()
if self.black_strat == 'pure_centrality_threshold':
curing_dist = self.pure_centrality_threshold()
if self.black_strat == 'pure_centrality':
curing_dist = self.pure_centrality()
if self.black_strat == 'entropy':
curing_dist = self.entropy()
if self.black_strat == 'entropy2':
curing_dist = self.entropy2()
if self.black_strat == 'entropy3':
curing_dist = self.entropy3()
if self.black_strat == 'entropy_ratio':
curing_dist = self.entropy_ratio()
if self.black_strat == 'bot':
curing_dist = self.bot_strat_black()
if self.black_strat == 'pure_closeness':
curing_dist = self.pure_closeness()
if self.black_strat == 'pure_degree':
curing_dist = self.pure_degree()
if self.black_strat == 'pure_exposure':
curing_dist = self.pure_exposure()
if self.red_strat == 'uniform':
infecting_dist = self.equally_divide(self.red_budget)
if self.red_strat == 'random':
infecting_dist = self.constrained_sample_sum_pos(self.red_budget)
if self.red_strat == 'gradient':
infecting_dist = self.red_gradient_descent()
if self.red_strat == 'centrality_ratio':
infecting_dist = self.red_centrality_ratio_strat()
if self.red_strat == 'bot':
infecting_dist = self.bot_strat()
self.set_distributions(curing_dist, infecting_dist)
def equally_divide(self, total):
if self.node_count <= 0:
return []
else:
dist = [total // self.node_count + 1] * (total % self.node_count) + [total // self.node_count] * \
(self.node_count - total % self.node_count)
return [int(i) for i in dist]
def constrained_sample_sum_pos(self, total):
"""Return a randomly chosen list of n positive integers summing to total.
Each such list is equally likely to occur."""
dividers = sorted(random.sample(range(1, total), self.node_count - 1))
random_dist = [a - b for a, b in zip(dividers + [total], [0] + dividers)]
return random_dist
def more_random_initial_dist(self, total):
dist = self.constrained_sample_sum_pos(total)
# randomly pick 20% of the nodes (by index) and add 50 extra balls to each
choices = random.sample(range(self.node_count), k=int(self.node_count / 5))
for i in range(len(dist)):
if i in choices:
dist[i] += 50
return dist
# Not being used
def set_prev_exposure(self):
for node in self.G.node.items():
total_red = 0
total_balls = 0
total_red += node[1]['urns']['red']
total_balls += node[1]['urns']['red'] + node[1]['urns']['black']
neighbors = nx.all_neighbors(self.G, node[0])
for neighbor_node in neighbors:
total_red += self.G.node[neighbor_node]['urns']['red']
total_balls += self.G.node[neighbor_node]['urns']['red'] + self.G.node[neighbor_node]['urns']['black']
node[1]['prev_exposure'] = total_red/total_balls
def construct_super_urn(self, node):
# Construct super urns
super_urn = {'red': node[1]['urns']['red'], 'black': node[1]['urns']['black']}
neighbors = nx.all_neighbors(self.G, node[0])
for neighbor_node in neighbors:
super_urn['red'] += self.G.node[neighbor_node]['urns']['red']
super_urn['black'] += self.G.node[neighbor_node]['urns']['black']
# network_infection(Si,n) = proportion of the red balls in the node's super urn
network_infection = super_urn['red'] / (super_urn['red'] + super_urn['black'])
super_urn['network_infection'] = network_infection
node[1]['network_infection'] = network_infection
node[1]['super_urn'] = super_urn
return super_urn
def record_entropy(self):
for index, node in self.G.node.items():
p = 1 - node['network_infection']
#node['entropy'].append(-p*math.log(p))
node['entropy'].append(-p*math.log(p)-(1-p)*math.log(1-p))
def record_wasted_budget(self, node, waste):
node['wasted_budget'] = waste
def find_superurn_exp_red(self, infecting_strat = None):
if infecting_strat is None:
infecting_strat = self.red_dist
exp_red = {}
for node in self.G.node.items():
exp_red_temp = 0
exp_red_temp += node[1]['super_urn']['network_infection']*infecting_strat[node[0]]
neighbors = nx.all_neighbors(self.G, node[0])
for nd in neighbors:
exp_red_temp += self.G.node[nd]['super_urn']['network_infection']*infecting_strat[nd]
exp_red[node[0]] = exp_red_temp
return exp_red
def find_superurn_exp_black(self, curing_strat = None):
if curing_strat is None:
curing_strat = self.black_dist
exp_black = {}
for node in self.G.node.items():
exp_black_temp = 0
exp_black_temp += (1 - node[1]['super_urn']['network_infection'])*curing_strat[node[0]]
neighbors = nx.all_neighbors(self.G, node[0])
for nd in neighbors:
exp_black_temp += (1 - self.G.node[nd]['super_urn']['network_infection'])*curing_strat[nd]
exp_black[node[0]] = exp_black_temp
return exp_black
def calc_node_partial_exposure_black(self, node, exp_red, exp_black):
node_exp = node[1]['super_urn']['network_infection']
neighbors = nx.all_neighbors(self.G, node[0])
all_nodes = []
all_nodes.append(node[0])
partial_exp_sum = 0
for x in neighbors:
all_nodes.append(x)
for nd in all_nodes:
numerator = -(self.G.node[nd]['super_urn']['red'] + exp_red[nd])*(1-node_exp)
denominator = (self.G.node[nd]['super_urn']['black'] + self.G.node[nd]['super_urn']['red'] + exp_red[nd] + exp_black[nd])**2
partial_exp_sum += (numerator/denominator)
return partial_exp_sum
def calc_node_partial_exposure_red(self, node, exp_red, exp_black):
node_exp = node[1]['super_urn']['network_infection']
neighbors = nx.all_neighbors(self.G, node[0])
all_nodes = []
all_nodes.append(node[0])
partial_exp_sum = 0
for x in neighbors:
all_nodes.append(x)
for nd in all_nodes:
numerator = -(self.G.node[nd]['super_urn']['black'] + exp_black[nd])*(node_exp)
denominator = (self.G.node[nd]['super_urn']['black'] + self.G.node[nd]['super_urn']['red'] + exp_red[nd] + exp_black[nd])**2
partial_exp_sum += (numerator/denominator)
return partial_exp_sum
# Also doesn't always output a distribution that uses the full budget
def black_gradient_descent(self):
#This stays constant at every step of the gradient descent right now
#due to us only changing our curing strategy
exp_red = self.find_superurn_exp_red()
step = 0.1
curing_dist = self.node_count * [0]
curing_dist[0] = self.black_budget
for k in range(0,100):
next_partial_exposures = []
next_strat = []
for node in self.G.node.items():
next_strat.append(0)
exp_black = self.find_superurn_exp_black(curing_dist)
for node in self.G.node.items():
next_partial_exposures.append(self.calc_node_partial_exposure_black(node, exp_red, exp_black))
min_index = next_partial_exposures.index(min(next_partial_exposures))
next_strat[min_index] = self.black_budget
temp_array = list( map(op.sub, next_strat, curing_dist) )
temp_array = [x*step for x in temp_array]
curing_dist = list( map(op.add, curing_dist, temp_array) )
for index,x in enumerate(curing_dist):
curing_dist[index] = round(x)
return curing_dist
def red_gradient_descent(self):
#This stays constant at every step of the gradient descent right now
#due to us only changing our curing strategy
exp_black = self.find_superurn_exp_black()
step = 0.1
infecting_dist = self.node_count * [0]
infecting_dist[0] = self.red_budget
for k in range(0,100):
next_partial_exposures = []
next_strat = []
for node in self.G.node.items():
next_strat.append(0)
exp_red = self.find_superurn_exp_red(infecting_dist)
for node in self.G.node.items():
next_partial_exposures.append(self.calc_node_partial_exposure_red(node, exp_red, exp_black))
min_index = next_partial_exposures.index(min(next_partial_exposures))
next_strat[min_index] = self.red_budget
temp_array = list( map(op.sub, next_strat, infecting_dist) )
temp_array = [x*step for x in temp_array]
infecting_dist = list( map(op.add, infecting_dist, temp_array) )
for index,x in enumerate(infecting_dist):
infecting_dist[index] = round(x)
return infecting_dist
def get_centrality_infection(self):
# closeness centrality
closeness_centrality_dict = nx.closeness_centrality(self.G)
centrality_infection_sum = 0
for index, node in self.G.node.items():
degree = self.G.degree(index)
closeness_centrality = closeness_centrality_dict[index]
infection_rate = node['super_urn']['network_infection']
##### parameters: degree, closeness, infection
centrality_infection = degree*closeness_centrality*infection_rate
node['centrality_infection'] = centrality_infection
centrality_infection_sum += centrality_infection
return centrality_infection_sum
#TODO: Fix this: it does not always output a distribution that sums to the budget.
#We should probably also run the entire strategy with one call rather than node by node
def black_centrality_ratio_strat(self):
infection_array = np.array(list(nx.get_node_attributes(self.G,'network_infection').values()))
centrality_mult_array = np.array(list(nx.get_node_attributes(self.G,'centrality_multiplier').values()))
curing_dist = np.multiply(infection_array, centrality_mult_array)
# curing_dist = centrality_mult_array
curing_dist = curing_dist / sum(curing_dist)
curing_dist =
|
np.around(curing_dist * self.black_budget)
|
numpy.around
|
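# Illustrative sketch (separate from the row above) of the super-urn computation in
# construct_super_urn: a node's network infection is the red-ball fraction pooled over
# its own urn and its neighbours' urns. The 3-node path graph and ball counts are
# made-up example values; plain dicts stand in for the networkx graph.
urns = {0: {'red': 2, 'black': 8}, 1: {'red': 5, 'black': 5}, 2: {'red': 1, 'black': 9}}
neighbours = {0: [1], 1: [0, 2], 2: [1]}

network_infection = {}
for node, nbrs in neighbours.items():
    red = urns[node]['red'] + sum(urns[n]['red'] for n in nbrs)
    black = urns[node]['black'] + sum(urns[n]['black'] for n in nbrs)
    network_infection[node] = red / (red + black)

print(network_infection)  # node 1 pools all three urns: 8 / 30, about 0.267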
from __future__ import print_function, division
import sys,os
quspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,quspin_path)
from quspin.basis import spin_basis_1d,spin_basis_general
from quspin.operators import hamiltonian,quantum_operator,quantum_LinearOperator
from itertools import product
import cProfile,os
import numpy as np
import scipy.sparse as sp
dtypes = [np.float64,np.complex128]
def eps(dtype1,dtype2):
return 100*max(np.finfo(dtype1).eps,np.finfo(dtype2).eps)
Lx = 3
Ly = 2
N = Lx*Ly
m = None
i = np.arange(N)
x = i%Lx
y = i//Lx
tx = (x+1)%Lx + y*Lx
ty = x + ((y+1)%Ly)*Lx
px = x[::-1] + y*Lx
py = x + y[::-1]*Lx
z = -(1+i)
basis_full = spin_basis_general(N,pauli=False)
basis_pcon = spin_basis_general(N,pauli=False,m=0.0)
basis_pcon_symm = spin_basis_general(N,pauli=False,m=0.0,tx=(tx,0),ty=(ty,0),px=(px,0),py=(py,0),z=(z,0))
Jzz_list = [[0.0,i,tx[i]] for i in range(N)]+[[0.0,i,ty[i]] for i in range(N)]
Jxy_list = [[0.5,i,tx[i]] for i in range(N)]+[[0.5,i,ty[i]] for i in range(N)]
static = [["+-",Jxy_list],["-+",Jxy_list],["zz",Jzz_list]]
for b in [basis_full,basis_pcon,basis_pcon_symm]:
for dtype1,dtype2 in product(dtypes,dtypes):
H = hamiltonian(static,[],basis=b,dtype=dtype1)
H_op = quantum_LinearOperator(static,basis=b,dtype=dtype1)
for i in range(10):
v = np.random.uniform(-1,1,size=(b.Ns,)) + 1j*np.random.uniform(-1,1,size=(b.Ns,))
v /= np.linalg.norm(v)
v1 = H.dot(v)
v2 = H_op.dot(v)
atol = eps(dtype1,dtype2)
np.testing.assert_allclose(v1,v2,atol=atol)
v1 = H.T.dot(v)
v2 = H_op.T.dot(v)
atol = eps(dtype1,dtype2)
np.testing.assert_allclose(v1,v2,atol=atol)
v1 = H.conj().dot(v)
v2 = H_op.conj().dot(v)
atol = eps(dtype1,dtype2)
np.testing.assert_allclose(v1,v2,atol=atol)
v1 = H.H.dot(v)
v2 = H_op.H.dot(v)
atol = eps(dtype1,dtype2)
np.testing.assert_allclose(v1,v2,atol=atol)
v = np.random.uniform(-1,1,size=(b.Ns,10)) + 1j*np.random.uniform(-1,1,size=(b.Ns,10))
v /= np.linalg.norm(v)
v1 = H.dot(v)
v2 = H_op.dot(v)
atol = eps(dtype1,dtype2)
np.testing.assert_allclose(v1,v2,atol=atol)
v1 = H.T.dot(v)
v2 = H_op.T.dot(v)
atol = eps(dtype1,dtype2)
np.testing.assert_allclose(v1,v2,atol=atol)
v1 = H.conj().dot(v)
v2 = H_op.conj().dot(v)
atol = eps(dtype1,dtype2)
|
np.testing.assert_allclose(v1,v2,atol=atol)
|
numpy.testing.assert_allclose
|
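# Illustrative sketch (separate from the row above) of the same check pattern with
# plain scipy in place of quspin: an explicit matrix and a matrix-free LinearOperator
# applied to random normalised vectors should agree. The 1D Laplacian used here is
# just an example operator.
import numpy as np
import scipy.sparse.linalg as sla

n = 50
A = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)  # dense 1D Laplacian

def matvec(v):
    # matrix-free application of the same stencil: (A v)_i = 2 v_i - v_{i-1} - v_{i+1}
    out = 2.0 * v
    out[:-1] -= v[1:]
    out[1:] -= v[:-1]
    return out

A_op = sla.LinearOperator((n, n), matvec=matvec, dtype=float)

rng = np.random.default_rng(0)
for _ in range(10):
    v = rng.standard_normal(n)
    v /= np.linalg.norm(v)
    np.testing.assert_allclose(A.dot(v), A_op.matvec(v), atol=1e-12)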
import torch
import torch.nn as nn
import os
import time
import uuid
import numpy as np
import random
import string
import statistics
from agents.GTN_base import GTN_Base
from envs.env_factory import EnvFactory
from utils import calc_abs_param_sum
class GTN_Master(GTN_Base):
def __init__(self, config, bohb_id=-1, bohb_working_dir=None):
super().__init__(bohb_id)
self.config = config
self.device = config["device"]
self.env_name = config['env_name']
gtn_config = config["agents"]["gtn"]
self.max_iterations = gtn_config["max_iterations"]
self.agent_name = gtn_config["agent_name"]
self.num_workers = gtn_config["num_workers"]
self.step_size = gtn_config["step_size"]
self.nes_step_size = gtn_config["nes_step_size"]
self.weight_decay = gtn_config["weight_decay"]
self.score_transform_type = gtn_config["score_transform_type"]
self.time_mult = gtn_config["time_mult"]
self.time_max = gtn_config["time_max"]
self.time_sleep_master = gtn_config["time_sleep_master"]
self.quit_when_solved = gtn_config["quit_when_solved"]
self.synthetic_env_type = gtn_config["synthetic_env_type"]
self.unsolved_weight = gtn_config["unsolved_weight"]
# make it faster on single PC
if gtn_config["mode"] == 'single':
self.time_sleep_master /= 10
# to store results from workers
self.time_elapsed_list = [None] * self.num_workers # for debugging
self.score_list = [None]*self.num_workers
self.score_orig_list = [None]*self.num_workers # for debugging
self.score_transform_list = [None]*self.num_workers
# to keep track of the reference virtual env
self.env_factory = EnvFactory(config)
if self.synthetic_env_type == 0:
generate_synthetic_env_fn = self.env_factory.generate_virtual_env
elif self.synthetic_env_type == 1:
generate_synthetic_env_fn = self.env_factory.generate_reward_env
else:
raise NotImplementedError("Unknown synthetic_env_type value: " + str(self.synthetic_env_type))
self.synthetic_env_orig = generate_synthetic_env_fn(print_str='GTN_Base: ')
self.synthetic_env_list = [generate_synthetic_env_fn(print_str='GTN_Master: ') for _ in range(self.num_workers)]
self.eps_list = [generate_synthetic_env_fn(print_str='GTN_Master: ') for _ in range(self.num_workers)]
# for early out
self.avg_runtime = None
self.real_env = self.env_factory.generate_real_env()
# to store models
if bohb_working_dir:
self.model_dir = str(os.path.join(bohb_working_dir, 'GTN_models_' + self.env_name))
else:
self.model_dir = str(os.path.join(os.getcwd(), "results", 'GTN_models_' + self.env_name))
self.model_name = self.get_model_file_name(self.env_name + '_' + ''.join(random.choices(string.ascii_uppercase + string.digits, k = 6)) + '.pt')
self.best_score = -float('Inf')
os.makedirs(self.model_dir, exist_ok=True)
print('Starting GTN Master with bohb_id {}'.format(bohb_id))
# self.bohb_next_run_counter = 0
def get_model_file_name(self, file_name):
return os.path.join(self.model_dir, file_name)
def run(self):
mean_score_orig_list = []
for it in range(self.max_iterations):
t1 = time.time()
print('-- Master: Iteration ' + str(it) + ' ' + str(time.time()-t1))
print('-- Master: write worker inputs' + ' ' + str(time.time()-t1))
self.write_worker_inputs(it)
print('-- Master: read worker results' + ' ' + str(time.time()-t1))
self.read_worker_results()
mean_score = np.mean(self.score_orig_list)
mean_score_orig_list.append(mean_score)
solved_flag = self.save_good_model(mean_score)
if solved_flag and self.quit_when_solved:
print('ENV SOLVED')
# self.bohb_next_run_counter += 1
break
print('-- Master: rank transform' + ' ' + str(time.time()-t1))
self.score_transform()
print('-- Master: update env' + ' ' + str(time.time()-t1))
self.update_env()
print('-- Master: print statistics' + ' ' + str(time.time()-t1))
self.print_statistics(it=it, time_elapsed=time.time()-t1)
print('Master quitting')
self.print_statistics(it=-1, time_elapsed=-1)
# error handling
if len(mean_score_orig_list) > 0:
return np.mean(self.score_orig_list), mean_score_orig_list, self.model_name
else:
return 1e9, mean_score_orig_list, self.model_name
def save_good_model(self, mean_score):
if self.synthetic_env_orig.is_virtual_env():
if mean_score > self.real_env.get_solved_reward() and mean_score > self.best_score:
self.save_model()
self.best_score = mean_score
return True
else:
# we save all models and select the best from the log
# whether we can solve an environment is irrelevant for reward_env since we optimize for speed here
if mean_score > self.best_score:
self.save_model()
self.best_score = mean_score
return False
def save_model(self):
save_dict = {}
save_dict['model'] = self.synthetic_env_orig.state_dict()
save_dict['config'] = self.config
save_path = os.path.join(self.model_dir, self.model_name)
print('save model: ' + str(save_path))
torch.save(save_dict, save_path)
def calc_worker_timeout(self):
if self.time_elapsed_list[0] is None:
return self.time_max
else:
return statistics.mean(self.time_elapsed_list) * self.time_mult
def write_worker_inputs(self, it):
timeout = self.calc_worker_timeout()
print('timeout: ' + str(timeout))
for id in range(self.num_workers):
file_name = self.get_input_file_name(id=id)
check_file_name = self.get_input_check_file_name(id=id)
# wait until worker has deleted the file (i.e. acknowledged the previous input)
while os.path.isfile(file_name):
time.sleep(self.time_sleep_master)
time.sleep(self.time_sleep_master)
# if we are not using bohb, shut everything down after last iteration
if self.bohb_id < 0:
quit_flag = it == self.max_iterations-1
else:
quit_flag = False
data = {}
data['timeout'] = timeout
data['quit_flag'] = quit_flag
data['config'] = self.config
data['synthetic_env_orig'] = self.synthetic_env_orig.state_dict()
# data['bohb_next_run_counter'] = self.bohb_next_run_counter
torch.save(data, file_name)
torch.save({}, check_file_name)
def read_worker_results(self):
for id in range(self.num_workers):
file_name = self.get_result_file_name(id)
check_file_name = self.get_result_check_file_name(id)
# wait until worker has finished calculations
while not os.path.isfile(check_file_name):
time.sleep(self.time_sleep_master)
data = torch.load(file_name)
self.time_elapsed_list[id] = data['time_elapsed']
self.score_list[id] = data['score']
self.eps_list[id].load_state_dict(data['eps'])
self.score_orig_list[id] = data['score_orig']
self.synthetic_env_list[id].load_state_dict(data['synthetic_env']) # for debugging
os.remove(check_file_name)
os.remove(file_name)
def score_transform(self):
scores = np.asarray(self.score_list)
scores_orig = np.asarray(self.score_orig_list)
if self.score_transform_type == 0:
# convert [1, 0, 5] to [0.2, 0, 1]
scores = (scores - min(scores)) / (max(scores)-min(scores)+1e-9)
elif self.score_transform_type == 1:
# convert [1, 0, 5] to [0.5, 0, 1]
s = np.argsort(scores)
n = len(scores)
for i in range(n):
scores[s[i]] = i / (n-1)
elif self.score_transform_type == 2 or self.score_transform_type == 3:
# fitness shaping from "Natural Evolution Strategies" (Wierstra 2014) paper, either with zero mean (2) or without (3)
lmbda = len(scores)
s = np.argsort(-scores)
for i in range(lmbda):
scores[s[i]] = i + 1
scores = scores.astype(float)
for i in range(lmbda):
scores[i] = max(0, np.log(lmbda / 2 + 1) - np.log(scores[i]))
scores = scores / sum(scores)
if self.score_transform_type == 2:
scores -= 1 / lmbda
scores /= max(scores)
elif self.score_transform_type == 4:
# consider single best eps
scores_tmp = np.zeros(scores.size)
scores_tmp[np.argmax(scores)] = 1
scores = scores_tmp
elif self.score_transform_type == 5:
# consider single best eps that is better than the average
avg_score_orig = np.mean(scores_orig)
scores_idx = np.where(scores > avg_score_orig + 1e-6,1,0) # 1e-6 to counter numerical errors
if sum(scores_idx) > 0:
scores_tmp = np.zeros(scores.size)
scores_tmp[np.argmax(scores)] = 1
scores = scores_tmp
else:
scores = scores_idx
elif self.score_transform_type == 6 or self.score_transform_type == 7:
# consider all eps that are better than the average, normalize weight sum to 1
avg_score_orig =
|
np.mean(scores_orig)
|
numpy.mean
|
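# Illustrative sketch (separate from the row above) of the rank-based fitness shaping
# used for score_transform_type 2/3: raw scores become utilities
# u_i = max(0, log(lambda/2 + 1) - log(rank_i)), normalised to sum to 1. The score
# values below are arbitrary examples.
import numpy as np

scores = np.array([1.0, 0.0, 5.0, 2.0])
lmbda = len(scores)

ranks = np.empty(lmbda)
ranks[np.argsort(-scores)] = np.arange(1, lmbda + 1)  # best score gets rank 1

utilities = np.maximum(0.0, np.log(lmbda / 2 + 1) - np.log(ranks))
utilities /= utilities.sum()
print(utilities)  # higher-scoring perturbations receive larger weights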
"""
"T#" test cases from https://archimede.dm.uniba.it/~bvpsolvers/testsetbvpsolvers/?page_id=27, [1]_.
"R#" test cases from https://doi.org/10.2514/6.2019-3666, [2]_.
References
----------
.. [1] <NAME> and <NAME>. "A fortran test set for boundary value problem solvers."
AIP Conference Proceedings. 1648(1):020009, 2015.
.. [2] <NAME> and <NAME>. "Numerical Algorithms for Solving Boundary-Value Problems on Reduced
Dimensional Manifolds." AIAA Aviation 2019 Forum. 2019.
"""
import pytest
import itertools
from beluga.numeric.data_classes.Trajectory import Trajectory
from beluga.numeric.bvp_solvers import Shooting
import numpy as np
import copy
from scipy.special import erf
# Test the shooting solver for each algorithm listed below
ALGORITHMS = ['Armijo', 'SLSQP']
EASY = [1]
MEDIUM = [1e-1]
HARD = [1e-2]
VHARD = [1e-3]
tol = 1e-3
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t1(algorithm, const):
def odefun(y, _, k):
return y[1], y[0] / k[0]
def odejac(_, __, k):
df_dy = np.array([[0, 1], [1 / k[0], 0]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 1], [0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = (np.exp(-sol.t / np.sqrt(sol.const)) - np.exp((sol.t - 2) / np.sqrt(sol.const))) / (
1 - np.exp(-2.e0 / np.sqrt(sol.const)))
e2 = (1. / (sol.const ** (1 / 2) * np.exp(sol.t / sol.const ** (1 / 2))) + np.exp(
(sol.t - 2) / sol.const ** (1 / 2)) / sol.const ** (1 / 2)) / (1 / np.exp(2 / sol.const ** (1 / 2)) - 1)
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t2(algorithm, const):
def odefun(y, _, k):
return y[1], y[1] / k[0]
def odejac(_, __, k):
df_dy = np.array([[0, 1], [0, 1 / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 1], [0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = (1.e0 - np.exp((sol.t - 1.e0) / sol.const)) / (1.e0 - np.exp(-1.e0 / sol.const))
e2 = np.exp((sol.t - 1) / sol.const) / (sol.const * (1 / np.exp(1 / sol.const) - 1))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t3(algorithm, const):
def odefun(y, _, k):
return (2 * y[1], 2 * (-(2 + np.cos(np.pi * y[2])) * y[1] + y[0] - (1 + k[0] * np.pi * np.pi) * np.cos(
np.pi * y[2]) - (2 + np.cos(np.pi * y[2])) * np.pi * np.sin(np.pi * y[2])) / k[0], 2)
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], -(2 * np.cos(np.pi * y[2]) + 4)/k[0],
(2*np.pi**2 * np.sin(np.pi * y[2])**2 + 2 * np.pi*np.sin(np.pi*y[2])*(k[0]*np.pi**2 + 1)
- 2*np.pi**2*np.cos(np.pi*y[2])*(np.cos(np.pi*y[2]) + 2)
+ 2*y[1]*np.pi*np.sin(np.pi*y[2]))/k[0]],
[0, 0, 0]], dtype=float)
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0] + 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2])
e2 = -np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t4(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * (((1 + k[0]) * y[0] - y[1]) / k[0]), 2
def odejac(_, __, k):
df_dy = np.array([[0, 2, 0], [2 * (1 + k[0]) / k[0], 2 * (-1) / k[0], 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] - 1 - np.exp(-2), yf[0] - 1 - np.exp(-2 * (1 + k[0]) / k[0]), y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.exp(sol.y[:, 2] - 1) + np.exp(-((1 + sol.const[0]) * (1 + sol.y[:, 2]) / sol.const[0]))
e2 = np.exp(sol.y[:, 2] - 1) - (sol.const[0] + 1) / (
sol.const[0] * np.exp((sol.y[:, 2] + 1) * (sol.const[0] + 1) / sol.const[0]))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t5(algorithm, const):
def odefun(y, _, k):
return (2 * y[1], 2 * ((y[0] + y[2] * y[1] - (1 + k[0] * np.pi ** 2) * np.cos(np.pi * y[2])
+ y[2] * np.pi * np.sin(np.pi * y[2])) / k[0]), 2)
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0], [2 / k[0], 2 * y[2] / k[0],
(2 * (y[1] + np.pi * np.sin(np.pi * y[2]) + np.pi * np.sin(np.pi * y[2])
* (k[0] * np.pi ** 2 + 1) + np.pi * np.pi * y[2]
* np.cos(np.pi * y[2]))) / k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0] + 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2])
e2 = -np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t6(algorithm, const):
def odefun(y, _, k):
return (2 * y[1], 2 * ((-y[2] * y[1] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.pi * y[2] * np.sin(
np.pi * y[2])) / k[0]), 2)
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[0, -2 * y[2] / k[0],
-(2 * (y[1] + np.pi * np.sin(np.pi * y[2]) - k[0] * np.pi ** 3 * np.sin(np.pi * y[2])
+ np.pi ** 2 * y[2] * np.cos(np.pi * y[2]))) / k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 2, yf[0], y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + erf(sol.y[:, 2] / np.sqrt(2 * sol.const[0])) / erf(1 / np.sqrt(2 * sol.const[0]))
e2 = np.sqrt(2) / (np.sqrt(np.pi) * np.sqrt(sol.const[0]) * np.exp(sol.y[:, 2] ** 2 / (2 * sol.const[0])) * erf(
np.sqrt(2) / (2 * np.sqrt(sol.const[0])))) - np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t7(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((-y[2] * y[1] + y[0] - (1.0e0 + k[0] * np.pi ** 2) * np.cos(np.pi * y[2]) - np.pi *
y[2] * np.sin(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], -2 * y[2] / k[0],
-(2 * (y[1] + np.pi * np.sin(np.pi * y[2]) + np.pi ** 2 * y[2] * np.cos(np.pi * y[2])
- np.pi * np.sin(np.pi * y[2]) * (k[0] * np.pi ** 2 + 1))) / k[0]],
[0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0] - 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + sol.y[:, 2] + (
sol.y[:, 2] * erf(sol.y[:, 2] / np.sqrt(2.0e0 * sol.const[0]))
+ np.sqrt(2 * sol.const[0] / np.pi) * np.exp(-sol.y[:, 2] ** 2 / (2 * sol.const[0]))) / (
erf(1.0e0 / np.sqrt(2 * sol.const[0])) + np.sqrt(2.0e0 * sol.const[0] / np.pi)
* np.exp(-1 / (2 * sol.const[0])))
e2 = erf((np.sqrt(2) * sol.y[:, 2]) / (2 * np.sqrt(sol.const[0]))) / (
erf(np.sqrt(2) / (2 * np.sqrt(sol.const[0]))) + (np.sqrt(2) * np.sqrt(sol.const[0])) / (
np.sqrt(np.pi) * np.exp(1 / (2 * sol.const[0])))) - np.pi * np.sin(np.pi * sol.y[:, 2]) + 1
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t8(algorithm, const):
def odefun(y, _, k):
return y[1], (-y[1] / k[0]), 1
def odejac(_, __, k):
df_dy = np.array([[0, 1, 0], [0, -1 / k[0], 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0] - 2, y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[1, 0, -1], [2, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = (2 - np.exp(-1 / sol.const[0]) - np.exp(-sol.y[:, 2] / sol.const[0])) / (1 - np.exp(-1 / sol.const[0]))
e2 = -1 / (sol.const[0] * np.exp(sol.y[:, 2] / sol.const[0]) * (1 / np.exp(1 / sol.const[0]) - 1))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t9(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * (-(4 * y[2] * y[1] + 2 * y[0]) / (k[0] + y[2] ** 2)), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[-4 / (y[2] ** 2 + k[0]), -(8 * y[2]) / (y[2] ** 2 + k[0]),
(4 * y[2] * (2 * y[0] + 4 * y[1] * y[2])) / (y[2] ** 2 + k[0]) ** 2
- (8 * y[1]) / (y[2] ** 2 + k[0])], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] - 1 / (1 + k[0]), yf[0] - 1 / (1 + k[0]), y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=2)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0., 1., 2)
# noinspection PyTypeChecker
solinit.y = np.array([[1. / (1. + const), 0., -1.], [1. / (1. + const), 1., 1.]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = 1 / (sol.const[0] + sol.y[:, 2] ** 2)
e2 = -(2 * sol.y[:, 2]) / (sol.y[:, 2] ** 2 + sol.const[0]) ** 2
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t10(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * (-y[2] * y[1] / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0], [0, 2 * (-y[2]) / k[0], 2 * (-y[1] / k[0])], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0] - 2, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, -1], [2, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = 1 + erf(sol.y[:, 2] / np.sqrt(2 * sol.const[0])) / erf(1 / np.sqrt(2 * sol.const[0]))
e2 = np.sqrt(2) / (np.sqrt(np.pi) * np.sqrt(sol.const[0]) * np.exp(sol.y[:, 2] ** 2 / (2 * sol.const[0])) * erf(
np.sqrt(2) / (2 * np.sqrt(sol.const[0]))))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t11(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((y[0] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.cos(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], 0, (2 * (np.pi * np.sin(np.pi * y[2])
+ k[0] * np.pi ** 3 * np.sin(np.pi * y[2]))) / k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0] + 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2])
e2 = -np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t12(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((y[0] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.cos(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], 0, (2 * (np.pi * np.sin(np.pi * y[2]) + k[0] * np.pi ** 3 * np.sin(np.pi * y[2])))
/ k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0], y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + np.exp(-(1 - sol.y[:, 2]) / np.sqrt(sol.const[0]))
e2 = np.exp((sol.y[:, 2] - 1) / np.sqrt(sol.const[0])) / np.sqrt(sol.const[0]) - np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t13(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((y[0] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.cos(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], 0, (2 * (np.pi * np.sin(np.pi * y[2]) + k[0] * np.pi ** 3 * np.sin(np.pi * y[2])))
/ k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0] + 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=2)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, -1], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + np.exp(-(1 + sol.y[:, 2]) / np.sqrt(sol.const[0]))
e2 = -np.pi * np.sin(np.pi * sol.y[:, 2]) - 1 / (
np.sqrt(sol.const[0]) * np.exp((sol.y[:, 2] + 1) / np.sqrt(sol.const[0])))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t14(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((y[0] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.cos(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], 0, (2 * (np.pi * np.sin(np.pi * y[2]) + k[0] * np.pi ** 3 * np.sin(np.pi * y[2])))
/ k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0], y0[2]+1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=4)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[0, 0, -1], [0, 0, 1]])
sol.const = np.array([const])
# noinspection PyTypeChecker
cc = np.linspace(const * 10, const, 10)
for c in cc:
sol = copy.deepcopy(sol)
sol.const = np.array([c])
sol = algo.solve(sol)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + np.exp(-(1 + sol.y[:, 2]) / np.sqrt(sol.const[0])) + np.exp(
-(1 - sol.y[:, 2]) / np.sqrt(sol.const[0]))
e2 = np.exp((sol.y[:, 2] - 1) / np.sqrt(sol.const[0])) / np.sqrt(sol.const[0]) - np.pi * np.sin(
np.pi * sol.y[:, 2]) - 1 / (np.sqrt(sol.const[0]) * np.exp((sol.y[:, 2] + 1) / np.sqrt(sol.const[0])))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t15(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * (y[2] * y[0] / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0], [2 * (y[2] / k[0]), 0, 2 * (y[0] / k[0])], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0] - 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[1, 0, -1], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
assert sol.converged is True
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t16(algorithm, const):
def odefun(y, _, k):
return 1 * y[1], 1 * (-y[0] * np.pi ** 2 / (4 * k[0])), 1
def odejac(_, __, k):
df_dy = np.array([[0, 1, 0], [-np.pi**2 / (4 * k[0]), 0, 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0], yf[0] - np.sin(np.pi / (2 * np.sqrt(k[0]))), y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.sin(np.pi * sol.y[:, 2] / (2 * np.sqrt(sol.const[0])))
e2 = (np.pi * np.cos((np.pi * sol.y[:, 2]) / (2 * np.sqrt(sol.const[0])))) / (2 * np.sqrt(sol.const[0]))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t17(algorithm, const):
def odefun(y, _, k):
return 0.2 * y[1], 0.2 * (-3 * k[0] * y[0] / (k[0] + y[2] ** 2) ** 2), 0.2
def odejac(y, _, k):
df_dy = np.array([[0, 0.2, 0],
[-(3 * k[0]) / (5 * (y[2] ** 2 + k[0]) ** 2), 0, (12 * k[0] * y[0] * y[2])
/ (5 * (y[2] ** 2 + k[0]) ** 3)], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] + 0.1 / np.sqrt(k[0] + 0.01), yf[0] - 0.1 / np.sqrt(k[0] + 0.01), y0[2] + 0.1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = sol.y[:, 2]/np.sqrt(sol.const[0] + sol.y[:, 2] ** 2)
e2 = 1 / np.sqrt(sol.y[:, 2] ** 2 + sol.const[0]) - sol.y[:, 2] ** 2 / (sol.y[:, 2] ** 2 + sol.const[0]) ** (3 / 2)
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t18(algorithm, const):
def odefun(y, _, k):
return y[1], (-y[1] / k[0]), 1
def odejac(_, __, k):
df_dy = np.array([[0, 1, 0], [0, -1 / k[0], 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] - 1, yf[0] - np.exp(-1 / k[0]), y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.exp(-sol.y[:, 2] / sol.const[0])
e2 = -1 / (sol.const[0] * np.exp(sol.y[:, 2] / sol.const[0]))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t19(algorithm, const):
def odefun(y, _, k):
return y[1], (-y[1] / k[0]), 1
def odejac(_, __, k):
df_dy = np.array([[0, 1, 0], [0, -1 / k[0], 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0], y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t21(algorithm, const):
def odefun(y, _, k):
return y[1], (y[0] * (1 + y[0]) - np.exp(-2 * y[2] / np.sqrt(k[0]))) / k[0], 1
def odejac(y, _, k):
df_dy = np.array([[0, 1, 0],
[(2*y[0] + 1) / k[0], 0, (2 * np.exp(-(2 * y[2]) / np.sqrt(k[0]))) / k[0] ** (3 / 2)],
[0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] - 1, yf[0] - np.exp(-1 / np.sqrt(k[0])), y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.exp(-sol.y[:, 2] / np.sqrt(const))
e2 = -np.exp(-sol.y[:, 2] / np.sqrt(const)) / np.sqrt(const)
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t22(algorithm, const):
def odefun(y, _, k):
return y[1], -(y[1] + y[0] * y[0]) / k[0]
def odejac(y, _, k):
df_dy = np.array([[0, 1], [-(2*y[0]) / k[0], -1 / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0] - 1 / 2
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0], [0, 0]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t23(algorithm, const):
def odefun(y, _, k):
return y[1], 1 / k[0] * np.sinh(y[0] / k[0])
def odejac(y, _, k):
df_dy = np.array([[0, 1], [np.cosh(y[0] / k[0]) / k[0] ** 2, 0]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0] - 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0], [1, 0]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t24(algorithm, const):
def odefun(x, _, k):
a_mat_x = 1 + x[2] ** 2
a_mat_xp = 2 * x[2]
y = 1.4
return (x[1], (((1 + y) / 2 - k[0] * a_mat_xp) * x[0] * x[1] - x[1] / x[0] - (a_mat_xp / a_mat_x) * (
1 - (y - 1) / 2 * x[0] ** 2)) / (k[0] * a_mat_x * x[0]), 1)
def odejac(x, _, k):
y = 1.4
df_dy = np.array(
[[0, 1, 0],
[(x[1] * (y / 2 - 2 * k[0] * x[2] + 1 / 2) + x[1] / x[0] ** 2
+ (4 * x[0] * x[2] * (y / 2 - 1 / 2)) / (x[2] ** 2 + 1)) / (k[0] * x[0] * (x[2] ** 2 + 1))
- ((2 * x[2] * ((y / 2 - 1 / 2) * x[0] ** 2 - 1)) / (x[2] ** 2 + 1) - x[1] / x[0] + x[0] * x[1]
* (y / 2 - 2 * k[0] * x[2] + 1 / 2)) / (k[0] * x[0] ** 2 * (x[2] ** 2 + 1)),
(x[0] * (y / 2 - 2 * k[0] * x[2] + 1 / 2) - 1 / x[0]) / (k[0] * x[0] * (x[2] ** 2 + 1)),
-((4 * x[2] ** 2 * ((y / 2 - 1 / 2) * x[0] ** 2 - 1)) / (x[2] ** 2 + 1) ** 2
- (2 * ((y / 2 - 1 / 2) * x[0] ** 2 - 1)) / (x[2] ** 2 + 1) + 2 * k[0] * x[0] * x[1])
/ (k[0] * x[0] * (x[2] ** 2 + 1))
- (2 * x[2] * ((2 * x[2] * ((y / 2 - 1 / 2) * x[0] ** 2 - 1))
/ (x[2] ** 2 + 1) - x[1] / x[0] + x[0] * x[1]
* (y / 2 - 2 * k[0] * x[2] + 1 / 2))) / (k[0] * x[0] * (x[2] ** 2 + 1) ** 2)],
[0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(x0, xf, _, __, ___):
return x0[0] - 0.9129, xf[0] - 0.375, x0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=4)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[1, 1, 0], [0.1, 0.1, 1]])
sol.const = np.array([const])
# noinspection PyTypeChecker
cc = np.linspace(const*10, const, 10)
for c in cc:
sol = copy.deepcopy(sol)
sol.const = np.array([c])
sol = algo.solve(sol)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t25(algorithm, const):
def odefun(y, _, k):
return y[1], y[0] * (1 - y[1]) / k[0]
def odejac(y, _, k):
df_dy = np.array([[0, 1], [(1-y[1]) / k[0], -y[0] / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1 / 3, yf[0] - 1 / 3
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=16)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[-1/3, 1], [1/3, 1]])
sol.const = np.array([const])
# noinspection PyTypeChecker
cc = np.linspace(const*10, const, 10)
for c in cc:
sol = copy.deepcopy(sol)
sol.const = np.array([c])
sol = algo.solve(sol)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t26(algorithm, const):
def odefun(y, _, k):
return y[1], y[0] * (1 - y[1]) / k[0]
def odejac(y, _, k):
df_dy = np.array([[0, 1], [(1-y[1]) / k[0], -y[0] / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0] + 1/3
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=64)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[1, 0], [-1/3, 0]])
sol.const = np.array([const])
# noinspection PyTypeChecker
cc = np.linspace(const*10, const, 10)
for c in cc:
sol = copy.deepcopy(sol)
sol.const = np.array([c])
sol = algo.solve(sol)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t27(algorithm, const):
def odefun(y, _, k):
return y[1], y[0] * (1 - y[1]) / k[0]
def odejac(y, _, k):
df_dy = np.array([[0, 1], [(1-y[1]) / k[0], -y[0] / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0] - 1 / 3
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=4)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[1, 0], [1 / 3, 0]])
sol.const = np.array([const])
# noinspection PyTypeChecker
cc = np.linspace(const*10, const, 10)
for c in cc:
sol = copy.deepcopy(sol)
sol.const = np.array([c])
sol = algo.solve(sol)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t28(algorithm, const):
def odefun(y, _, k):
return y[1], (y[0] - y[0]*y[1]) / k[0]
def odejac(y, _, k):
df_dy = np.array([[0, 1], [(1-y[1]) / k[0], -y[0] / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0] - 3/2
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=1)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[1, 0], [3/2, 0]])
sol.const = np.array([const])
sol = algo.solve(sol)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t29(algorithm, const):
def odefun(y, _, k):
return y[1], (y[0] - y[0]*y[1]) / k[0]
def odejac(y, _, k):
df_dy = np.array([[0, 1], [(1-y[1]) / k[0], -y[0] / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0] - 3/2
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=1)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[0, 0], [3/2, 0]])
sol.const = np.array([const])
sol = algo.solve(sol)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t30(algorithm, const):
def odefun(y, _, k):
return y[1], (y[0] - y[0]*y[1]) / k[0]
def odejac(y, _, k):
df_dy = np.array([[0, 1], [(1-y[1]) / k[0], -y[0] / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 7/6, yf[0] - 3/2
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=8)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[-7/6, 0], [3/2, 0]])
sol.const = np.array([const])
# noinspection PyTypeChecker
cc = np.linspace(const * 10, const, 10)
for c in cc:
sol = copy.deepcopy(sol)
sol.const = np.array([c])
sol = algo.solve(sol)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t31(algorithm, const):
def odefun(y, _, k):
return np.sin(y[1]), y[2], -y[3] / k[0], \
((y[0]-1) * np.cos(y[1]) - y[2] / np.cos(y[1]) - k[0] * y[3] * np.tan(y[1])) / k[0]
def odejac(y, _, k):
df_dy = np.array(
[[0, np.cos(y[1]), 0, 0], [0, 0, 1, 0], [0, 0, 0, -1 / k[0]],
[np.cos(y[1]) / k[0], -(np.sin(y[1]) * (y[0] - 1) + k[0] * y[3] * (np.tan(y[1]) ** 2 + 1)
+ (y[2] * np.sin(y[1])) / np.cos(y[1]) ** 2) / k[0], -1 / (k[0] * np.cos(y[1])),
-np.tan(y[1])]])
df_dp =
|
np.empty((4, 0))
|
numpy.empty
|
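# Illustrative cross-check (separate from the row above) of the T1 problem
# (y'' = y / k, y(0) = 1, y(1) = 0) using scipy's collocation solver instead of the
# beluga shooting solver; k below is just an example stiffness constant.
import numpy as np
from scipy.integrate import solve_bvp

k = 1e-1

def fun(t, y):
    return np.vstack((y[1], y[0] / k))

def bc(ya, yb):
    return np.array([ya[0] - 1, yb[0]])

t = np.linspace(0, 1, 11)
sol = solve_bvp(fun, bc, t, np.zeros((2, t.size)))

exact = (np.exp(-sol.x / np.sqrt(k)) - np.exp((sol.x - 2) / np.sqrt(k))) / (1 - np.exp(-2 / np.sqrt(k)))
assert sol.success
assert np.max(np.abs(sol.y[0] - exact)) < 1e-2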
"""
Utilities for Gaussian process (GP) inference.
"""
import numpy as np
from scipy.linalg import solve_triangular
from scipy.spatial.distance import cdist
def kern_exp_quad(xmat1, xmat2, ls, alpha):
"""
Exponentiated quadratic kernel function (aka squared exponential kernel aka
RBF kernel).
"""
return alpha ** 2 * kern_exp_quad_noscale(xmat1, xmat2, ls)
def kern_exp_quad_noscale(xmat1, xmat2, ls):
"""
Exponentiated quadratic kernel function (aka squared exponential kernel aka
RBF kernel), without scale parameter.
"""
sq_norm = (-1 / (2 * ls ** 2)) * cdist(xmat1, xmat2, 'sqeuclidean')
return np.exp(sq_norm)
def squared_euc_distmat(xmat1, xmat2, coef=1.0):
"""
Distance matrix of squared euclidean distance (multiplied by coef) between
points in xmat1 and xmat2.
"""
return coef * cdist(xmat1, xmat2, 'sqeuclidean')
def kern_distmat(xmat1, xmat2, ls, alpha, distfn):
"""
Kernel computed from a distance matrix, via the passed-in distfn (which is
assumed to be a function of xmat1 and xmat2 only).
"""
distmat = distfn(xmat1, xmat2)
sq_norm = -distmat / ls ** 2
return alpha ** 2 * np.exp(sq_norm)
def get_cholesky_decomp(k11_nonoise, sigma, psd_str):
"""Return cholesky decomposition."""
if psd_str == 'try_first':
k11 = k11_nonoise + sigma ** 2 *
|
np.eye(k11_nonoise.shape[0])
|
numpy.eye
|
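# Hedged sketch (separate from the row above) of how these helpers are typically
# combined for GP regression: build k11 with the exponentiated quadratic kernel, add
# observation noise, Cholesky-factorise, and back-substitute for the posterior mean.
# The data and the hyperparameters ls, alpha, sigma are arbitrary example values.
import numpy as np
from scipy.linalg import cholesky, solve_triangular
from scipy.spatial.distance import cdist

ls, alpha, sigma = 0.5, 1.0, 0.1
x_train = np.linspace(0, 1, 8).reshape(-1, 1)
y_train = np.sin(2 * np.pi * x_train).ravel()
x_test = np.linspace(0, 1, 50).reshape(-1, 1)

def kern(a, b):
    return alpha ** 2 * np.exp((-1 / (2 * ls ** 2)) * cdist(a, b, 'sqeuclidean'))

k11 = kern(x_train, x_train) + sigma ** 2 * np.eye(len(x_train))
k21 = kern(x_test, x_train)

lmat = cholesky(k11, lower=True)                     # k11 = L @ L.T
smat = solve_triangular(lmat, y_train, lower=True)   # solve L s = y
post_mean = k21 @ solve_triangular(lmat.T, smat, lower=False)
print(post_mean[:5])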
# --------------------------------------------------------------------------------------
# Copyright (C) 2020–2021 by <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
# --------------------------------------------------------------------------------------
"""Module to find signals using the Jade ICA method.
This module contains the function ``_jade``, which performs blind source separation
of real signals. The original Python code can be found at
https://github.com/gvacaliuc/jade_c/blob/master/jade.py
"""
from __future__ import annotations
import logging
from itertools import combinations
from typing import Any
from typing import Optional
import numpy as np
from nptyping import Float
from nptyping import NDArray
from scipy import linalg
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_array
from sklearn.utils.validation import check_is_fitted
logger = logging.getLogger(__name__)
def _jade(
arr: NDArray[(Any, ...), Float], n_components: int = 1
) -> NDArray[(Any, ...), Float]:
"""Blind separation of real signals with JADE.
jadeR implements JADE, an Independent Component Analysis (ICA) algorithm developed
by <NAME>. See http://www.tsi.enst.fr/~cardoso/guidesepsou.html, and
papers cited at the end of the source file.
Translated into NumPy from the original Matlab Version 1.8 (May 2005) by <NAME>, http://gbeckers.nl .
Parameters
----------
arr : NDArray
        a data matrix of shape (n_samples, n_features)
n_components : int
output matrix B has size mxn so that only m sources are extracted. This is done
by restricting the operation of jadeR to the m first principal components.
        Defaults to 1, so a single source is extracted.
Returns
-------
NDArray
An m*n matrix B (NumPy matrix type), such that Y=B*X are separated sources
extracted from the n*T data matrix X. If m is omitted, B is a square n*n matrix
(as many sources as sensors). The rows of B are ordered such that the columns of
:math:`pinv(B)` are in order of decreasing norm; this has the effect that the
`most energetically significant` components appear first in the rows of
:math:`Y = B * X`.
Notes
-----
Quick notes (more at the end of this file):
- This code is for REAL-valued signals. A MATLAB implementation of JADE for both
real and complex signals is also available from
http://sig.enst.fr/~cardoso/stuff.html
- This algorithm differs from the first released implementations of JADE in that it
has been optimized to deal more efficiently
1) with real signals (as opposed to complex)
2) with the case when the ICA model does not necessarily hold.
- There is a practical limit to the number of independent components that can be
extracted with this implementation. Note that the first step of JADE amounts to a
PCA with dimensionality reduction from `n` to `m` (which defaults to n). In
practice m cannot be *very large* (more than 40, 50, 60... depending on available
memory)
- See more notes, references and revision history at the end of this file and more
stuff at http://sig.enst.fr/~cardoso/stuff.html
- For more info on NumPy translation, see the end of this file.
- This code is supposed to do a good job! Please report any problem relating to
      the NumPy code <EMAIL>
Copyright original Matlab code : Jean-<NAME> <<EMAIL>>
Copyright Numpy translation : <NAME> <<EMAIL>>
"""
logger.info("jade -> Looking for %d sources", n_components)
logger.info("jade -> Removing the mean value")
n_samples, _ = arr.shape
arr -= arr.mean(axis=0)
# whitening & projection onto signal subspace
# ===========================================
logger.info("jade -> Whitening the data")
# --- PCA ----------------------------------------------------------
u, s, vh = linalg.svd(arr, full_matrices=False)
u, s, vh = u[:, :n_components], s[:n_components], vh[:n_components]
B = linalg.inv(np.diag(s)) @ vh * np.sqrt(n_samples)
arr = u * np.sqrt(n_samples)
del u, s, vh
# NOTE: At this stage, X is a PCA analysis in m components of the real data, except
# that all its entries now have unit variance. Any further rotation of X will
# preserve the property that X is a vector of uncorrelated components. It remains
# to find the rotation matrix such that the entries of X are not only uncorrelated
    # but also "as independent as possible". This independence is measured by
    # correlations of order higher than 2. We have defined such a measure of
    # independence which
    # 1) is a reasonable approximation of the mutual information
    # 2) can be optimized by a "fast algorithm"
    # This measure of independence also corresponds to the "diagonality" of a set of
    # cumulant matrices. The code below finds the "missing rotation" as the matrix
# which best diagonalizes a particular set of cumulant matrices.
# Estimation of the cumulant matrices.
# ====================================
logger.info("jade -> Estimating cumulant matrices")
# Dim. of the space of real symm matrices
dimsymm = n_components * (n_components + 1) // 2
nbcm = dimsymm # number of cumulant matrices
# Storage for cumulant matrices
CM = np.zeros((n_components, n_components * nbcm))
R = np.eye(n_components)
# I am using a symmetry trick to save storage. I should write a short note one of
# these days explaining what is going on here.
# will index the columns of CM where to store the cum. mats.
Range = np.arange(n_components)
sqrt2 = np.sqrt(2)
for im in range(n_components):
Xim = np.c_[arr[:, im]]
Xijm = Xim ** 2
# Note to myself: the -R on next line can be removed: it does not affect
# the joint diagonalization criterion
Rim = np.c_[R[:, im]]
CM[:, Range] = (Xijm * arr).T @ arr / n_samples - R - 2 * (Rim @ Rim.T)
Range += n_components
for jm in range(im):
Xijm = Xim * np.c_[arr[:, jm]]
Rjm = np.c_[R[:, jm]]
CM[:, Range] = (
sqrt2 * (Xijm * arr).T @ arr / n_samples - Rim @ Rjm.T - Rjm @ Rim.T
)
Range = Range + n_components
# Now we have nbcm = m(m+1)/2 cumulants matrices stored in a big m x m*nbcm array.
V = np.eye(n_components)
    On = np.zeros(CM.shape[0])
import argparse
import sys
import math
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack
from torch.autograd import Variable
import models
import torchaudio.transforms as tat
import torchvision.transforms as tvt
import mgc_transforms
from loader_audioset import *
from tqdm import tqdm
from sklearn.metrics import confusion_matrix
class CFG(object):
def __init__(self):
parser = self.get_params()
args = parser.parse_args()
self.args = args
self.save_model = args.save_model
self.load_model = args.load_model
self.chkpt_interval = args.chkpt_interval
self.noises_dir = args.noises_dir
self.use_precompute = args.use_precompute
self.use_cache = args.use_cache
self.data_path = args.data_path
self.dataset = args.dataset
self.batch_size = args.batch_size
self.num_workers = args.num_workers
self.log_interval = args.log_interval
self.do_validate = args.validate
        self.max_len = args.max_len if args.max_len else 80000  # 5 s at 16 kHz (160000 = 10 s)
self.use_cuda = torch.cuda.is_available()
self.device = torch.device("cuda" if self.use_cuda else "cpu")
self.ngpu = torch.cuda.device_count()
print("CUDA: {} with {} devices".format(self.use_cuda, self.ngpu))
self.model_name = args.model_name
self.loss_criterion = args.loss_criterion
# load weights
if args.load_model:
state_dicts = torch.load(args.load_model, map_location=lambda storage, loc: storage)
else:
state_dicts = {
"models": None,
"optimizer": None,
"epoch": 0,
}
self.cur_epoch = state_dicts["epoch"]
self.ds, self.dl = self.get_dataloader()
self.model_list = self.get_models(state_dicts["models"])
self.epochs, self.criterion, self.optimizer, self.scheduler = self.init_optimizer(state_dicts["optimizer"])
if self.ngpu > 1 and "attn" not in self.model_name:
self.model_list = [nn.DataParallel(m) for m in self.model_list]
self.valid_losses = []
self.train_losses = []
self.tqdmiter = None
def get_params(self):
parser = argparse.ArgumentParser(description='PyTorch Language ID Classifier Trainer')
parser.add_argument('--data-path', type=str, default="data/audioset",
help='data path')
parser.add_argument('--lr', type=float, default=0.0001,
help='initial learning rate')
parser.add_argument('--epochs', type=int, default=10,
help='upper epoch limit')
parser.add_argument('--batch-size', type=int, default=50, metavar='b',
help='batch size')
parser.add_argument('--freq-bands', type=int, default=224,
help='number of frequency bands to use')
parser.add_argument('--num-samples', type=int, default=None,
help='limit number of samples to load for testing')
parser.add_argument('--dataset', type=str, default="balanced",
help='which Audioset dataset to use balanced / eval / unbalanced')
parser.add_argument('--add-no-label', action='store_true',
help='add a label for "no label" or background noise')
parser.add_argument('--use-cache', action='store_true',
help='use cache in the dataloader')
parser.add_argument('--use-precompute', action='store_true',
help='precompute transformations')
parser.add_argument('--noises-dir', type=str, default=None,
help='absolute path of noises to add to the audio')
parser.add_argument('--num-workers', type=int, default=0,
help='number of workers for data loader')
parser.add_argument('--validate', action='store_true',
help='do out-of-bag validation')
parser.add_argument('--num-validate', type=int, default=None,
help='number of validation samples')
parser.add_argument('--log-interval', type=int, default=5,
help='reports per epoch')
parser.add_argument('--chkpt-interval', type=int, default=10,
help='how often to save checkpoints')
parser.add_argument('--max-len', type=int, default=None,
help='max length of sample')
parser.add_argument('--model-name', type=str, default="resnet34_conv",
help='name of model to use')
parser.add_argument('--loss-criterion', type=str, default="bce",
help='loss criterion')
parser.add_argument('--load-model', type=str, default=None,
help='path of model to load')
parser.add_argument('--save-model', action='store_true',
help='path to save the final model')
parser.add_argument('--train-full-model', action='store_true',
help='train full model vs. final layer')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
return parser
def get_models(self, weights=None):
NUM_CLASSES = len(self.ds.labels_dict)
use_pretrained = True if not self.load_model else False
if "resnet34" in self.model_name:
model_list = [models.resnet.resnet34(use_pretrained, num_genres=NUM_CLASSES)]
elif "resnet101" in self.model_name:
model_list = [models.resnet.resnet101(use_pretrained, num_genres=NUM_CLASSES)]
elif "squeezenet" in self.model_name:
model_list = [models.squeezenet.squeezenet(use_pretrained, num_genres=NUM_CLASSES)]
elif "attn" in self.model_name:
self.hidden_size = 2500
kwargs_encoder = {
"input_size": self.args.freq_bands // 2,
"hidden_size": self.hidden_size,
"n_layers": 1,
"batch_size": self.batch_size
}
kwargs_decoder = {
"input_size": self.args.freq_bands // 2,
"hidden_size": self.hidden_size,
"output_size": NUM_CLASSES,
"attn_model": "general",
"n_layers": 1,
"dropout": 0.0, # was 0.1
"batch_size": self.batch_size
}
model_list = models.attn.attn(kwargs_encoder, kwargs_decoder)
elif "bytenet" in self.model_name:
self.d = self.args.freq_bands // 2
kwargs_encoder = {
"d": self.d,
"max_r": 16,
"k": 3,
"num_sets": 6,
"reduce_out": [8, 4, 0, 2, 0, 4],
}
kwargs_decoder = {
"d": self.d,
"max_r": 16,
"k": 3,
"num_sets": 6,
"num_classes": NUM_CLASSES,
"reduce_out": None,
"use_logsm": False,
}
model_list = models.bytenet.bytenet(kwargs_encoder, kwargs_decoder)
# move model to GPU or multi-GPU
model_list = [m.to(self.device) for m in model_list]
# load weights
if weights is not None:
for i, sd in enumerate(weights):
model_list[i].load_state_dict(sd)
#if self.ngpu > 1:
# model_list = [nn.DataParallel(m) for m in model_list]
return model_list
def get_dataloader(self):
usl = True if self.loss_criterion == "crossentropy" else False
ds = AUDIOSET(self.data_path, dataset=self.args.dataset, noises_dir=self.noises_dir,
use_cache=False, num_samples=self.args.num_samples,
add_no_label=self.args.add_no_label, use_single_label=usl)
if any(x in self.model_name for x in ["resnet34_conv", "resnet101_conv", "squeezenet"]):
T = tat.Compose([
#tat.PadTrim(self.max_len, fill_value=1e-8),
mgc_transforms.SimpleTrim(self.max_len),
mgc_transforms.MEL(sr=16000, n_fft=600, hop_length=300, n_mels=self.args.freq_bands//2),
#mgc_transforms.Scale(),
mgc_transforms.BLC2CBL(),
mgc_transforms.Resize((self.args.freq_bands, self.args.freq_bands)),
])
elif "_mfcc_librosa" in self.model_name:
T = tat.Compose([
#tat.PadTrim(self.max_len, fill_value=1e-8),
mgc_transforms.SimpleTrim(self.max_len),
mgc_transforms.MFCC2(sr=16000, n_fft=600, hop_length=300, n_mfcc=12),
mgc_transforms.Scale(),
mgc_transforms.BLC2CBL(),
mgc_transforms.Resize((self.args.freq_bands, self.args.freq_bands)),
])
elif "_mfcc" in self.model_name:
sr = 16000
ws = 800
hs = ws // 2
n_fft = 512 # 256
n_filterbanks = 26
n_coefficients = 12
low_mel_freq = 0
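            # Standard mel filter-bank construction: map the Nyquist frequency to the
            # mel scale, place n_filterbanks + 2 evenly spaced points on that scale,
            # convert them back to Hz, and map the Hz points to FFT bin indices.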
high_freq_mel = (2595 * math.log10(1 + (sr/2) / 700))
mel_pts = torch.linspace(low_mel_freq, high_freq_mel, n_filterbanks + 2) # sr = 16000
hz_pts = torch.floor(700 * (torch.pow(10,mel_pts / 2595) - 1))
bins = torch.floor((n_fft + 1) * hz_pts / sr)
td = {
"RfftPow": mgc_transforms.RfftPow(n_fft),
"FilterBanks": mgc_transforms.FilterBanks(n_filterbanks, bins),
"MFCC": mgc_transforms.MFCC(n_filterbanks, n_coefficients),
}
T = tat.Compose([
#tat.PadTrim(self.max_len, fill_value=1e-8),
mgc_transforms.Preemphasis(),
mgc_transforms.SimpleTrim(self.max_len),
mgc_transforms.Sig2Features(ws, hs, td),
mgc_transforms.DummyDim(),
mgc_transforms.Scale(),
tat.BLC2CBL(),
mgc_transforms.Resize((self.args.freq_bands, self.args.freq_bands)),
])
elif "attn" in self.model_name:
T = tat.Compose([
mgc_transforms.SimpleTrim(self.max_len),
mgc_transforms.MEL(sr=16000, n_fft=600, hop_length=300, n_mels=self.args.freq_bands//2),
#mgc_transforms.Scale(),
mgc_transforms.SqueezeDim(2),
tat.LC2CL(),
])
elif "bytenet" in self.model_name:
#offset = 714 # make clips divisible by 224
T = tat.Compose([
mgc_transforms.SimpleTrim(self.max_len),
#tat.PadTrim(self.max_len),
mgc_transforms.Scale(),
tat.LC2CL(),
])
ds.transform = T
if self.loss_criterion == "crossentropy":
TT = mgc_transforms.XEntENC(ds.labels_dict)
#TT = mgc_transforms.BinENC(ds.labels_dict, dtype=torch.int64)
else:
TT = mgc_transforms.BinENC(ds.labels_dict)
ds.target_transform = TT
ds.use_cache = self.use_cache
if self.use_cache:
ds.init_cache()
if self.use_precompute:
ds.load_precompute(self.model_name)
dl = data.DataLoader(ds, batch_size=self.batch_size, drop_last=True,
num_workers=self.num_workers, collate_fn=bce_collate,
shuffle=True)
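        # Attention models consume padded, length-annotated batches that are packed
        # in fit()/validate(), so they use a different collate function (assumed to
        # return ((inputs, lengths), targets) ordered for pack_padded_sequence).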
if "attn" in self.model_name:
dl.collate_fn = sort_collate
return ds, dl
def init_optimizer(self, weights=None):
#if self.ngpu < 2 or "attn" in self.model_name:
# model = self.model
#else:
# model = self.model.module
model_list = self.model_list
if self.loss_criterion == "softmargin":
criterion = nn.MultiLabelSoftMarginLoss()
elif self.loss_criterion == "margin":
criterion = nn.MultiLabelMarginLoss()
elif self.loss_criterion == "crossentropy":
criterion = nn.CrossEntropyLoss()
else:
criterion = nn.BCEWithLogitsLoss()
epochs = None
if "squeezenet" in self.model_name:
if self.dataset == "unbalanced":
epochs = [5, 10, 35, 50]
else:
epochs = [10, 20, 100]
opt_type = torch.optim.Adam
opt_params = [
{"params": model_list[0][1].features.parameters(), "lr": 0.},
{"params": model_list[0][1].classifier.parameters(), "lr": self.args.lr}
]
opt_kwargs = {"amsgrad": True}
elif any(x in self.model_name for x in ["resnet34", "resnet101"]):
if "resnet34" in self.model_name:
if self.dataset == "unbalanced":
epochs = [10, 25, 40, 50]
else:
epochs = [20, 60, 100, 120]
elif "resnet101" in self.model_name:
if self.dataset == "unbalanced":
epochs = [10, 20, 28, 33]
else:
epochs = [20, 40, 80]
opt_type = torch.optim.Adam
feature_params = nn.ParameterList()
for m in list(model_list[0][1].children())[:-1]:
feature_params.extend(m.parameters())
fc_params = model_list[0][1].fc.parameters()
opt_params = [
{"params": feature_params, "lr": 0.}, # features
{"params": fc_params, "lr": self.args.lr} # classifier
]
opt_kwargs = {"amsgrad": True}
elif any(x in self.model_name for x in ["attn", "bytenet"]):
if self.dataset == "unbalanced":
epochs = [8, 20, 70]
else:
epochs = [25, 70, 100]
if "attn" in self.model_name:
opt_type = torch.optim.SGD
opt_kwargs = {"momentum": 0.9}
else:
opt_type = torch.optim.Adam
opt_kwargs = {"amsgrad": True}
opt_params = [
{"params": model_list[0].parameters(), "lr": self.args.lr},
{"params": model_list[1].parameters(), "lr": self.args.lr}
]
optimizer = opt_type(opt_params, **opt_kwargs)
if weights is not None:
optimizer.load_state_dict(weights)
# https://github.com/pytorch/pytorch/issues/2830, fixed in master?
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
v = v.to(self.device)
state[k] = v
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=epochs[1:], gamma=0.4)
return epochs, criterion, optimizer, scheduler
def adjust_opt_params(self, epoch):
# automate fine tuning
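        # At the first milestone epoch, unfreeze the pre-trained feature extractor
        # (its parameter group starts with lr=0 in init_optimizer) by raising its
        # learning rate to args.lr; until then only the classifier head is trained.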
if epoch == self.epochs[0]:
if "squeezenet" in self.model_name:
self.optimizer.param_groups[0]["initial_lr"] = self.scheduler.base_lrs[0] = self.optimizer.param_groups[1]["initial_lr"]
self.optimizer.param_groups[0]["lr"] = self.args.lr
elif "resnet" in self.model_name:
self.optimizer.param_groups[0]["initial_lr"] = self.scheduler.base_lrs[0] = self.optimizer.param_groups[1]["initial_lr"]
self.optimizer.param_groups[0]["lr"] = self.args.lr
elif "attn" in self.model_name:
# no finetuning of these models yet
pass
elif "bytenet" in self.model_name:
# no finetuning of these models yet
pass
#print(self.optimizer)
def fit(self, epoch, early_stop=None):
epoch_losses = []
self.ds.set_split("train")
self.adjust_opt_params(epoch)
self.scheduler.step()
#self.optimizer = self.get_optimizer(epoch)
num_batches = len(self.dl)
if any(x in self.model_name for x in ["resnet", "squeezenet"]):
if self.use_precompute:
pass # TODO implement network precomputation
#self.precompute(self.L["fc_layer"]["precompute"])
m = self.model_list[0]
with tqdm(total=num_batches, leave=False, position=1) as t:
for i, (mb, tgts) in enumerate(self.dl):
if i == early_stop: break
m.train()
mb, tgts = mb.to(self.device), tgts.to(self.device)
m.zero_grad()
out = m(mb)
if "margin" in self.loss_criterion:
out = F.sigmoid(out)
if self.loss_criterion == "margin":
tgts = tgts.long()
#print(tgts)
loss = self.criterion(out, tgts)
loss.backward()
self.optimizer.step()
epoch_losses.append(loss.item())
if self.tqdmiter:
self.tqdmiter.set_postfix({"loss": "{0:.6f}".format(epoch_losses[-1])})
self.tqdmiter.refresh()
else:
print(epoch_losses[-1])
if i % self.log_interval == 0 and self.do_validate and i != 0:
with torch.no_grad():
self.validate(epoch)
self.ds.set_split("train")
t.update()
elif "attn" in self.model_name:
encoder = self.model_list[0]
decoder = self.model_list[1]
with tqdm(total=num_batches, leave=False, position=1) as t:
for i, ((mb, lengths), tgts) in enumerate(self.dl):
# set model into train mode and clear gradients
encoder.train()
decoder.train()
encoder.zero_grad()
decoder.zero_grad()
# set inputs and targets
mb, tgts = mb.to(self.device), tgts.to(self.device)
# create the initial hidden input before packing sequence
encoder_hidden = encoder.initHidden(mb)
# pack sequence
mb = pack(mb, lengths, batch_first=True)
#print(mb.size(), tgts.size())
# encode sequence
encoder_output, encoder_hidden = encoder(mb, encoder_hidden)
# Prepare input and output variables for decoder
#dec_size = [[[0] * encoder.hidden_size]*1]*self.batch_size
#print(encoder_output.detach().new(dec_size).size())
#enc_out_var, enc_out_len = unpack(encoder_output, batch_first=True)
#dec_i = enc_out_var.new_zeros((self.batch_size, 1, encoder.hidden_size))
dec_h = encoder_hidden # Use last (forward) hidden state from encoder
#print(decoder.n_layers, encoder_hidden.size(), dec_i.size(), dec_h.size())
# run through decoder in one shot
mb, _ = unpack(mb, batch_first=True)
dec_o, dec_h, dec_attn = decoder(mb, dec_h, encoder_output)
dec_o.squeeze_()
#print(dec_o)
#print(dec_o.size(), dec_h.size(), dec_attn.size(), tgts.size())
#print(dec_o.view(-1, decoder.output_size).size(), tgts.view(-1).size())
# calculate loss and backprop
if "margin" in self.loss_criterion:
dec_o = F.sigmoid(dec_o)
if self.loss_criterion == "margin":
tgts = tgts.long()
loss = self.criterion(dec_o, tgts)
#nn.utils.clip_grad_norm(encoder.parameters(), 0.05)
#nn.utils.clip_grad_norm(decoder.parameters(), 0.05)
loss.backward()
self.optimizer.step()
epoch_losses.append(loss.item())
if self.tqdmiter:
self.tqdmiter.set_postfix({"loss": "{0:.6f}".format(epoch_losses[-1])})
self.tqdmiter.refresh()
else:
print(epoch_losses[-1])
if i % self.log_interval == 0 and self.do_validate and i != 0:
with torch.no_grad():
self.validate(epoch)
self.ds.set_split("train")
t.update()
elif "bytenet" in self.model_name:
encoder = self.model_list[0]
decoder = self.model_list[1]
with tqdm(total=num_batches, leave=False, position=1) as t:
for i, (mb, tgts) in enumerate(self.dl):
# set model into train mode and clear gradients
encoder.train()
decoder.train()
encoder.zero_grad()
decoder.zero_grad()
# set inputs and targets
mb, tgts = mb.to(self.device), tgts.to(self.device)
mb = encoder(mb)
out = decoder(mb)
if "margin" in self.loss_criterion:
out = F.sigmoid(out)
if self.loss_criterion == "margin":
tgts = tgts.long()
loss = self.criterion(out, tgts)
loss.backward()
self.optimizer.step()
epoch_losses.append(loss.item())
if self.tqdmiter:
self.tqdmiter.set_postfix({"loss": "{0:.6f}".format(epoch_losses[-1])})
self.tqdmiter.refresh()
else:
print(epoch_losses[-1])
if i % self.log_interval == 0 and self.do_validate and i != 0:
with torch.no_grad():
self.validate(epoch)
self.ds.set_split("train")
t.update()
self.train_losses.append(epoch_losses)
if epoch % 10 == 0 and epoch != 0 and self.use_cache:
self.ds.init_cache()
def validate(self, epoch):
self.ds.set_split("valid", self.args.num_samples)
running_validation_loss = []
accuracies = []
acc = 0
threshold = 1 - (1. / 3.)
num_batches = len(self.dl)
if any(x in self.model_name for x in ["resnet", "squeezenet"]):
m = self.model_list[0]
# set model(s) into eval mode
m.eval()
with tqdm(total=num_batches, leave=True, position=2,
postfix={"acc": acc, "loss": "{0:.6f}".format(0.)}) as t:
for mb_valid, tgts_valid in self.dl:
mb_valid = mb_valid.to(self.device)
tgts_valid = tgts_valid.to(torch.device("cpu"))
out_valid = m(mb_valid)
out_valid = out_valid.to(torch.device("cpu"))
if "margin" in self.loss_criterion:
out_valid = F.sigmoid(out_valid)
if self.loss_criterion == "margin":
tgts_valid = tgts_valid.long()
loss_valid = self.criterion(out_valid, tgts_valid)
running_validation_loss += [loss_valid.item()]
if "margin" not in self.loss_criterion:
out_valid = F.sigmoid(out_valid)
if self.loss_criterion == "crossentropy":
out_pred = out_valid.max(1)[1]
acc = (out_pred == tgts_valid).sum().item() / tgts_valid.size(0)
else:
acc = 0.
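                        # Per-sample top-k accuracy: take as many top-scoring classes
                        # as there are ground-truth labels and average the fraction
                        # of labels recovered.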
num_out = out_valid.size(0)
for ov, tgt in zip(out_valid, tgts_valid):
tgt = torch.LongTensor([i for i, x in enumerate(tgt) if x == 1])
num_tgt = tgt.size(0)
ov = torch.topk(ov, num_tgt)[1]
correct = len(np.intersect1d(tgt.numpy(), ov.numpy()))
acc += (correct / num_tgt) / num_out
accuracies.append(acc)
t.set_postfix({"acc": acc, "loss": "{0:.6f}".format(running_validation_loss[-1])})
t.update()
#correct += (out_valid.detach().max(1)[1] == tgts_valid.detach()).sum()
elif "attn" in self.model_name:
encoder = self.model_list[0]
decoder = self.model_list[1]
# set model(s) into eval mode
encoder.eval()
decoder.eval()
with tqdm(total=num_batches, leave=True, position=2,
postfix={"acc": acc, "loss": "{0:.6f}".format(0.)}) as t:
for i, ((mb_valid, lengths), tgts_valid) in enumerate(self.dl):
# set model into train mode and clear gradients
# move inputs to cuda if required
mb_valid = mb_valid.to(self.device)
tgts_valid = tgts_valid.to(torch.device("cpu"))
# init hidden before packing
encoder_hidden = encoder.initHidden(mb_valid)
# set inputs and targets
mb_valid = pack(mb_valid, lengths, batch_first=True)
#print(mb.size(), tgts.size())
encoder_output, encoder_hidden = encoder(mb_valid, encoder_hidden)
#print(encoder_output.detach().new(dec_size).size())
#enc_out_var, enc_out_len = unpack(encoder_output, batch_first=True)
#dec_i = enc_out_var.new_zeros((self.batch_size, 1, encoder.hidden_size))
dec_h = encoder_hidden # Use last (forward) hidden state from encoder
#print(decoder.n_layers, encoder_hidden.size(), dec_i.size(), dec_h.size())
# run through decoder in one shot
mb_valid, _ = unpack(mb_valid, batch_first=True)
out_valid, dec_h, dec_attn = decoder(mb_valid, dec_h, encoder_output)
# calculate loss
out_valid = out_valid.to(torch.device("cpu"))
out_valid.squeeze_()
if "margin" in self.loss_criterion:
out_valid = F.sigmoid(out_valid)
if self.loss_criterion == "margin":
tgts_valid = tgts_valid.long()
loss_valid = self.criterion(out_valid, tgts_valid)
running_validation_loss += [loss_valid.item()]
if "margin" not in self.loss_criterion:
out_valid = F.sigmoid(out_valid)
if self.loss_criterion == "crossentropy":
out_pred = out_valid.max(1)[1]
acc = (out_pred == tgts_valid).sum().item() / tgts_valid.size(0)
else:
acc = 0.
num_out = out_valid.size(0)
for ov, tgt in zip(out_valid, tgts_valid):
tgt = torch.LongTensor([i for i, x in enumerate(tgt) if x == 1])
num_tgt = tgt.size(0)
ov = torch.topk(ov, num_tgt)[1]
correct = len(np.intersect1d(tgt.numpy(), ov.numpy()))
acc += (correct / num_tgt) / num_out
accuracies.append(acc)
t.set_postfix({"acc": acc, "loss": "{0:.6f}".format(running_validation_loss[-1])})
t.update()
#correct += (dec_o.detach().max(1)[1] == tgts.detach()).sum()
elif "bytenet" in self.model_name:
encoder = self.model_list[0]
decoder = self.model_list[1]
# set model(s) into eval mode
encoder.eval()
decoder.eval()
with tqdm(total=num_batches, leave=True, position=2,
postfix={"acc": acc, "loss": "{0:.6f}".format(0.)}) as t:
for i, (mb_valid, tgts_valid) in enumerate(self.dl):
# set inputs and targets
mb_valid, tgts_valid = mb_valid.to(self.device), tgts_valid.to(torch.device("cpu"))
mb_valid = encoder(mb_valid)
out_valid = decoder(mb_valid)
if "margin" in self.loss_criterion:
out_valid = F.sigmoid(out_valid)
if self.loss_criterion == "margin":
tgts_valid = tgts_valid.long()
out_valid = out_valid.to(torch.device("cpu"))
loss_valid = self.criterion(out_valid, tgts_valid)
running_validation_loss += [loss_valid.item()]
if "margin" not in self.loss_criterion:
out_valid = F.sigmoid(out_valid)
if self.loss_criterion == "crossentropy":
out_pred = out_valid.max(1)[1]
acc = (out_pred == tgts_valid).sum().item() / tgts_valid.size(0)
else:
acc = 0.
num_out = out_valid.size(0)
for ov, tgt in zip(out_valid, tgts_valid):
tgt = torch.LongTensor([i for i, x in enumerate(tgt) if x == 1])
num_tgt = tgt.size(0)
ov = torch.topk(ov, num_tgt)[1]
correct = len(np.intersect1d(tgt.numpy(), ov.numpy()))
acc += (correct / num_tgt) / num_out
accuracies.append(acc)
t.set_postfix({"acc": acc, "loss": "{0:.6f}".format(running_validation_loss[-1])})
t.update()
#correct += (dec_o.detach().max(1)[1] == tgts.detach()).sum()
self.valid_losses.append((running_validation_loss, accuracies))
def test(self):
self.ds.set_split("test", self.args.num_samples)
thresh = 1. / 50.
prec = 0.
reca = 0.
acc = 0.
num_batches = len(self.dl)
num_labels = len(self.ds.labels_dict)
infer_outputs = []
counter_array = np.zeros((num_labels, 6)) # tgts, preds, tp, fp, tn, fn
if any(x in self.model_name for x in ["resnet", "squeezenet"]):
m = self.model_list[0]
# set model(s) into eval mode
m.eval()
with tqdm(total=num_batches, leave=False, position=1,
postfix={"accuracy": acc, "precision": prec}) as t:
for mb, tgts in self.dl:
mb = mb.to(self.device)
tgts = tgts.to(torch.device("cpu"))
# run inference
out = m(mb)
# move output to cpu for analysis / numpy
out = out.to(torch.device("cpu"))
infer_outputs.append((out.numpy().tolist(), tgts.numpy().tolist()))
if self.loss_criterion == "crossentropy":
out = F.softmax(out, dim = 1)
else:
out = F.sigmoid(out)
#out = F.softmax(out, dim = 1)
# out is either size (N, C) or (N, )
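                    # Build a predicted-label mask by taking the top-k scores, where k
                    # is the number of ground-truth labels, then tally per-label target
                    # and prediction counts (columns of counter_array).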
for tgt, o in zip(tgts, out):
o_mask = torch.zeros_like(o)
o_mask[torch.topk(o, tgt.sum().int().item())[1]] = 1.
o_mask = o_mask.numpy()
                        o_mask = o_mask.astype(bool)
tgt = tgt.numpy()
tgt_mask = tgt == 1.
counter_array[tgt_mask, 0] += 1
#print(o_mask); break;
counter_array[o_mask, 1] += 1
                        tp = np.logical_and(tgt_mask, o_mask)
'''
Created on Mar 11, 2017
@author: hans-werner
'''
import unittest
# Internal libraries
from assembler import Assembler
from fem import QuadFE
from fem import DofHandler
from function import Nodal
from gmrf import GMRF
from gmrf import distance
from gmrf import CovKernel
from gmrf import Covariance
from mesh import convert_to_array
from mesh import Mesh1D
from mesh import QuadMesh
# Built-in libraries
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla
from sksparse.cholmod import cholesky # @UnresolvedImport
import matplotlib.pyplot as plt
from plot import Plot
def laplacian_precision(n, sparse=True):
"""
Return the laplace precision matrix
"""
a = np.array([1] + [2]*(n-2) + [1])
b = np.array([-1]*(n-1))
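    # a holds the diagonal entries (1, 2, ..., 2, 1) and b the off-diagonal
    # entries (-1) of the tridiagonal 1D graph-Laplacian precision matrix.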
    Q = np.diag(a, 0)
import centrosome.filter
import numpy
import six.moves
from cellprofiler_core.constants.measurement import (
GROUP_NUMBER,
GROUP_INDEX,
R_FIRST_IMAGE_NUMBER,
R_SECOND_IMAGE_NUMBER,
R_FIRST_OBJECT_NUMBER,
R_SECOND_OBJECT_NUMBER,
C_COUNT,
MCA_AVAILABLE_POST_GROUP,
M_LOCATION_CENTER_X,
M_LOCATION_CENTER_Y,
)
from cellprofiler_core.image import ImageSetList
import cellprofiler_core.measurement
from cellprofiler_core.object import ObjectSet, Objects
import cellprofiler.modules.trackobjects
import tests.modules
from cellprofiler_core.pipeline import Pipeline, LoadException, RunException
from cellprofiler_core.workspace import Workspace
OBJECT_NAME = "objects"
def test_load_v3():
file = tests.modules.get_test_resources_directory("trackobjects/v3.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
module = pipeline.modules()[0]
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
assert module.tracking_method == "LAP"
assert module.object_name.value == "Nuclei"
assert module.pixel_radius.value == 80
assert module.display_type.value == "Color and Number"
assert not module.wants_image
assert module.measurement == "AreaShape_Area"
assert module.image_name == "TrackedCells"
assert module.wants_second_phase
assert module.split_cost == 41
assert module.merge_cost == 42
assert module.max_gap_score == 53
assert module.max_split_score == 54
assert module.max_merge_score == 55
assert module.max_frame_distance == 6
def test_load_v4():
file = tests.modules.get_test_resources_directory("trackobjects/v4.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
assert len(pipeline.modules()) == 3
for module, tracking_method, model, save_img, phase2, meas, dop in zip(
pipeline.modules(),
("Measurements", "Overlap", "Distance"),
(
cellprofiler.modules.trackobjects.M_BOTH,
cellprofiler.modules.trackobjects.M_RANDOM,
cellprofiler.modules.trackobjects.M_VELOCITY,
),
(True, False, True),
(True, False, True),
("Slothfulness", "Prescience", "Trepidation"),
(
cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER,
cellprofiler.modules.trackobjects.DT_COLOR_ONLY,
cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER,
),
):
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
assert module.tracking_method == tracking_method
assert module.model == model
assert module.wants_image.value == save_img
assert module.wants_second_phase.value == phase2
assert module.measurement == meas
assert module.pixel_radius == 50
assert module.display_type == dop
assert module.image_name == "TrackByLAP"
assert module.radius_std == 3
assert module.radius_limit.min == 3.0
assert module.radius_limit.max == 10.0
assert module.gap_cost == 40
assert module.split_cost == 1
assert module.merge_cost == 1
assert module.max_gap_score == 51
assert module.max_split_score == 52
assert module.max_merge_score == 53
assert module.max_frame_distance == 4
def test_load_v5():
file = tests.modules.get_test_resources_directory("trackobjects/v5.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
assert len(pipeline.modules()) == 1
m = pipeline.modules()[0]
assert isinstance(m, cellprofiler.modules.trackobjects.TrackObjects)
assert m.tracking_method == "LAP"
assert m.object_name == "Turtles"
assert m.measurement == "Steadiness"
assert m.pixel_radius == 44
assert m.display_type == cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER
assert not m.wants_image
assert m.image_name == "TrackedTurtles"
assert m.model == cellprofiler.modules.trackobjects.M_BOTH
assert m.radius_std == 3
assert m.radius_limit.min == 3
assert m.radius_limit.max == 11
assert m.wants_second_phase
assert m.gap_cost == 39
assert m.split_cost == 41
assert m.merge_cost == 42
assert m.max_frame_distance == 8
assert m.wants_minimum_lifetime
assert m.min_lifetime == 2
assert not m.wants_maximum_lifetime
assert m.max_lifetime == 1000
def test_load_v6():
file = tests.modules.get_test_resources_directory("trackobjects/v6.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
assert len(pipeline.modules()) == 1
m = pipeline.modules()[0]
assert isinstance(m, cellprofiler.modules.trackobjects.TrackObjects)
assert m.tracking_method == "LAP"
assert m.object_name == "Turtles"
assert m.measurement == "Steadiness"
assert m.pixel_radius == 44
assert m.display_type == cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER
assert not m.wants_image
assert m.image_name == "TrackedTurtles"
assert m.model == cellprofiler.modules.trackobjects.M_BOTH
assert m.radius_std == 3
assert m.radius_limit.min == 3
assert m.radius_limit.max == 11
assert m.wants_second_phase
assert m.gap_cost == 39
assert m.split_cost == 41
assert m.merge_cost == 42
assert m.max_frame_distance == 8
assert m.wants_minimum_lifetime
assert m.min_lifetime == 2
assert not m.wants_maximum_lifetime
assert m.max_lifetime == 1000
assert m.mitosis_cost == 79
assert m.mitosis_max_distance == 41
def runTrackObjects(labels_list, fn=None, measurement=None):
"""Run two cycles of TrackObjects
labels1 - the labels matrix for the first cycle
labels2 - the labels matrix for the second cycle
fn - a callback function called with the module and workspace. It has
the signature, fn(module, workspace, n) where n is 0 when
called prior to prepare_run, 1 prior to first iteration
and 2 prior to second iteration.
returns the measurements
"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.set_module_num(1)
module.object_name.value = OBJECT_NAME
module.pixel_radius.value = 50
module.measurement.value = "measurement"
measurements = cellprofiler_core.measurement.Measurements()
measurements.add_all_measurements(
"Image", GROUP_NUMBER, [1] * len(labels_list),
)
measurements.add_all_measurements(
"Image", GROUP_INDEX, list(range(1, len(labels_list) + 1)),
)
pipeline = Pipeline()
pipeline.add_module(module)
image_set_list = ImageSetList()
if fn:
fn(module, None, 0)
module.prepare_run(
Workspace(pipeline, module, None, None, measurements, image_set_list)
)
first = True
for labels, index in zip(labels_list, list(range(len(labels_list)))):
object_set = ObjectSet()
objects = Objects()
objects.segmented = labels
object_set.add_objects(objects, OBJECT_NAME)
image_set = image_set_list.get_image_set(index)
if first:
first = False
else:
measurements.next_image_set()
if measurement is not None:
measurements.add_measurement(
OBJECT_NAME, "measurement", numpy.array(measurement[index])
)
workspace = Workspace(
pipeline, module, image_set, object_set, measurements, image_set_list
)
if fn:
fn(module, workspace, index + 1)
module.run(workspace)
return measurements
def test_track_nothing():
"""Run TrackObjects on an empty labels matrix"""
columns = []
def fn(module, workspace, index, columns=columns):
if workspace is not None and index == 0:
columns += module.get_measurement_columns(workspace.pipeline)
measurements = runTrackObjects(
(numpy.zeros((10, 10), int), numpy.zeros((10, 10), int)), fn
)
features = [
feature
for feature in measurements.get_feature_names(OBJECT_NAME)
if feature.startswith(cellprofiler.modules.trackobjects.F_PREFIX)
]
assert all(
[column[1] in features for column in columns if column[0] == OBJECT_NAME]
)
for feature in cellprofiler.modules.trackobjects.F_ALL:
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "50"))
assert name in features
value = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(value) == 0
features = [
feature
for feature in measurements.get_feature_names("Image")
if feature.startswith(cellprofiler.modules.trackobjects.F_PREFIX)
]
assert all([column[1] in features for column in columns if column[0] == "Image"])
for feature in cellprofiler.modules.trackobjects.F_IMAGE_ALL:
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "50")
)
assert name in features
value = measurements.get_current_image_measurement(name)
assert value == 0
def test_00_track_one_then_nothing():
"""Run track objects on an object that disappears
Regression test of IMG-1090
"""
labels = numpy.zeros((10, 10), int)
labels[3:6, 2:7] = 1
measurements = runTrackObjects((labels, numpy.zeros((10, 10), int)))
feature = "_".join(
(
cellprofiler.modules.trackobjects.F_PREFIX,
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT,
OBJECT_NAME,
"50",
)
)
value = measurements.get_current_image_measurement(feature)
assert value == 1
def test_track_one_distance():
"""Track an object that doesn't move using distance"""
labels = numpy.zeros((10, 10), int)
labels[3:6, 2:7] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 1
module.tracking_method.value = "Distance"
measurements = runTrackObjects((labels, labels), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "1"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert round(abs(m(cellprofiler.modules.trackobjects.F_TRAJECTORY_X) - 0), 7) == 0
assert round(abs(m(cellprofiler.modules.trackobjects.F_TRAJECTORY_Y) - 0), 7) == 0
assert (
round(abs(m(cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED) - 0), 7) == 0
)
assert (
round(abs(m(cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE) - 0), 7)
== 0
)
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 1
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 1
assert m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER) == 1
assert m(cellprofiler.modules.trackobjects.F_LIFETIME) == 2
def m(feature):
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "1")
)
return measurements.get_current_image_measurement(name)
assert m(cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_SPLIT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_MERGE_COUNT) == 0
check_relationships(measurements, [1], [1], [2], [1])
def test_track_one_moving():
"""Track an object that moves"""
labels_list = []
distance = 0
last_i, last_j = (0, 0)
for i_off, j_off in ((0, 0), (2, 0), (2, 1), (0, 1)):
distance = i_off - last_i + j_off - last_j
last_i, last_j = (i_off, j_off)
labels = numpy.zeros((10, 10), int)
labels[4 + i_off : 7 + i_off, 4 + j_off : 7 + j_off] = 1
labels_list.append(labels)
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 3
module.tracking_method.value = "Distance"
measurements = runTrackObjects(labels_list, fn)
def m(feature, expected):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "3"))
value_set = measurements.get_all_measurements(OBJECT_NAME, name)
assert len(expected) == len(value_set)
for values, x in zip(value_set, expected):
assert len(values) == 1
assert round(abs(values[0] - x), 7) == 0
m(cellprofiler.modules.trackobjects.F_TRAJECTORY_X, [0, 0, 1, 0])
m(cellprofiler.modules.trackobjects.F_TRAJECTORY_Y, [0, 2, 0, -2])
m(cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED, [0, 2, 1, 2])
m(cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE, [0, 2, 3, 5])
m(cellprofiler.modules.trackobjects.F_LABEL, [1, 1, 1, 1])
m(cellprofiler.modules.trackobjects.F_LIFETIME, [1, 2, 3, 4])
m(
cellprofiler.modules.trackobjects.F_LINEARITY,
[1, 1, numpy.sqrt(5) / 3, 1.0 / 5.0],
)
def m(feature):
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "3")
)
return measurements.get_current_image_measurement(name)
assert m(cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_SPLIT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_MERGE_COUNT) == 0
image_numbers = numpy.arange(1, len(labels_list) + 1)
object_numbers = numpy.ones(len(image_numbers))
check_relationships(
measurements,
image_numbers[:-1],
object_numbers[:-1],
image_numbers[1:],
object_numbers[1:],
)
def test_track_split():
"""Track an object that splits"""
labels1 = numpy.zeros((11, 9), int)
labels1[1:10, 1:8] = 1
labels2 = numpy.zeros((10, 10), int)
labels2[1:6, 1:8] = 1
labels2[6:10, 1:8] = 2
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 5
module.tracking_method.value = "Distance"
measurements = runTrackObjects((labels1, labels2, labels2), fn)
def m(feature, idx):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "5"))
values = measurements.get_measurement(OBJECT_NAME, name, idx + 1)
assert len(values) == 2
return values
labels = m(cellprofiler.modules.trackobjects.F_LABEL, 2)
assert len(labels) == 2
assert numpy.all(labels == 1)
parents = m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER, 1)
assert numpy.all(parents == 1)
assert numpy.all(m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER, 1) == 1)
parents = m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER, 2)
assert numpy.all(parents == numpy.array([1, 2]))
assert numpy.all(m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER, 2) == 2)
def m(feature):
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "5")
)
return measurements.get_all_measurements("Image", name)[1]
assert m(cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_SPLIT_COUNT) == 1
assert m(cellprofiler.modules.trackobjects.F_MERGE_COUNT) == 0
check_relationships(
measurements, [1, 1, 2, 2], [1, 1, 1, 2], [2, 2, 3, 3], [1, 2, 1, 2]
)
def test_track_negative():
"""Track unrelated objects"""
labels1 = numpy.zeros((10, 10), int)
labels1[1:5, 1:5] = 1
labels2 = numpy.zeros((10, 10), int)
labels2[6:9, 6:9] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 1
module.tracking_method.value = "Distance"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "1"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 0
def m(feature):
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "1")
)
return measurements.get_current_image_measurement(name)
assert m(cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT) == 1
assert m(cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT) == 1
assert m(cellprofiler.modules.trackobjects.F_SPLIT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_MERGE_COUNT) == 0
def test_track_ambiguous():
"""Track disambiguation from among two possible parents"""
labels1 = numpy.zeros((20, 20), int)
labels1[1:4, 1:4] = 1
labels1[16:19, 16:19] = 2
labels2 = numpy.zeros((20, 20), int)
labels2[10:15, 10:15] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 20
module.tracking_method.value = "Distance"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "20"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 2
def test_overlap_positive():
"""Track overlapping objects"""
labels1 = numpy.zeros((10, 10), int)
labels1[3:6, 4:7] = 1
labels2 = numpy.zeros((10, 10), int)
labels2[4:7, 5:9] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Overlap"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 1
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 1
def test_overlap_negative():
"""Track objects that don't overlap"""
labels1 = numpy.zeros((20, 20), int)
labels1[3:6, 4:7] = 1
labels2 = numpy.zeros((20, 20), int)
labels2[14:17, 15:19] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Overlap"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 0
def test_overlap_ambiguous():
"""Track an object that overlaps two parents"""
labels1 = numpy.zeros((20, 20), int)
labels1[1:5, 1:5] = 1
labels1[15:19, 15:19] = 2
labels2 = numpy.zeros((20, 20), int)
labels2[4:18, 4:18] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Overlap"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 2
def test_measurement_positive():
"""Test tracking an object by measurement"""
labels1 = numpy.zeros((10, 10), int)
labels1[3:6, 4:7] = 1
labels2 = numpy.zeros((10, 10), int)
labels2[4:7, 5:9] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Measurements"
measurements = runTrackObjects((labels1, labels2), fn, [[1], [1]])
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 1
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 1
def test_measurement_negative():
"""Test tracking with too great a jump between successive images"""
labels1 = numpy.zeros((20, 20), int)
labels1[3:6, 4:7] = 1
labels2 = numpy.zeros((20, 20), int)
labels2[14:17, 15:19] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Measurements"
measurements = runTrackObjects((labels1, labels2), fn, [[1], [1]])
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 0
def test_ambiguous():
"""Test measurement with ambiguous parent choice"""
labels1 = numpy.zeros((20, 20), int)
labels1[1:5, 1:5] = 1
labels1[15:19, 15:19] = 2
labels2 = numpy.zeros((20, 20), int)
labels2[6:14, 6:14] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 4
module.tracking_method.value = "Measurements"
measurements = runTrackObjects((labels1, labels2), fn, [[1, 10], [9]])
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "4"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 2
def test_cross_numbered_objects():
"""Test labeling when object 1 in one image becomes object 2 in next"""
i, j = numpy.mgrid[0:10, 0:20]
labels = (i > 5) + (j > 10) * 2
pp = numpy.array(list(centrosome.filter.permutations([1, 2, 3, 4])))
def fn(module, workspace, idx):
if idx == 0:
module.tracking_method.value = "LAP"
measurements = runTrackObjects([numpy.array(p)[labels] for p in pp], fn)
def m(feature, i):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature))
values = measurements[OBJECT_NAME, name, i + 1]
assert len(values) == 4
return values
for i, p in enumerate(pp):
l = m(cellprofiler.modules.trackobjects.F_LABEL, i)
numpy.testing.assert_array_equal(numpy.arange(1, 5), p[l - 1])
if i > 0:
p_prev = pp[i - 1]
order = numpy.lexsort([p])
expected_po = p_prev[order]
po = m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER, i)
numpy.testing.assert_array_equal(po, expected_po)
pi = m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER, i)
numpy.testing.assert_array_equal(pi, i)
image_numbers, _ = numpy.mgrid[1 : (len(pp) + 1), 0:4]
check_relationships(
measurements,
image_numbers[:-1, :].flatten(),
pp[:-1, :].flatten(),
image_numbers[1:, :].flatten(),
pp[1:, :].flatten(),
)
def test_measurement_columns():
"""Test get_measurement_columns function"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.object_name.value = OBJECT_NAME
module.tracking_method.value = "Distance"
module.pixel_radius.value = 10
columns = module.get_measurement_columns(None)
assert len(columns) == len(cellprofiler.modules.trackobjects.F_ALL) + len(
cellprofiler.modules.trackobjects.F_IMAGE_ALL
)
for object_name, features in (
(OBJECT_NAME, cellprofiler.modules.trackobjects.F_ALL),
("Image", cellprofiler.modules.trackobjects.F_IMAGE_ALL,),
):
for feature in features:
if object_name == OBJECT_NAME:
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, "10")
)
else:
name = "_".join(
(
cellprofiler.modules.trackobjects.F_PREFIX,
feature,
OBJECT_NAME,
"10",
)
)
index = [column[1] for column in columns].index(name)
assert index != -1
column = columns[index]
assert column[0] == object_name
def test_measurement_columns_lap():
"""Test get_measurement_columns function for LAP"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.object_name.value = OBJECT_NAME
module.tracking_method.value = "LAP"
module.model.value = cellprofiler.modules.trackobjects.M_BOTH
second_phase = [
cellprofiler.modules.trackobjects.F_LINKING_DISTANCE,
cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
]
for wants in (True, False):
module.wants_second_phase.value = wants
columns = module.get_measurement_columns(None)
# 2, 2, 4 for the static model
# 4, 4, 16 for the velocity model
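        # i.e. state, noise, and covariance entries per Kalman model: the static
        # model tracks (x, y), giving 2 state + 2 noise + 4 covariance features,
        # while the velocity model tracks (x, y, vx, vy), giving 4 + 4 + 16; these
        # counts match the extra terms in the assertion below.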
other_features = [
cellprofiler.modules.trackobjects.F_AREA,
cellprofiler.modules.trackobjects.F_LINKING_DISTANCE,
cellprofiler.modules.trackobjects.F_LINK_TYPE,
cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
cellprofiler.modules.trackobjects.F_STANDARD_DEVIATION,
]
if wants:
other_features += [
cellprofiler.modules.trackobjects.F_GAP_LENGTH,
cellprofiler.modules.trackobjects.F_GAP_SCORE,
cellprofiler.modules.trackobjects.F_MERGE_SCORE,
cellprofiler.modules.trackobjects.F_SPLIT_SCORE,
cellprofiler.modules.trackobjects.F_MITOSIS_SCORE,
]
assert (
len(columns)
== len(cellprofiler.modules.trackobjects.F_ALL)
+ len(cellprofiler.modules.trackobjects.F_IMAGE_ALL)
+ len(other_features)
+ 2
+ 2
+ 4
+ 4
+ 4
+ 16
)
kalman_features = [
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VX,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VX,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VX,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VX,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VY,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VY,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VY,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VY,
cellprofiler.modules.trackobjects.F_VY,
),
]
for object_name, features in (
(
OBJECT_NAME,
cellprofiler.modules.trackobjects.F_ALL
+ kalman_features
+ other_features,
),
("Image", cellprofiler.modules.trackobjects.F_IMAGE_ALL,),
):
for feature in features:
if object_name == OBJECT_NAME:
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature)
)
else:
name = "_".join(
(
cellprofiler.modules.trackobjects.F_PREFIX,
feature,
OBJECT_NAME,
)
)
index = [column[1] for column in columns].index(name)
assert index != -1
column = columns[index]
assert column[0] == object_name
if wants or feature in second_phase:
assert len(column) == 4
assert MCA_AVAILABLE_POST_GROUP in column[3]
assert column[3][MCA_AVAILABLE_POST_GROUP]
else:
assert (
(len(column) == 3)
or (MCA_AVAILABLE_POST_GROUP not in column[3])
or (not column[3][MCA_AVAILABLE_POST_GROUP])
)
def test_measurements():
"""Test the different measurement pieces"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.object_name.value = OBJECT_NAME
module.image_name.value = "image"
module.pixel_radius.value = 10
categories = module.get_categories(None, "Foo")
assert len(categories) == 0
categories = module.get_categories(None, OBJECT_NAME)
assert len(categories) == 1
assert categories[0] == cellprofiler.modules.trackobjects.F_PREFIX
features = module.get_measurements(None, OBJECT_NAME, "Foo")
assert len(features) == 0
features = module.get_measurements(
None, OBJECT_NAME, cellprofiler.modules.trackobjects.F_PREFIX
)
assert len(features) == len(cellprofiler.modules.trackobjects.F_ALL)
assert all(
[feature in cellprofiler.modules.trackobjects.F_ALL for feature in features]
)
scales = module.get_measurement_scales(
None, OBJECT_NAME, cellprofiler.modules.trackobjects.F_PREFIX, "Foo", "image"
)
assert len(scales) == 0
for feature in cellprofiler.modules.trackobjects.F_ALL:
scales = module.get_measurement_scales(
None,
OBJECT_NAME,
cellprofiler.modules.trackobjects.F_PREFIX,
feature,
"image",
)
assert len(scales) == 1
assert int(scales[0]) == 10
def make_lap2_workspace(objs, nimages, group_numbers=None, group_indexes=None):
"""Make a workspace to test the second half of LAP
objs - a N x 7 array of "objects" composed of the
following pieces per object
objs[0] - image set # for object
objs[1] - label for object
objs[2] - parent image #
objs[3] - parent object #
objs[4] - x coordinate for object
objs[5] - y coordinate for object
objs[6] - area for object
nimages - # of image sets
group_numbers - group numbers for each image set, defaults to all 1
group_indexes - group indexes for each image set, defaults to range
"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.set_module_num(1)
module.object_name.value = OBJECT_NAME
module.tracking_method.value = "LAP"
module.wants_second_phase.value = True
module.wants_lifetime_filtering.value = False
module.wants_minimum_lifetime.value = False
module.min_lifetime.value = 1
module.wants_maximum_lifetime.value = False
module.max_lifetime.value = 100
module.pixel_radius.value = 50
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, RunException)
pipeline.add_listener(callback)
pipeline.add_module(module)
m = cellprofiler_core.measurement.Measurements()
if objs.shape[0] > 0:
nobjects = numpy.bincount(objs[:, 0].astype(int))
else:
nobjects = numpy.zeros(nimages, int)
for i in range(nimages):
m.next_image_set(i + 1)
for index, feature, dtype in (
(
1,
module.measurement_name(cellprofiler.modules.trackobjects.F_LABEL),
int,
),
(
2,
module.measurement_name(
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER
),
int,
),
(
3,
module.measurement_name(
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER
),
int,
),
(4, M_LOCATION_CENTER_X, float),
(5, M_LOCATION_CENTER_Y, float),
(
6,
module.measurement_name(cellprofiler.modules.trackobjects.F_AREA),
float,
),
):
values = objs[objs[:, 0] == i, index].astype(dtype)
m.add_measurement(OBJECT_NAME, feature, values, i + 1)
m.add_measurement("Image", "ImageNumber", i + 1)
m.add_measurement(
"Image",
GROUP_NUMBER,
1 if group_numbers is None else group_numbers[i],
image_set_number=i + 1,
)
m.add_measurement(
"Image",
GROUP_INDEX,
i if group_indexes is None else group_indexes[i],
image_set_number=i + 1,
)
#
# Add blanks of the right sizes for measurements that are recalculated
#
m.add_measurement(
"Image",
"_".join((C_COUNT, OBJECT_NAME)),
nobjects[i],
image_set_number=i + 1,
)
for feature in (
cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED,
cellprofiler.modules.trackobjects.F_DISPLACEMENT,
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE,
cellprofiler.modules.trackobjects.F_TRAJECTORY_X,
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y,
cellprofiler.modules.trackobjects.F_LINEARITY,
cellprofiler.modules.trackobjects.F_LIFETIME,
cellprofiler.modules.trackobjects.F_FINAL_AGE,
cellprofiler.modules.trackobjects.F_LINKING_DISTANCE,
cellprofiler.modules.trackobjects.F_LINK_TYPE,
cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
cellprofiler.modules.trackobjects.F_STANDARD_DEVIATION,
):
dtype = (
int
if feature
in (
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER,
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER,
cellprofiler.modules.trackobjects.F_LIFETIME,
cellprofiler.modules.trackobjects.F_LINK_TYPE,
cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
)
else float
)
m.add_measurement(
OBJECT_NAME,
module.measurement_name(feature),
numpy.NaN * numpy.ones(nobjects[i], dtype)
if feature == cellprofiler.modules.trackobjects.F_FINAL_AGE
else numpy.zeros(nobjects[i], dtype),
image_set_number=i + 1,
)
for feature in (
cellprofiler.modules.trackobjects.F_SPLIT_COUNT,
cellprofiler.modules.trackobjects.F_MERGE_COUNT,
):
m.add_measurement(
"Image",
module.image_measurement_name(feature),
0,
image_set_number=i + 1,
)
#
# Figure out how many new and lost objects per image set
#
label_sets = [set() for i in range(nimages)]
for row in objs:
label_sets[row[0]].add(row[1])
if group_numbers is None:
group_numbers = numpy.ones(nimages, int)
if group_indexes is None:
group_indexes = numpy.arange(nimages) + 1
#
# New objects are ones without matching labels in the previous set
#
for i in range(0, nimages):
if group_indexes[i] == 1:
new_objects = len(label_sets[i])
lost_objects = 0
else:
new_objects = sum(
[1 for label in label_sets[i] if label not in label_sets[i - 1]]
)
lost_objects = sum(
[1 for label in label_sets[i - 1] if label not in label_sets[i]]
)
m.add_measurement(
"Image",
module.image_measurement_name(
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT
),
new_objects,
image_set_number=i + 1,
)
m.add_measurement(
"Image",
module.image_measurement_name(
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT
),
lost_objects,
image_set_number=i + 1,
)
m.image_set_number = nimages
image_set_list = ImageSetList()
for i in range(nimages):
image_set = image_set_list.get_image_set(i)
workspace = Workspace(pipeline, module, image_set, ObjectSet(), m, image_set_list,)
return workspace, module
def check_measurements(workspace, d):
"""Check measurements against expected values
workspace - workspace that was run
d - dictionary of feature name and list of expected measurement values
"""
m = workspace.measurements
assert isinstance(m, cellprofiler_core.measurement.Measurements)
module = workspace.module
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
for feature, expected in list(d.items()):
if numpy.isscalar(expected[0]):
mname = module.image_measurement_name(feature)
values = m.get_all_measurements("Image", mname)
assert len(expected) == len(values), (
"Expected # image sets (%d) != actual (%d) for %s"
% (len(expected), len(values), feature)
)
assert all([v == e for v, e in zip(values, expected)]), (
"Values don't match for " + feature
)
else:
mname = module.measurement_name(feature)
values = m.get_all_measurements(OBJECT_NAME, mname)
assert len(expected) == len(values), (
"Expected # image sets (%d) != actual (%d) for %s"
% (len(expected), len(values), feature)
)
for i, (e, v) in enumerate(zip(expected, values)):
assert len(e) == len(v), (
"Expected # of objects (%d) != actual (%d) for %s:%d"
% (len(e), len(v), feature, i)
)
numpy.testing.assert_almost_equal(v, e)
def check_relationships(
m,
expected_parent_image_numbers,
expected_parent_object_numbers,
expected_child_image_numbers,
expected_child_object_numbers,
):
"""Check the relationship measurements against expected"""
expected_parent_image_numbers = numpy.atleast_1d(expected_parent_image_numbers)
expected_child_image_numbers = numpy.atleast_1d(expected_child_image_numbers)
expected_parent_object_numbers = numpy.atleast_1d(expected_parent_object_numbers)
expected_child_object_numbers = numpy.atleast_1d(expected_child_object_numbers)
assert isinstance(m, cellprofiler_core.measurement.Measurements)
r = m.get_relationships(
1, cellprofiler.modules.trackobjects.R_PARENT, OBJECT_NAME, OBJECT_NAME
)
actual_parent_image_numbers = r[R_FIRST_IMAGE_NUMBER]
actual_parent_object_numbers = r[R_FIRST_OBJECT_NUMBER]
actual_child_image_numbers = r[R_SECOND_IMAGE_NUMBER]
actual_child_object_numbers = r[R_SECOND_OBJECT_NUMBER]
assert len(actual_parent_image_numbers) == len(expected_parent_image_numbers)
#
# Sort similarly
#
for i1, o1, i2, o2 in (
(
expected_parent_image_numbers,
expected_parent_object_numbers,
expected_child_image_numbers,
expected_child_object_numbers,
),
(
actual_parent_image_numbers,
actual_parent_object_numbers,
actual_child_image_numbers,
actual_child_object_numbers,
),
):
order = numpy.lexsort((i1, o1, i2, o2))
for x in (i1, o1, i2, o2):
x[:] = x[order]
for expected, actual in zip(
(
expected_parent_image_numbers,
expected_parent_object_numbers,
expected_child_image_numbers,
expected_child_object_numbers,
),
(
actual_parent_image_numbers,
actual_parent_object_numbers,
actual_child_image_numbers,
actual_child_object_numbers,
),
):
numpy.testing.assert_array_equal(expected, actual)
def test_lap_none():
"""Run the second part of LAP on one image of nothing"""
with MonkeyPatchedDelete():
workspace, module = make_lap2_workspace(numpy.zeros((0, 7)), 1)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [numpy.zeros(0, int)],
cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED: [numpy.zeros(0)],
cellprofiler.modules.trackobjects.F_DISPLACEMENT: [numpy.zeros(0)],
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE: [
numpy.zeros(0)
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_X: [numpy.zeros(0)],
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y: [numpy.zeros(0)],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [0],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0],
},
)
def test_lap_one():
"""Run the second part of LAP on one image of one object"""
with MonkeyPatchedDelete():
workspace, module = make_lap2_workspace(
numpy.array([[0, 1, 0, 0, 100, 100, 25]]), 1
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [numpy.array([1])],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0])
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0])
],
cellprofiler.modules.trackobjects.F_DISPLACEMENT: [numpy.zeros(1)],
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE: [
numpy.zeros(1)
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_X: [numpy.zeros(1)],
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y: [numpy.zeros(1)],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [1],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0],
},
)
def test_bridge_gap():
"""Bridge a gap of zero frames between two objects"""
with MonkeyPatchedDelete():
workspace, module = make_lap2_workspace(
numpy.array([[0, 1, 0, 0, 1, 2, 25], [2, 2, 0, 0, 101, 102, 25]]), 3
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
#
# The cost of bridging the gap should be 141. We set the alternative
# score to 142 so that bridging wins.
#
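# (141 is the Euclidean distance between the two objects at (1, 2) and (101, 102),
# i.e. sqrt(100**2 + 100**2) ~= 141.4, computed as `distance` below.)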
module.gap_cost.value = 142
module.max_gap_score.value = 142
module.run_as_data_tool(workspace)
distance = numpy.array([numpy.sqrt(2 * 100 * 100)])
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [
numpy.array([1]),
numpy.zeros(0),
numpy.array([1]),
],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0]),
numpy.zeros(0, int),
numpy.array([1]),
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0]),
numpy.zeros(0, int),
numpy.array([1]),
],
cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED: [
numpy.zeros(1),
numpy.zeros(0),
distance,
],
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE: [
numpy.zeros(1),
numpy.zeros(0),
distance,
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_X: [
numpy.zeros(1),
numpy.zeros(0),
numpy.array([100]),
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y: [
numpy.zeros(1),
numpy.zeros(0),
numpy.array([100]),
],
cellprofiler.modules.trackobjects.F_LINEARITY: [
numpy.array([numpy.nan]),
numpy.zeros(0),
numpy.array([1]),
],
cellprofiler.modules.trackobjects.F_LIFETIME: [
numpy.ones(1),
numpy.zeros(0),
numpy.array([2]),
],
cellprofiler.modules.trackobjects.F_FINAL_AGE: [
numpy.array([numpy.nan]),
numpy.zeros(0),
numpy.array([2]),
],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [1, 0, 0],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0, 0, 0],
},
)
check_relationships(workspace.measurements, [1], [1], [3], [1])
def test_maintain_gap():
"""Maintain object identity across a large gap"""
with MonkeyPatchedDelete():
workspace, module = make_lap2_workspace(
numpy.array([[0, 1, 0, 0, 1, 2, 25], [2, 2, 0, 0, 101, 102, 25]]), 3
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
#
# The cost of creating the gap should be 140 and the cost of
# bridging the gap should be 141.
#
module.gap_cost.value = 140
module.max_gap_score.value = 142
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [
numpy.array([1]),
numpy.zeros(0),
numpy.array([2]),
],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0]),
numpy.zeros(0),
numpy.array([0]),
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0]),
numpy.zeros(0),
numpy.array([0]),
],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [1, 0, 1],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0, 1, 0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0, 0, 0],
},
)
def test_filter_gap():
"""Filter a gap due to an unreasonable score"""
with MonkeyPatchedDelete():
workspace, module = make_lap2_workspace(
numpy.array([[0, 1, 0, 0, 1, 2, 25], [2, 2, 0, 0, 101, 102, 25]]), 3
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
#
# The cost of creating the gap should be 142 and the cost of
# bridging the gap should be 141. However, the gap should be filtered
# by the max score
#
module.gap_cost.value = 142
module.max_gap_score.value = 140
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [
numpy.array([1]),
numpy.zeros(0),
numpy.array([2]),
],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0]),
numpy.zeros(0),
numpy.array([0]),
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0]),
numpy.zeros(0),
numpy.array([0]),
],
},
)
def test_split():
"""Track an object splitting"""
workspace, module = make_lap2_workspace(
numpy.array(
[
[0, 1, 0, 0, 100, 100, 50],
[1, 1, 1, 1, 110, 110, 25],
[1, 2, 0, 0, 90, 90, 25],
[2, 1, 2, 1, 113, 114, 25],
[2, 2, 2, 2, 86, 87, 25],
]
),
3,
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
#
# The split score should be 20*sqrt(2) more than the null, so a split
# alternative cost of 15 is too much and 14 too little. The values used
# below are doubled to match (30 here, 28 in test_dont_split).
#
module.split_cost.value = 30
module.max_split_score.value = 30
module.run_as_data_tool(workspace)
d200 = numpy.sqrt(200)
tot = numpy.sqrt(13 ** 2 + 14 ** 2)
lin = tot / (d200 + 5)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [
numpy.array([1]),
numpy.array([1, 1]),
numpy.array([1, 1]),
],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0]),
numpy.array([1, 1]),
numpy.array([2, 2]),
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0]),
numpy.array([1, 1]),
numpy.array([1, 2]),
],
cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED: [
numpy.zeros(1),
numpy.ones(2) * d200,
numpy.array([5, 5]),
],
cellprofiler.modules.trackobjects.F_DISPLACEMENT: [
numpy.zeros(1),
numpy.ones(2) * d200,
numpy.array([tot, tot]),
],
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE: [
numpy.zeros(1),
numpy.ones(2) * d200,
numpy.ones(2) * d200 + 5,
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_X: [
numpy.zeros(1),
numpy.array([10, -10]),
numpy.array([3, -4]),
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y: [
numpy.zeros(1),
numpy.array([10, -10]),
numpy.array([4, -3]),
],
cellprofiler.modules.trackobjects.F_LINEARITY: [
numpy.array([numpy.nan]),
numpy.array([1, 1]),
numpy.array([lin, lin]),
],
cellprofiler.modules.trackobjects.F_LIFETIME: [
numpy.ones(1),
numpy.array([2, 2]),
numpy.array([3, 3]),
],
cellprofiler.modules.trackobjects.F_FINAL_AGE: [
numpy.array([numpy.nan]),
numpy.array([numpy.nan, numpy.nan]),
numpy.array([3, 3]),
],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [1, 0, 0],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0, 1, 0],
},
)
def test_dont_split():
"""Track an object splitting"""
workspace, module = make_lap2_workspace(
numpy.array(
[
[0, 1, 0, 0, 100, 100, 50],
[1, 1, 1, 1, 110, 110, 25],
[1, 2, 0, 0, 90, 90, 25],
[2, 1, 2, 1, 110, 110, 25],
[2, 2, 2, 2, 90, 90, 25],
]
),
3,
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
module.split_cost.value = 28
module.max_split_score.value = 30
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [
numpy.array([1]),
numpy.array([1, 2]),
numpy.array([1, 2]),
],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0]),
numpy.array([1, 0]),
numpy.array([2, 2]),
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0]),
numpy.array([1, 0]),
numpy.array([1, 2]),
],
cellprofiler.modules.trackobjects.F_LIFETIME: [
numpy.ones(1),
numpy.array([2, 1]),
numpy.array([3, 2]),
],
cellprofiler.modules.trackobjects.F_FINAL_AGE: [
numpy.array([numpy.nan]),
numpy.array([numpy.nan, numpy.nan]),
numpy.array([3, 2]),
],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [1, 1, 0],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0, 0, 0],
},
)
def test_split_filter():
"""Prevent a split by setting the filter too low"""
workspace, module = make_lap2_workspace(
numpy.array(
[
[0, 1, 0, 0, 100, 100, 50],
[1, 1, 1, 1, 110, 110, 25],
[1, 2, 0, 0, 90, 90, 25],
[2, 1, 2, 1, 110, 110, 25],
[2, 2, 2, 2, 90, 90, 25],
]
),
3,
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
module.split_cost.value = 30
module.max_split_score.value = 28
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [
numpy.array([1]),
numpy.array([1, 2]),
numpy.array([1, 2]),
],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0]),
numpy.array([1, 0]),
numpy.array([2, 2]),
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0]),
numpy.array([1, 0]),
numpy.array([1, 2]),
],
cellprofiler.modules.trackobjects.F_LIFETIME: [
numpy.array([1]),
numpy.array([2, 1]),
numpy.array([3, 2]),
],
cellprofiler.modules.trackobjects.F_FINAL_AGE: [
numpy.array([numpy.nan]),
numpy.array([numpy.nan, numpy.nan]),
numpy.array([3, 2]),
],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [1, 1, 0],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0, 0, 0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0, 0, 0],
},
)
def test_merge():
"""Merge two objects into one"""
workspace, module = make_lap2_workspace(
numpy.array(
[
[0, 1, 0, 0, 110, 110, 25],
[0, 2, 0, 0, 90, 90, 25],
[1, 1, 1, 1, 110, 110, 25],
[1, 2, 1, 2, 90, 90, 25],
[2, 1, 2, 1, 100, 100, 50],
]
),
3,
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
module.merge_cost.value = 30
module.max_merge_score.value = 30
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [
| completion: numpy.array([1, 1])
| api: numpy.array
|
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.ticker import FuncFormatter
from matplotlib import ticker
import numpy as np
###
blue = np.array([0.,13.,120.])/255.
orange = np.array([255.,102.,0.])/255.
green = np.array([9.,84.,0.])/255.
purple =
| completion: np.array([138.,0.,84.])
| api: numpy.array
|
import numpy as np
import parameters as p
from importlib import reload
reload(p)
def get_E(Chl, Z, Env, modname):
"""
Profile of photosynthetically available radiation vs. z
NOTE: all inputs except swrad0 must be vectors (z)
NOTE: we assume z_rho, z_w, and Chl are packed bottom-to-top
NOTE: corrected Chl integral to use mean, as per notes in ROMS Forum:
https://www.myroms.org/forum/viewtopic.php?p=2444&hilit=AttChl+units#p2444
Input:
Env['swrad0'] = incoming swrad at surface [W m-2] (called I_0 in Fennel)
Z['z_rho'] = vertical positions of cell centers, positive up, 0 at surface [m]
Z['z_w'] = vertical positions of cell boundaries, positive up, 0 at surface [m]
Chl = chlorophyll concentration profile at cell centers [mg Chl m-3]
Env['salt'] = salinity [psu]
Output:
E = photosynthetically available radiation [W m-2]
"""
dz = np.diff(Z['z_w'])
N = len(Chl)
mean_Chl = np.zeros(N)
for ii in range(N):
this_dz = dz[ii:]
this_dz[0] *= 0.5
this_Chl = Chl[ii:]
mean_Chl[ii] = np.sum(this_dz * this_Chl) / np.sum(this_dz)
if modname in ['banas', 'mix0']:
AttSFW_nb = p.AttSW_nb - p.AttFW_nb * (Env['salt'] - 32)
E = Env['swrad0'] * p.PARfrac * np.exp( Z['z_rho'] * (AttSFW_nb + p.AttChl_nb*mean_Chl))
elif modname == 'fennel':
E = Env['swrad0'] * p.PARfrac * np.exp( Z['z_rho'] * (p.AttSW + p.AttChl*mean_Chl))
return E
def sink(vn, C, max_denitrification, Wsink, dt, Z):
h = Wsink * dt
nn = int(
| completion: np.floor(h / Z['Dz'])
| api: numpy.floor
|
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
from nltk import word_tokenize
from data.Resources import Resources
class Data:
"""
Class responsible for padding, preprocessing and serving data for batching
"""
def __init__(self, filename, comment_padding_size, post_padding_size, word2vec_dim, binary_sentiment):
print("Loading word2vec...")
# word2vec model
self.word2vec = KeyedVectors.load_word2vec_format(filename, binary=False)
print("Done!")
# parameters
self.word2vec_dim = word2vec_dim
self.comment_padding_size = comment_padding_size
self.post_padding_size = post_padding_size
self.binary_sentiment = binary_sentiment
# fetch resources
self.stop_words = Resources.stop_words()
self.topic_one_hot = Resources.topics()
self.emotion_one_hot = Resources.emotions()
self.speech_acts_one_hot = Resources.speech_acts()
self.label_one_hot = Resources.sentiment()
# get resources length
self.topic_count = len(Resources.topics())
self.emotion_count = len(Resources.emotions())
self.speech_acts_count = len(Resources.speech_acts())
# pick the sentiment resource and label count depending on whether the binary flag is set
if binary_sentiment:
self.labels_count = 2
self.label_one_hot = Resources.binary_sentiment()
else:
self.labels_count = 3
self.label_one_hot = Resources.sentiment()
def preprocess(self, sentence):
"""
Preprocess text
:param sentence:
:return purified list of word2vec vectors
"""
# create tokens
tokens = word_tokenize(sentence.lower())
# clean sentences
sentence = [word for word in tokens if word not in self.stop_words and (word.isalpha() or word.isnumeric())]
# if not found in word2vec, remove it
sentence = [self.word2vec[word] if word in self.word2vec.vocab else np.zeros(self.word2vec_dim) for word in
sentence]
purified = [word for word in sentence if not np.array_equal(word, np.zeros(self.word2vec_dim))]
# if needed pad comment with zeros
sequence_length = len(purified)
if sequence_length == 0:
return np.zeros([self.comment_padding_size, self.word2vec_dim])
else:
purified = self.pad_x(purified, self.comment_padding_size, "comment")
return purified
def get_next(self, text):
"""
Handles post creation and post padding
"""
x = []
sequence_length = 0
y_labels_array = []
y_topics_array = []
y_emotions_array = []
y_speech_act_array = []
posts = text.rstrip("\n").split("*|*")
for post in posts:
if post == "":
continue
else:
# fetch all features
post_text = post.split("||")[0]
post_topics = post.split("||")[1]
post_emotion = post.split("||")[2]
post_speech_acts = post.split("||")[3]
label = post.split("||")[4]
# preprocess it
post_text_proccesed = self.preprocess(post_text)
# one hot transform features
y_topics = self.one_hot_transform(post_topics, "topic")
y_emotion = self.one_hot_transform(post_emotion, "emotion")
y_speech_act = self.one_hot_transform(post_speech_acts, "speech_act")
y_sentiment = self.one_hot_transform(label, "sentiment")
# append to placeholder
x.append(post_text_proccesed)
# count length for lstm
sequence_length += 1
# append y labels for each feature
y_labels_array.append(y_sentiment)
y_topics_array.append(y_topics)
y_emotions_array.append(y_emotion)
y_speech_act_array.append(y_speech_act)
return self.pad_x(x, self.post_padding_size, "post"),\
sequence_length, \
self.pad_y(y_labels_array, self.post_padding_size, "sentiment"), \
self.pad_y(y_topics_array, self.post_padding_size, "topic"), \
self.pad_y(y_emotions_array, self.post_padding_size, "emotion"), \
self.pad_y(y_speech_act_array, self.post_padding_size, "speech_act")
'''
def pad_sequence(self, sequence, size):
if len(sequence) > size:
new_text = []
for i in range(0, size):
new_text.append(sequence[i])
return np.array(new_text)
if len(sequence) < size:
for i in range(len(sequence), size):
sequence.append(0)
return np.array(sequence)
return sequence
'''
def pad_x(self, text, size, x_type):
"""
Pad text to desired size
"""
if len(text) > size:
new_text = []
for i in range(0, size):
new_text.append(text[i])
return np.array(new_text)
if len(text) < size:
for i in range(len(text), size):
if x_type == "comment":
text.append(np.zeros(self.word2vec_dim))
else:
text.append(np.zeros([self.comment_padding_size, self.word2vec_dim]))
return np.array(text)
return text
def pad_y(self, data, size, y_type):
"""
Pad labels to desired size
"""
if y_type == "topic":
one_hot_size = self.topic_count
elif y_type == "emotion":
one_hot_size = self.emotion_count
elif y_type == "speech_act":
one_hot_size = self.speech_acts_count
else:
one_hot_size = self.labels_count
# padding
if len(data) > size:
new_text = []
for i in range(0, size):
new_text.append(data[i])
return np.array(new_text)
if len(data) < size:
for i in range(len(data), size):
data.append(np.zeros(one_hot_size))
return np.array(data)
return data
def one_hot_transform(self, data, label_type):
"""
One hot transform
"""
if label_type == "topic":
if data == "None": # some weird bug
return np.zeros(self.topic_count)
else:
response = np.zeros(self.topic_count)
for each in data.split(","):
response[self.topic_one_hot[each.replace("|", "")]] = 1
return response
elif label_type == "emotion":
if data == "None":
return np.zeros(self.emotion_count)
else:
response = np.zeros(self.emotion_count)
for each in data.split(","):
response[self.emotion_one_hot[each.replace("|", "")]] = 1
return response
elif label_type == "speech_act":
if data == "None":
return
| completion: np.zeros(self.speech_acts_count)
| api: numpy.zeros
|
# -*- coding: utf-8 -*-
# """
# Created on Thu Sep 12 17:34:06 2019
# @author: <NAME> and <NAME>
# """
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import math
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import fsolve
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
#%%
def get_eq_pts(eqNum, init_guess_eqpt_model, grad_pot_model, par):
"""
Returns configuration space coordinates of the equilibrium points.
get_eq_pts(eqNum, init_guess_eqpt_model, grad_pot_model, par) solves the
coordinates of the equilibrium point for a Hamiltonian of the form kinetic
energy (KE) + potential energy (PE).
Parameters
----------
eqNum : int
1 is the saddle-center equilibrium point
2 or 3 is the center-center equilibrium point
init_guess_eqpt_model : function name
function that returns the initial guess for the equilibrium point
grad_pot_model : function name
function that defines the vector of potential gradient
par : float (list)
model parameters
Returns
-------
float (list)
configuration space coordinates
"""
#fix the equilibrium point numbering convention here and make a
#starting guess at the solution
x0 = init_guess_eqpt_model(eqNum, par)
# F(xEq) = 0 at the equilibrium point, solve using in-built function
F = lambda x: grad_pot_model(x, par)
eqPt = fsolve(F, x0, fprime = None) # Call solver
return eqPt
#%
def get_total_energy(orbit, pot_energy_model, parameters):
"""
Returns total energy (value of Hamiltonian) of a phase space point on an orbit
get_total_energy(orbit, pot_energy_model, parameters) returns the total energy for a
Hamiltonian of the form KE + PE.
Parameters
----------
orbit : float (list)
phase space coordinates (x,y,px,py) of a point on an orbit
pot_energy_model : function name
function that returns the potential energy of Hamiltonian
parameters : float (list)
model parameters
Returns
-------
scalar
total energy (value of Hamiltonian)
"""
x = orbit[0]
y = orbit[1]
px = orbit[2]
py = orbit[3]
return (1.0/(2*parameters[0]))*(px**2.0) + (1.0/(2*parameters[1]))*(py**2.0) + \
pot_energy_model(x, y, parameters)
#%%
def get_pot_surf_proj(xVec, yVec, pot_energy_model, par):
"""
Returns projection of the potential energy (PE) surface on the configuration space
Parameters
----------
xVec, yVec : 1d numpy arrays
x,y-coordinates that discretizes the x, y domain of the configuration space
pot_energy_model : function name
function that returns the potential energy of Hamiltonian
parameters : float (list)
model parameters
Returns
-------
2d numpy array
values of the PE
"""
resX = np.size(xVec)
resY = np.size(yVec)
surfProj = np.zeros([resX, resY])
for i in range(len(xVec)):
for j in range(len(yVec)):
surfProj[i,j] = pot_energy_model(xVec[j], yVec[i], par)
return surfProj
#%
def state_transit_matrix(tf,x0,par,variational_eqns_model,fixed_step=0):
"""
Returns state transition matrix, the trajectory, and the solution of the
variational equations over a length of time
In particular, for periodic solutions of % period tf=T, one can obtain
the monodromy matrix, PHI(0,T).
Parameters
----------
tf : float
final time for the integration
x0 : float
initial condition
par : float (list)
model parameters
variational_eqns_model : function name
function that returns the variational equations of the dynamical system
Returns
-------
t : 1d numpy array
solution time
x : 2d numpy array
solution of the phase space coordinates
phi_tf : 2d numpy array
state transition matrix at the final time, tf
PHI : 1d numpy array,
solution of phase space coordinates and corresponding tangent space coordinates
"""
N = len(x0) # N=4
RelTol=3e-14
AbsTol=1e-14
tf = tf[-1]
if fixed_step == 0:
TSPAN = [ 0 , tf ]
else:
TSPAN = np.linspace(0, tf, fixed_step)
PHI_0 = np.zeros(N+N**2)
PHI_0[0:N**2] = np.reshape(np.identity(N),(N**2)) #initial condition for state transition matrix
PHI_0[N**2:N+N**2] = x0 # initial condition for trajectory
f = lambda t,PHI: variational_eqns_model(t,PHI,par) # Use partial in order to pass parameters to function
soln = solve_ivp(f, TSPAN, list(PHI_0), method='RK45', dense_output=True, \
events = None, rtol=RelTol, atol=AbsTol)
t = soln.t
PHI = soln.y
PHI = PHI.transpose()
x = PHI[:,N**2:N+N**2] # trajectory from time 0 to tf
phi_tf = np.reshape(PHI[len(t)-1,0:N**2],(N,N)) # state transition matrix, PHI(O,tf)
return t,x,phi_tf,PHI
#%%
def dotproduct(guess1, guess2,n_turn, ham2dof_model, half_period_model, \
variational_eqns_model, par):
"""
Returns the x,y coordinates of the turning points for the guess initial conditions guess1, guess2 and the dot product defined for the 2 turning points
Uses the turning point method (a dot product is defined just before the "actual" turning point).
Parameters
----------
guess1 : 1d numpy array
guess initial condition 1 for the unstable periodic orbit
guess2 : 1d numpy array
guess initial condition 2 for the unstable periodic orbit
n_turn : int
nth turning point that is used to define the dot product
ham2dof_model : function name
function that returns the Hamiltonian vector field at an input phase space coordinate
and time
variational_eqns_model : function name
function that returns the variational equations of the dynamical system
par : float (list)
model parameters
Returns
-------
x_turn1 : float
x coordinate of the turning point with initial condition guess1
x_turn2 : float
x coordinate of the turning point with initial condition guess2
y_turn1 : float
y coordinate of the turning point with initial condition guess1
y_turn2 : float
y coordinate of the turning point with initial condition guess2
dotproduct : float
value of the dot product
"""
TSPAN = [0,40]
RelTol = 3.e-10
AbsTol = 1.e-10
f1 = lambda t,x: ham2dof_model(t,x,par)
soln1 = solve_ivp(f1, TSPAN, guess1, method='RK45', dense_output=True, \
events = lambda t,x: half_period_model(t,x,par),rtol=RelTol, atol=AbsTol)
te1 = soln1.t_events[0]
t1 = [0,te1[n_turn]]#[0,te1[1]]
turn1 = soln1.sol(t1)
x_turn1 = turn1[0,-1]
y_turn1 = turn1[1,-1]
t,xx1,phi_t1,PHI = state_transit_matrix(t1,guess1,par,variational_eqns_model)
x1 = xx1[:,0]
y1 = xx1[:,1]
p1 = xx1[:,2:]
p_perpendicular_1 = math.sqrt(np.dot(p1[-3,:],p1[-3,:]))*p1[-2,:] - np.dot(p1[-2,:],p1[-3,:])*p1[-3,:]
f2 = lambda t,x: ham2dof_model(t,x,par)
soln2 = solve_ivp(f2, TSPAN, guess2,method='RK45',dense_output=True, \
events = lambda t,x: half_period_model(t,x,par),rtol=RelTol, atol=AbsTol)
te2 = soln2.t_events[0]
t2 = [0,te2[n_turn]]#[0,te2[1]]
turn2 = soln2.sol(t2)
x_turn2 = turn2[0,-1]
y_turn2 = turn2[1,-1]
t,xx2,phi_t1,PHI = state_transit_matrix(t2,guess2,par,variational_eqns_model)
x2 = xx2[:,0]
y2 = xx2[:,1]
p2 = xx2[:,2:]
p_perpendicular_2 = math.sqrt(np.dot(p2[-3,:],p2[-3,:]))*p2[-2,:] - np.dot(p2[-2,:],p2[-3,:])*p2[-3,:]
dotproduct = np.dot(p_perpendicular_1,p_perpendicular_2)
print("Initial guess1%s, initial guess2%s, dot product is%s" %(guess1,guess2,dotproduct))
return x_turn1,x_turn2,y_turn1,y_turn2, dotproduct
#%%
def turningPoint(begin1, begin2, get_coord_model, guess_coords_model, ham2dof_model, \
half_period_model, variational_eqns_model, pot_energy_model, plot_iter_orbit_model, par, \
e, n, n_turn, show_itrsteps_plots, po_fam_file):
"""
turningPoint computes the periodic orbit of target energy using turning point method.
Given 2 initial conditions begin1, begin2, the periodic orbit is assumed to exist between begin1, begin2 such that
trajectories with initial conditions begin1, begin2 are turning in different directions,
which results in a negative value of the dot product
Parameters
----------
begin1 : 1d numpy array
guess initial condition 1 for the unstable periodic orbit
begin2 : 1d numpy array
guess initial condition 2 for the unstable periodic orbit
get_coord_model : function name
function that returns the phase space coordinate for a given x/y value and total energy E
guess_coords_model : function name
function that returns the coordinates as guess for the next iteration of the
turning point
ham2dof_model : function name
function that returns the Hamiltonian vector field at an input phase space coordinate
and time
half_period_model : function name
function that returns the event criteria in terms of the coordinate that is set to zero
for half-period of the unstable periodic orbit
pot_energy_model : function name
function that returns the potential energy of Hamiltonian
variational_eqns_model : function name
function that returns the variational equations of the dynamical system
plot_iter_orbit_model : function name
function to plot the computed orbit in the 3D phase space of 2 position and 1 momentum
coordinate
par: float (list)
model parameters
e: float
total energy of the system
n: int
number of intervals into which the range between the 2 initial guesses begin1 and begin2 is divided
n_turn: int
nth turning point that is used to define the dot product
show_itrsteps_plots: logical
flag (True or False) to show iteration of the UPOs in plots
po_fam_file : function name
file name to save the members in the family of the unstable periodic orbits
Returns
-------
x0po : 1d numpy array
Initial condition of the target unstable periodic orbit
T : float
Time period of the target unstable periodic orbit
energyPO : float
Energy of the target unstable periodic orbit.
"""
axis_fs = 15
guess1 = begin1
guess2 = begin2
MAXiter = 30
dum = np.zeros(((n+1)*MAXiter ,7))
result = np.zeros(((n+1),3)) # record data for each iteration
result2 = np.zeros(((n+1)*MAXiter ,3)) # record all data for every iteration
x0po = np.zeros((MAXiter ,4))
i_turn = np.zeros((MAXiter ,1))
T = np.zeros((MAXiter ,1))
energyPO = np.zeros((MAXiter ,1))
iter = 0
iter_diff =0 # for counting the correct index
while iter < MAXiter and n_turn < 10:
for i in range(0,n+1):
xguess, yguess = guess_coords_model(guess1, guess2, i, n, e, get_coord_model, par)
guess = [xguess,yguess,0, 0]
x_turn1,x_turn2,y_turn1,y_turn2, dotpro = dotproduct(guess1, guess, n_turn, \
ham2dof_model, \
half_period_model, \
variational_eqns_model, \
par)
result[i,0] = dotpro
result[i,1] = guess[0]
result[i,2] = guess[1]
result2[(n+1)*iter+i,:] = result[i,:]
i_turn_iter = 0
for i in range(0,n+1):
# we record the sign change for each pair of initial conditions
# i_turn_iter is the index at which the sign changes from positive to negative
# we only want the first i_turn_iter terms to have positive sign and the remaining n-i_turn_iter+1 terms to have negative signs to avoid error
#
if np.sign(result[i,0]) <0 and np.sign(result[i-1,0]) >0:
i_turn[iter] = i
i_turn_iter = int(i_turn[iter])
check = np.sign(result[:,0])
check_same= sum(check[0:i_turn_iter])
check_diff= sum(check[i_turn_iter:])
print(check_same == i_turn[iter])
print(check_diff == -n+i_turn[iter]-1)
if check_same == i_turn[iter] and check_diff == -n+i_turn[iter]-1 and i_turn_iter>0:
# if the following condition holds, we can zoom in to a smaller interval and continue our procedure
index = int(i_turn[iter])
guesspo = [result[index-1,1],result[index-1,2],0,0]
print("Our guess of the inital condition is", guesspo)
x0po[iter,:] = guesspo[:]
TSPAN = [0,10]
RelTol = 3.e-10
AbsTol = 1.e-10
f = lambda t,x: ham2dof_model(t,x,par)
soln = solve_ivp(f, TSPAN, guesspo,method='RK45', dense_output=True, \
events = lambda t,x: half_period_model(t,x,par),rtol=RelTol, atol=AbsTol)
te = soln.t_events[0]
tt = [0,te[1]]
t,x,phi_t1,PHI = state_transit_matrix(tt,guesspo,par,variational_eqns_model)
T[iter] = tt[-1]*2
print("period is%s " %T[iter])
energy = np.zeros(len(x))
#print(len(t))
for j in range(len(t)):
energy[j] = get_total_energy(x[j,:], pot_energy_model, par)
energyPO[iter] =
| completion: np.mean(energy)
| api: numpy.mean
|