repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
msvi | msvi-main/experiments/bballs/train.py | from types import SimpleNamespace
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from tqdm import tqdm
import msvi.utils.bballs as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("train")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
# Training.
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = data_utils.get_scheduler(optimizer, param.n_iters, param.lr)
bma = utils.BatchMovingAverage(k=10)
data_transform = data_utils.get_data_transform()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
utils.set_seed(param.seed)
for i in tqdm(range(param.n_iters), total=param.n_iters):
elbo.train()
t, y, traj_inds = [bi.to(device) for bi in next(iter(train_loader))]
# t = t + (torch.rand_like(t) - 0.5) * 2 * param.sigT
y = data_transform(y)
L1, L2, L3, x, s = elbo(t, y, traj_inds, param.block_size, scaler=1.0)
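# Rescale the batch estimates of the data-dependent ELBO terms (L1, L2) to the full training set.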
L1 *= len(train_dataset) / param.batch_size
L2 *= len(train_dataset) / param.batch_size
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# Validation on full trajectory predictions.
if i % int(0.00333 * param.n_iters) == 0 or i == param.n_iters - 1:
with torch.no_grad():
elbo.eval()
t_val, y_val, _ = [bi.to(device) for bi in next(iter(val_loader))]
y_full_traj = utils.pred_full_traj(param, elbo, t, y)
y_val_full_traj = utils.pred_full_traj(param, elbo, t_val, y_val)
train_full_traj_mse = nn.MSELoss()(y_full_traj, y).item()
val_full_traj_mse = nn.MSELoss()(y_val_full_traj, y_val).item()
bma.add_value(val_full_traj_mse)
if bma.get_average() <= bma.get_min_average():
utils.save_model(elbo, param.model_folder, param.name)
wandb.log(
{
"-L1": -L1.item(),
"L2": L2.item(),
"L3": L3.item(),
"-ELBO": loss.item(),
"train_full_traj_mse": train_full_traj_mse,
"val_full_traj_mse": val_full_traj_mse,
"lr": optimizer.param_groups[0]["lr"],
"scaler": 1.0,
},
step=i
)
if param.visualize == 1:
data_utils.visualize_trajectories(
traj=[
y[[0]].detach().cpu().numpy(),
y_full_traj[[0]].detach().cpu().numpy(),
y_val[[0]].detach().cpu().numpy(),
y_val_full_traj[[0]].detach().cpu().numpy(),
],
vis_inds=list(range(y.shape[1]))[:-1:max(1, int(0.09*y.shape[1]))],
title=f"Iteration {i}",
path=f"./img/{param.name}/",
img_name=f"iter_{i}.png",
)
| 3,556 | 29.663793 | 107 | py |
msvi | msvi-main/msvi/model.py | from typing import Union
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.bernoulli import Bernoulli
from torch.distributions.continuous_bernoulli import ContinuousBernoulli
from einops import reduce
from msvi.decoder import IDecoder
from msvi.trans_func import ITransitionFunction
from msvi.posterior import extract_time_grids
Tensor = torch.Tensor
ParameterDict = nn.ParameterDict
class IModel(ABC, nn.Module):
@property
@abstractmethod
def g(self) -> IDecoder:
"""Returns the decoder."""
pass
@property
@abstractmethod
def F(self) -> ITransitionFunction:
"""Returns the transition function."""
pass
@property
@abstractmethod
def prior_param(self) -> ParameterDict:
"""Returns parameters of prior distributions."""
pass
@abstractmethod
def sample(self, t: Tensor, x0: Tensor) -> Tensor:
"""Samples a trajectory from the model. If x0=None, uses the prior to
sample the initial condition.
Args:
t: Time points at which to evaluate the trajectory. Has shape (M, ).
x0: Initial condition. Has shape (K, ).
Returns:
Trajectory sampled from the model. Has shape (1, M, N, D).
"""
pass
@abstractmethod
def loglik(self, y: Tensor, x: Tensor) -> Tensor:
"""Evaluates log likelihood p(y|x) for each snapshot.
Args:
y: Observations. Has shape (S, M, N, D).
x: Latent states. Has shape (S, M, K).
Returns:
Log likelihood for each snapshot. Has shape (S, M, 1).
"""
pass
@abstractmethod
def set_theta(self, theta: dict[str, Tensor]) -> None:
"""Sets parameters of g and F to theta["theta_g"] and theta["theta_F"] respectively.
Args:
theta: Dictionary with new parameter values. Must contain keys
theta_g and theta_F.
"""
pass
class ModelBase(IModel):
def __init__(
self,
prior_param_dict: ParameterDict,
g: IDecoder,
F: ITransitionFunction,
) -> None:
super().__init__()
self._check_param_shapes(prior_param_dict)
self._prior_param = prior_param_dict
self._g = g
self._F = F
@property
def g(self) -> IDecoder:
return self._g
@property
def F(self) -> ITransitionFunction:
return self._F
@property
def prior_param(self) -> ParameterDict:
return self._prior_param
def sample(self, t: Tensor, x0: Tensor) -> Tensor:
x = self._sample_x(t, x0)
y = self._sample_lik(x)
return y
def loglik(self, y: Tensor, x: Tensor) -> Tensor:
return self._eval_loglik(y, x)
def set_theta(self, theta: dict[str, Tensor]) -> None:
self.g.set_param(theta["theta_g"])
self.F.set_param(theta["theta_F"])
def _sample_x(self, t: Tensor, x0: Union[None, Tensor] = None) -> Tensor:
if x0 is None:
x0 = self._sample_ic()
x = self._sample_traj(t, x0)
return x
def _sample_ic(self):
mu0, sig0 = self.prior_param["mu0"], self.prior_param["sig0"]
x0 = mu0 + sig0 * torch.randn_like(sig0)
return x0
def _sample_traj(self, t, x0):
x = torch.empty((1, t.shape[0], x0.shape[0]), device=x0.device)
x[0, 0, :] = x0
s_curr = x0
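# Autoregressive rollout: each new state is F applied to the previous (noisy) state over one
# time step; sigXi controls the transition noise injected between steps.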
for i in range(1, t.shape[0]):
x[:, [i], :] = self.F(s_curr, t=extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
eps = self.prior_param["sigXi"] * torch.randn_like(x[:, [i], :])
s_curr = x[:, [i], :] + eps
return x
def _check_param_shapes(self, d: ParameterDict) -> None:
scalar_param_names = ["sigXi", "mu_theta", "sig_theta"]
for param_name in scalar_param_names:
assert d[param_name].shape == torch.Size([1]), f"{param_name} must have shape (1, ) but has {d[param_name].shape}."
assert len(d["mu0"].shape) == 1, f"mu0 must have shape (K, ) but has {d['mu0'].shape}."
assert len(d["sig0"].shape) == 1, f"sig0 must have shape (K, ) but has {d['sig0'].shape}."
def _sample_lik(self, x: Tensor) -> Tensor:
raise NotImplementedError()
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
raise NotImplementedError()
class ModelNormal(ModelBase):
def _sample_lik(self, x: Tensor) -> Tensor:
param = self.g(x)
mu, sig = param[..., 0], param[..., 1]
y = Normal(mu, sig).rsample()
return y
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
param = self.g(x)
mu, sig = param[..., 0], param[..., 1]
loglik = Normal(mu, sig).log_prob(y)
loglik = reduce(loglik, "s m n d -> s m ()", "sum")
return loglik
class ModelNormalSecondOrder(ModelNormal):
def _sample_lik(self, x: Tensor) -> Tensor:
mask = self.create_mask(x)
return super()._sample_lik(x * mask)
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
mask = self.create_mask(x)
return super()._eval_loglik(y, x * mask)
def create_mask(self, x: Tensor) -> Tensor:
"""Masks the 'velocity' part of the latent space since we want to use
only the 'position' to reconstruct the observations."""
K = x.shape[2]
mask = torch.ones_like(x)
mask[:, :, K//2:] = 0.0
return mask
class ModelBernoulli(ModelBase):
def _sample_lik(self, x: Tensor) -> Tensor:
p = self.g(x)[..., 0]
y = Bernoulli(p).rsample()
return y
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
p = self.g(x)[..., 0]
loglik = Bernoulli(p).log_prob(y)
loglik = reduce(loglik, "s m n d -> s m ()", "sum")
return loglik
class ModelContinuousBernoulli(ModelBase):
def _sample_lik(self, x: Tensor) -> Tensor:
p = self.g(x)[..., 0]
y = ContinuousBernoulli(p).rsample()
return y
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
p = self.g(x)[..., 0]
loglik = ContinuousBernoulli(p).log_prob(y)
loglik = reduce(loglik, "s m n d -> s m ()", "sum")
return loglik
| 6,335 | 29.315789 | 127 | py |
msvi | msvi-main/msvi/dataset.py | from typing import Union
import torch
from torch.utils.data import Dataset
Tensor = torch.Tensor
class TrajectoryDataset(Dataset):
"""Stores trajectories and time grids.
Used to store trajectories `y` and the corresponding time grids `t`.
Each trajectory is assumed to have three dimensions:
(time points, observation points, observation dim.).
Each time grid is assumed to have two dimensions: (time points, 1).
If `max_len` is not None, a subtrajectory of length `max_len` is
selected from each trajectory and time grid.
Args:
t: Contains time grids of various lengths M.
The shape of each time grid t[i] must be (M_i, 1).
y: Contains trajectories of various lengths.
The shape of each trajectory y[i] must be (M_i, N, D).
max_len: Length of subtrajectories selected from each trajectory and time grid.
"""
def __init__(self, t: list[Tensor], y: list[Tensor], max_len: Union[None, int] = None) -> None:
self.t = t
self.y = y
self.max_len = max_len
def __len__(self) -> int:
return len(self.t)
def __getitem__(self, idx: int) -> tuple[Tensor, Tensor, Tensor]:
t = self.t[idx]
y = self.y[idx]
traj_ind = torch.tensor(idx, dtype=torch.long)
if self.max_len is not None:
t = t[:self.max_len]
y = y[:self.max_len]
return t, y, traj_ind
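# Minimal usage sketch (illustrative; the sizes below are arbitrary, not from the original code):
# t = [torch.linspace(0, 1, 25).reshape(-1, 1) for _ in range(10)]  # each (M_i, 1)
# y = [torch.randn(25, 32, 1) for _ in range(10)]                   # each (M_i, N, D)
# dataset = TrajectoryDataset(t, y, max_len=20)
# t_i, y_i, idx = dataset[0]  # (20, 1), (20, 32, 1), scalar trajectory index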
| 1,444 | 31.840909 | 99 | py |
msvi | msvi-main/msvi/decoder.py | from abc import ABC, abstractmethod
import torch
import torch.nn as nn
Tensor = torch.Tensor
Module = nn.Module
Parameter = nn.parameter.Parameter
class IDecoder(Module, ABC):
@abstractmethod
def forward(self, x: Tensor) -> Tensor:
"""Maps latent state to parameters of p(y|x).
Args:
x: Latent state. Has shape (S, M, K).
Returns:
param: Parameters of p(y|x). Has shape (S, M, N, D, num. of param. groups in p(y|x)).
For example, the number of parameter groups in a Normal p(y|x) is 2 (mean and variance).
"""
pass
@abstractmethod
def set_param(self, param: Tensor) -> None:
"""Sets parameters to `param`.
Args:
param: New parameter values.
"""
pass
@abstractmethod
def param_count(self) -> int:
"""Calculates the number of parameters.
Returns:
The number of parameters.
"""
pass
class NeuralDecoder(IDecoder):
"""Neural-network-based decoder."""
def __init__(self, decoder: Module, layers_to_count: list = []) -> None:
super().__init__()
self.decoder = decoder
self.layers_to_count = [
nn.Linear,
nn.Conv2d, nn.ConvTranspose2d,
nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d,
] # default
self.layers_to_count.extend(layers_to_count) # user-specified (must contain weight and bias)
def forward(self, x: Tensor) -> Tensor:
return self.decoder(x)
def set_param(self, param: Tensor) -> None:
# Note: after calling set_param() weight and bias of each layer will become tensors,
# so calling .parameters() will not show them.
assert self.param_count() == param.numel(), (
f"The size of param ({param.numel()}) must be the same as self.param_count()"
f"({self.param_count()})"
)
layers = self._get_layers(self.layers_to_count)
self._set_layer_param_to_vec(layers, param)
def param_count(self) -> int:
param_count = 0
layers = self._get_layers(self.layers_to_count)
for layer in layers:
self._check_weight_and_bias_of_layer(layer)
layer_param_count = layer.weight.numel() + layer.bias.numel()
param_count += layer_param_count
return param_count
def _get_layers(self, layer_types: list) -> list:
"""Returns all layers in `self.decoder` whose type is present in `layer_types`.
Args:
layer_types: A list with the required layer types (e.g. nn.Linear).
Returns:
Layers of `self.decoder` whose type is in `layer_types`.
"""
return_layers = []
for layer in self.decoder.modules():
if type(layer) in layer_types:
return_layers.append(layer)
return return_layers
def _set_layer_param_to_vec(self, layers: list[Module], vec: Tensor) -> None:
"""Sets parameters of Modules in `layers` to elements of `vec`.
Args:
layers: List of Modules whose parameters need to be set.
vec: 1D Tensor with parameters.
"""
pointer = 0
for layer in layers:
self._check_weight_and_bias_of_layer(layer)
layer_param_count = layer.weight.numel() + layer.bias.numel() # type: ignore
layer_weight_count = layer.weight.numel() # type: ignore
layer_param = vec[pointer:pointer + layer_param_count]
layer_weight = layer_param[:layer_weight_count].view_as(layer.weight) # type: ignore
layer_bias = layer_param[layer_weight_count:].view_as(layer.bias) # type: ignore
self._del_set_layer_attr(layer, "weight", layer_weight)
self._del_set_layer_attr(layer, "bias", layer_bias)
pointer += layer_param_count
def _del_set_layer_attr(self, layer, attr_name, attr_val):
delattr(layer, attr_name)
setattr(layer, attr_name, attr_val)
def _check_weight_and_bias_of_layer(self, layer: Module) -> None:
assert (type(layer.weight) is Tensor or type(layer.weight) is Parameter), (
f"weight of layer {layer} must be Tensor or Parameter.")
assert (type(layer.bias) is Tensor or type(layer.bias) is Parameter), (
f"bias of layer {layer} must be Tensor or Parameter.")
| 4,429 | 34.15873 | 104 | py |
msvi | msvi-main/msvi/tf_encoder.py | import torch
import torch.nn as nn
Tensor = torch.Tensor
Module = nn.Module
class TFEncoder(nn.Module):
# Modified https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
def __init__(
self,
d_model: int,
self_attn: Module,
t: Tensor,
dim_feedforward: int = 2048,
dropout: float = 0.0,
layer_norm_eps: float = 1e-5,
**kwargs,
) -> None:
super().__init__()
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm([d_model], eps=layer_norm_eps)
self.norm2 = nn.LayerNorm([d_model], eps=layer_norm_eps)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = torch.nn.functional.relu # type: ignore
self.self_attn = self_attn
def forward(self, x: Tensor) -> Tensor:
x = self.norm1(x + self._sa_block(x))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x: Tensor) -> Tensor:
x = self.self_attn(x, return_weights=False)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
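# Usage sketch (illustrative): a single post-norm encoder block built around one of the attention
# layers from msvi.attention (the `t` argument is accepted for API uniformity but not used inside
# the block itself).
# attn = DotProductAttention(d_model=64)
# block = TFEncoder(d_model=64, self_attn=attn, t=torch.zeros(1, 10, 1), dim_feedforward=128)
# out = block(torch.randn(4, 10, 64))  # (S, M, d_model)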
| 1,465 | 27.745098 | 113 | py |
msvi | msvi-main/msvi/elbo.py | from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from msvi.model import IModel
from msvi.posterior import IVariationalPosterior, AmortizedMultipleShootingPosterior
from einops import repeat
Tensor = torch.Tensor
class IELBO(nn.Module, ABC):
@abstractmethod
def forward(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
scaler: float = 1.0,
) -> tuple[Tensor, ...]:
"""Evaluates ELBO for the observations y.
Args:
t: Time grid for the observations. Has shape (S, M, 1).
y: A batch of observations. Has shape (S, M, N, D).
batch_ids: Global indices of trajectories in the batch. Has shape (S, ).
block_size: Block size.
scaler: Scaler for KL(q(s_i)||p(s_i|s_i-1)) terms.
Returns:
Parts of the ELBO (L1, L2, L3), states (x), and shooting variables (s).
"""
pass
class ELBOBase(IELBO):
def __init__(
self,
p: IModel,
q: IVariationalPosterior,
) -> None:
super().__init__()
self.p = p
self.q = q
def forward(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
scaler: float = 1.0,
) -> tuple[Tensor, ...]:
# Sample approximate posterior.
self.p.set_theta(self.q.sample_theta())
s, x = self.q.sample_s(t, y, batch_ids, block_size)
# Calculate parts of ELBO.
L1 = self.calc_L1(x, y)
L2 = self.calc_L2(x, batch_ids, block_size, scaler)
L3 = self.calc_L3()
return L1, L2, L3, x, s
def calc_L1(self, x: Tensor, y: Tensor) -> Tensor:
return self.p.loglik(y, x).sum()
def calc_L2(self, x: Tensor, batch_ids: Tensor, block_size: int, scaler: float) -> Tensor:
raise NotImplementedError()
def calc_L3(self) -> Tensor:
n = self.q.posterior_param["mu_theta_g"].numel()
L3_0 = self.kl_norm_norm(
self.q.posterior_param["mu_theta_g"],
self.p.prior_param["mu_theta"].expand(n),
torch.exp(self.q.posterior_param["log_sig_theta_g"]),
self.p.prior_param["sig_theta"].expand(n),
).sum()
n = self.q.posterior_param["mu_theta_F"].numel()
L3_1 = self.kl_norm_norm(
self.q.posterior_param["mu_theta_F"],
self.p.prior_param["mu_theta"].expand(n),
torch.exp(self.q.posterior_param["log_sig_theta_F"]),
self.p.prior_param["sig_theta"].expand(n),
).sum()
return L3_0 + L3_1
def kl_norm_norm(self, mu0: Tensor, mu1: Tensor, sig0: Tensor, sig1: Tensor) -> Tensor:
"""Calculates KL divergence between two K-dimensional Normal
distributions with diagonal covariance matrices.
Args:
mu0: Mean of the first distribution. Has shape (*, K).
mu1: Mean of the second distribution. Has shape (*, K).
sig0: Diagonal of the covariance matrix of the first distribution. Has shape (*, K).
sig1: Diagonal of the covariance matrix of the second distribution. Has shape (*, K).
Returns:
KL divergence between the distributions. Has shape (*, 1).
"""
assert mu0.shape == mu1.shape == sig0.shape == sig1.shape, (f"{mu0.shape=} {mu1.shape=} {sig0.shape=} {sig1.shape=}")
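# Closed form for diagonal Gaussians:
# KL = 0.5 * sum_k [ (sig0_k/sig1_k)^2 + (mu1_k - mu0_k)^2 / sig1_k^2 + 2*log(sig1_k/sig0_k) - 1 ]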
a = (sig0 / sig1).pow(2).sum(-1, keepdim=True)
b = ((mu1 - mu0).pow(2) / sig1**2).sum(-1, keepdim=True)
c = 2 * (torch.log(sig1) - torch.log(sig0)).sum(-1, keepdim=True)
kl = 0.5 * (a + b + c - mu0.shape[-1])
return kl
class SingleShootingELBO(ELBOBase):
def calc_L2(self, x: Tensor, batch_ids: Tensor, block_size: int, scaler: float) -> Tensor:
S, M, K = x.shape
gamma = self.q.posterior_param["gamma"][batch_ids]
tau = torch.exp(self.q.posterior_param["log_tau"][batch_ids])
L2_0 = self.kl_norm_norm(
gamma[:, 0, :],
repeat(self.p.prior_param["mu0"], "k -> s k", s=S, k=K),
tau[:, 0, :],
repeat(self.p.prior_param["sig0"], "k -> s k", s=S, k=K)
).sum()
L2_1 = self.kl_norm_norm(
x[:, 1:-1, :],
x[:, 1:-1, :],
tau[:, 1:, :],
repeat(self.p.prior_param["sigXi"], "() -> s m k", s=S, m=M-2, k=K)
).sum()
return L2_0 + scaler * L2_1
class MultipleShootingELBO(ELBOBase):
def calc_L2(self, x: Tensor, batch_ids: Tensor, block_size: int, scaler: float) -> Tensor:
gamma = self.q.posterior_param["gamma"][batch_ids, ::block_size, :]
tau = torch.exp(self.q.posterior_param["log_tau"][batch_ids, ::block_size, :])
x_sub = x[:, 0:-1:block_size, :]
S, B, K = x_sub.shape
L2_0 = self.kl_norm_norm(
gamma[:, 0, :],
repeat(self.p.prior_param["mu0"], "k -> s k", s=S, k=K),
tau[:, 0, :],
repeat(self.p.prior_param["sig0"], "k -> s k", s=S, k=K)
).sum()
L2_1 = self.kl_norm_norm(
gamma[:, 1:, :],
x_sub[:, 1:, :],
tau[:, 1:, :],
repeat(self.p.prior_param["sigXi"], "() -> s b k", s=S, b=B-1, k=K)
).sum()
return L2_0 + scaler * L2_1
class AmortizedMultipleShootingELBO(ELBOBase):
def __init__(self, p: IModel, q: AmortizedMultipleShootingPosterior) -> None:
super().__init__(p, q)
self.q = q
def forward(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
scaler: float = 1.0,
) -> tuple[Tensor, ...]:
self.q.rec_net.update_time_grids(t) # update recognition network before sampling s
return super().forward(t, y, batch_ids, block_size, scaler)
def calc_L2(self, x: Tensor, batch_ids: Tensor, block_size: int, scaler: float) -> Tensor:
gamma = self.q.gamma[:, ::block_size, :]
tau = self.q.tau[:, ::block_size, :]
x_sub = x[:, 0:-1:block_size, :]
S, B, K = x_sub.shape
L2_0 = self.kl_norm_norm(
gamma[:, 0, :],
repeat(self.p.prior_param["mu0"], "k -> s k", s=S, k=K),
tau[:, 0, :],
repeat(self.p.prior_param["sig0"], "k -> s k", s=S, k=K)
).sum()
L2_1 = self.kl_norm_norm(
gamma[:, 1:, :],
x_sub[:, 1:, :],
tau[:, 1:, :],
repeat(self.p.prior_param["sigXi"], "() -> s b k", s=S, b=B-1, k=K)
).sum()
return L2_0 + scaler * L2_1
| 6,622 | 31.465686 | 125 | py |
msvi | msvi-main/msvi/__init__.py | import msvi.decoder # noqa
import msvi.trans_func # noqa
import msvi.model # noqa
import msvi.posterior # noqa
import msvi.elbo # noqa
import msvi.rec_net # noqa
import msvi.utils # noqa
| 195 | 20.777778 | 30 | py |
msvi | msvi-main/msvi/attention.py | from abc import ABC, abstractmethod
from typing import Union
import numpy as np
import torch
import torch.nn as nn
Tensor = torch.Tensor
Module = nn.Module
class IAttention(Module, ABC):
@abstractmethod
def forward(
self,
x: Tensor,
return_weights: bool = True
) -> Union[tuple[Tensor, Tensor], tuple[Tensor, None]]:
"""Maps input sequence x to output sequence.
Args:
x: Input sequence. Has shape (S, M, K).
return_weights: If True, returns attention weights. Otherwise, returns None.
Returns:
y: Output sequence. Has shape (S, M, K).
attn_weights: Attention weights. Has shape (S, M, M).
None is returned if `return_weights` is False.
"""
pass
@abstractmethod
def update_time_grid(self, t: Tensor) -> None:
"""Updates all parts of the class that depend on time grids (except submodules
which might also depend on time grids, those must be upated separately
(see msvi.rec_net)).
Args:
t: New time grids. Has shape (S, M, 1).
"""
pass
class AttentionBase(IAttention):
def __init__(self, d_model: int, rpe: Union[Module, None] = None, drop_prob: float = 0.0):
super().__init__()
self.d_model = d_model
self.rpe = rpe
self.drop_prob = drop_prob
def forward(self, x: Tensor, return_weights: bool = True) -> Union[tuple[Tensor, Tensor], tuple[Tensor, None]]:
attn_weights = self._eval_attn_weights(x)
output = self._eval_output(attn_weights, x)
if return_weights:
return output, attn_weights
else:
return output, None
def drop(self, w: Tensor) -> Tensor:
"""Sets an element of w to -inf with probability self.drop_prob.
Does not drop the diagonal and one of the neighboring elements."""
dont_drop = torch.eye(w.shape[1], dtype=w.dtype, device=w.device) # leave the diagonal
inds = torch.arange(0, w.shape[1], 1, device=w.device)
shift = torch.randint(low=0, high=2, size=(w.shape[1],), device=w.device)
shift[0] = 1 # leave right neighbor for y1
shift[-1] = -1 # leave left neighbor for yM
shift[shift == 0] = -1 # randomly leave left or right neighbor for y2,...yM-1
dont_drop[inds, inds+shift] = 1
prob = torch.ones_like(w) * (1.0 - self.drop_prob)
prob = torch.clip(prob + dont_drop, 0, 1)
mask = torch.bernoulli(prob) # 1 - don't drop, 0 - drop
mask[mask == 0] = torch.inf
mask[mask == 1] = 0
return w - mask
def update_time_grid(self, t: Tensor) -> None:
pass
def _eval_attn_weights(self, x: Tensor) -> Tensor:
raise NotImplementedError()
def _eval_output(self, attn_weights: Tensor, x: Tensor) -> Tensor:
raise NotImplementedError()
class DotProductAttention(AttentionBase):
def __init__(self, d_model: int, rpe: Union[Module, None] = None, **kwargs):
super().__init__(d_model, rpe)
self.W_k = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_v = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_q = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_out = nn.Linear(self.d_model, self.d_model, bias=False)
def _eval_attn_weights(self, x: Tensor) -> Tensor:
Q, K = self.W_q(x), self.W_k(x)
unnorm_attn_weights = torch.bmm(Q, torch.transpose(K, 1, 2)) / self.d_model**0.5
attn_weights = nn.Softmax(-1)(unnorm_attn_weights)
return attn_weights
def _eval_output(self, attn_weights: Tensor, x: Tensor) -> Tensor:
V = self.W_v(x)
if self.rpe is None:
output = torch.bmm(attn_weights, V)
else:
output = torch.bmm(attn_weights, V) + (attn_weights.unsqueeze(-1) * self.rpe()).sum(2)
return self.W_out(output)
class TemporalAttention(AttentionBase):
def __init__(
self,
d_model: int,
t: Tensor,
eps: float,
delta_r: float,
p: float,
rpe: Union[Module, None] = None,
drop_prob: float = 0.0,
**kwargs
) -> None:
super().__init__(d_model, rpe, drop_prob)
self.eps = eps
self.delta_r = delta_r
self.p = p if p != -1 else torch.inf
self.W_v = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_out = nn.Linear(self.d_model, self.d_model, bias=False)
self.attn_weights: Tensor
self.update_time_grid(t)
def _eval_attn_weights(self, x: Tensor) -> Tensor:
if self.training:
attn_weights = nn.Softmax(-1)(self.drop(self.unnorm_temporal_attn_weights))
else:
attn_weights = nn.Softmax(-1)(self.unnorm_temporal_attn_weights)
return attn_weights
def _eval_output(self, attn_weights: Tensor, x: Tensor) -> Tensor:
assert x.shape[0:2] == attn_weights.shape[0:2], (
"Batch size and number of time points in `x` and `attn_weights` must be the same. "
f"Currently {x.shape=} and {attn_weights.shape=}."
)
V = self.W_v(x)
if self.rpe is None:
output = torch.bmm(attn_weights, V)
else:
output = torch.bmm(attn_weights, V) + (attn_weights.unsqueeze(-1) * self.rpe()).sum(2)
return self.W_out(output)
@torch.no_grad()
def update_time_grid(self, t: Tensor) -> None:
dt = torch.cdist(t, t, p=1).float()
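# log(eps) * (dt/delta_r)^p == log(eps^((dt/delta_r)^p)), so after the softmax a pair of time
# points separated by delta_r is downweighted by roughly a factor of eps, and more distant
# pairs are suppressed even harder (the larger p, the sharper the cutoff).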
self.unnorm_temporal_attn_weights = np.log(self.eps) * torch.pow(dt/self.delta_r, self.p)
class TemporalDotProductAttention(AttentionBase):
def __init__(
self,
d_model: int,
t: Tensor,
eps: float,
delta_r: float,
p: float,
rpe: Union[Module, None] = None,
drop_prob: float = 0.0,
**kwargs
) -> None:
super().__init__(d_model, rpe, drop_prob)
self.eps = eps
self.delta_r = delta_r
self.p = p if p != -1 else torch.inf
self.W_k = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_v = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_q = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_out = nn.Linear(self.d_model, self.d_model, bias=False)
self.unnorm_temporal_attn_weights: Tensor
self.update_time_grid(t)
def _eval_attn_weights(self, x: Tensor) -> Tensor:
Q, K = self.W_q(x), self.W_k(x)
unnorm_dotprod_attn_weights = torch.bmm(Q, torch.transpose(K, 1, 2)) / self.d_model**0.5
if self.training:
attn_weights = nn.Softmax(-1)(self.drop(unnorm_dotprod_attn_weights + self.unnorm_temporal_attn_weights))
else:
attn_weights = nn.Softmax(-1)(unnorm_dotprod_attn_weights + self.unnorm_temporal_attn_weights)
return attn_weights
def _eval_output(self, attn_weights: Tensor, x: Tensor) -> Tensor:
assert x.shape[0:2] == attn_weights.shape[0:2], (
"Batch size and number of time points in `x` and `attn_weights` must be the same. "
f"Currently {x.shape=} and {attn_weights.shape=}."
)
V = self.W_v(x)
if self.rpe is None:
output = torch.bmm(attn_weights, V)
else:
output = torch.bmm(attn_weights, V) + (attn_weights.unsqueeze(-1) * self.rpe()).sum(2)
return self.W_out(output)
@torch.no_grad()
def update_time_grid(self, t: Tensor) -> None:
dt = torch.cdist(t, t, p=1).float()
self.unnorm_temporal_attn_weights = np.log(self.eps) * torch.pow(dt/self.delta_r, self.p)
class TemporalDotProductAttentionBaseline(TemporalDotProductAttention):
def __init__(
self,
d_model: int,
t: Tensor,
eps: float,
delta_r: float,
p: float,
n: int,
rpe: Union[Module, None] = None,
drop_prob: float = 0.0,
**kwargs
) -> None:
self.n = n
super().__init__(d_model, t, eps, delta_r, p, rpe, drop_prob, **kwargs)
@torch.no_grad()
def update_time_grid(self, t: Tensor) -> None:
super().update_time_grid(t)
self.unnorm_temporal_attn_weights += self._create_mask()
def _create_mask(self) -> Tensor:
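# Restrict attention to the n nearest neighbors on each side of the diagonal: all entries
# beyond the n-th off-diagonal are set to -inf before the softmax.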
M = self.unnorm_temporal_attn_weights.shape[1]
device = self.unnorm_temporal_attn_weights.device
ones = torch.ones((M, M), device=device).triu(self.n+1)
mask = ones + ones.T
mask[mask == 1] = -torch.inf
return mask.unsqueeze(0)
| 8,637 | 33.690763 | 117 | py |
msvi | msvi-main/msvi/trans_func.py | from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from torchdiffeq import odeint
from torchdiffeq import odeint_adjoint
from einops import rearrange
Tensor = torch.Tensor
Module = nn.Module
Parameter = nn.parameter.Parameter
class ITransitionFunction(Module, ABC):
@abstractmethod
def forward(self, s: Tensor, t: Tensor) -> Tensor:
"""Moves latent states forward in time.
Args:
s: Latent states. Has shape (S, B, K).
t: Time grids at which the latent states are evaluated (except the first time point).
The first time point of each time grid must be the temporal position
of the corresponding latent state. Has shape (S, B, block_size+1).
Returns:
s_new: New latent states. Has shape (S, B*block_size, K).
"""
pass
@abstractmethod
def set_param(self, param: Tensor) -> None:
"""Sets parameters of the module to `params`.
Args:
param: New parameter values.
"""
pass
@abstractmethod
def param_count(self) -> int:
"""Calculates the number of parameters over which the posterior is to be evaluated.
Returns:
The number of parameters.
"""
pass
class NeuralTransitionFunctionBase(ITransitionFunction):
def __init__(self, f: Module, layers_to_count: list = []):
super().__init__()
self.f = f
self.layers_to_count = [nn.Linear, nn.LayerNorm, nn.BatchNorm1d] # default
self.layers_to_count.extend(layers_to_count) # user-specified (must contain weight and bias)
def forward(self, s: Tensor, t: Tensor) -> Tensor:
raise NotImplementedError()
def set_param(self, param: Tensor) -> None:
assert self.param_count() == param.numel(), (
f"The size of param ({param.numel()}) must be the same as self.param_count()"
f"({self.param_count()})"
)
layers = self._get_layers(self.f, self.layers_to_count)
self._set_layer_param_to_vec(layers, param)
def param_count(self) -> int:
"""Each layer must contain weight and bias variables."""
param_count = 0
layers = self._get_layers(self.f, self.layers_to_count)
for layer in layers:
self._check_weight_and_bias_of_layer(layer)
layer_param_count = layer.weight.numel() + layer.bias.numel()
param_count += layer_param_count
return param_count
def _get_layers(self, f, layer_types: list) -> list:
"""Returns all layers in `f` whose type is present in `layer_types`.
Args:
layer_types: A list with the required layer types (e.g. [nn.Linear]).
Returns:
A list of layers in `f` whose types are in `layer_types`
"""
return_layers = []
for fi in f.modules():
if type(fi) in layer_types:
return_layers.append(fi)
return return_layers
def _set_layer_param_to_vec(self, layers: list[Module], vec: torch.Tensor) -> None:
"""Sets parameters of Modules in `layers` to elements of `vec`.
Args:
layers: A list of Modules whose parameters need to be set.
vec: A 1D Tensor with the parameters.
"""
pointer = 0
for layer in layers:
self._check_weight_and_bias_of_layer(layer)
layer_param_count = layer.weight.numel() + layer.bias.numel() # type: ignore
layer_weight_count = layer.weight.numel() # type: ignore
layer_param = vec[pointer:pointer + layer_param_count]
layer_weight = layer_param[:layer_weight_count].view_as(layer.weight) # type: ignore
layer_bias = layer_param[layer_weight_count:].view_as(layer.bias) # type: ignore
self._del_set_layer_attr(layer, "weight", layer_weight)
self._del_set_layer_attr(layer, "bias", layer_bias)
pointer += layer_param_count
def _del_set_layer_attr(self, layer, attr_name, attr_val):
delattr(layer, attr_name)
setattr(layer, attr_name, attr_val)
def _check_weight_and_bias_of_layer(self, layer: Module) -> None:
assert (type(layer.weight) is Tensor or type(layer.weight) is Parameter), (
f"weight of layer {layer} must be Tensor or Parameter.")
assert (type(layer.bias) is Tensor or type(layer.bias) is Parameter), (
f"bias of layer {layer} must be Tensor or Parameter.")
class MLPTransitionFunction(NeuralTransitionFunctionBase):
"""Time steps must be uniform and the number of blocks must be M-1."""
def forward(self, s: Tensor, t: Tensor) -> Tensor:
return self.f(s)
class ODETransitionFunction(NeuralTransitionFunctionBase):
def __init__(self, f: Module, layers_to_count: list = [], solver_kwargs: dict = {}):
super().__init__(f, layers_to_count=layers_to_count)
if "adjoint" in solver_kwargs.keys():
self.adjoint = solver_kwargs["adjoint"] == 1
del solver_kwargs["adjoint"]
else:
self.adjoint = False
self.solver_kwargs = solver_kwargs
def forward(self, s: Tensor, t: Tensor) -> Tensor:
S, B, K, block_size = *s.shape, t.shape[2] - 1
s_new = torch.zeros((S, B, block_size, K), dtype=s.dtype, device=s.device)
delta = torch.diff(t, dim=2).to(s.dtype)
t_sim = torch.tensor([0., 1.], dtype=t.dtype, device=t.device)
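# Time-rescaling trick: integrating dx/dt = f(x) * delta over the fixed interval [0, 1] is
# equivalent to integrating dx/dt = f(x) over a step of length delta, so the same t_sim grid
# can be reused for every block regardless of its actual time span.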
for i in range(block_size):
f = self.get_scaled_dynamics_function(delta[:, :, [i]])
# Squeeze-unsqueeze to avoid in-place modification which causes error during backward pass.
if i == 0:
s0 = s.unsqueeze(-2)
else:
s0 = s_new[:, :, [i-1], :]
if self.adjoint is True:
s_new[:, :, i, :] = odeint_adjoint(f, s0.squeeze(-2), t_sim, **self.solver_kwargs)[-1] # type: ignore
else:
s_new[:, :, i, :] = odeint(f, s0.squeeze(-2), t_sim, **self.solver_kwargs)[-1] # type: ignore
return rearrange(s_new, "S B block_size K -> S (B block_size) K")
def get_dynamics_function(self):
def dynamics_function(t, x):
return self.f(x)
return dynamics_function
def get_scaled_dynamics_function(self, delta):
f = self.get_dynamics_function()
def scaled_dynamics_function(t, x):
return f(t, x) * delta
return scaled_dynamics_function
class ODETransitionFunctionSecondOrder(ODETransitionFunction):
def get_dynamics_function(self):
"""Assumes that x = (x^s || x^d), then returns dxdt=(x^d || f(x^s||x^d))."""
def dynamics_function(t, x):
K = x.shape[2]
dxdt = torch.cat((x[:, :, K//2:], self.f(x)), dim=2)
return dxdt
return dynamics_function
| 6,931 | 34.917098 | 118 | py |
msvi | msvi-main/msvi/pos_enc.py | from typing import Union
import numpy as np
import torch
import torch.nn as nn
Tensor = torch.Tensor
Module = nn.Module
Sequential = nn.Sequential
class DiscreteSinCosPositionalEncoding(Module):
# Modified https://pytorch.org/tutorials/beginner/transformer_tutorial.html
def __init__(self, d_model: int, t: Tensor, max_tokens: int, dropout: float = 0.0, **kwargs):
assert d_model % 2 == 0, "d_model must be even."
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.d_model = d_model
self.max_tokens = max_tokens
self.update_time_grid(t)
def forward(self, x: Tensor) -> Tensor:
# x: Tensor, shape (S, M, K).
x = x + self.pe
return self.dropout(x)
def update_time_grid(self, t: Tensor) -> None:
# t: Tensor, shape (S, M, 1).
# assert torch.all((t - t[0]) < 1e-7).item() is True, "All time grids must be the same."
_, M, _ = t.shape
position = torch.arange(M, device=t.device).unsqueeze(1)
div_term = torch.exp(torch.arange(0, self.d_model, 2, device=t.device) * (-np.log(self.max_tokens) / self.d_model))
pe = torch.zeros(1, M, self.d_model, device=t.device)
pe[0, :, 0::2] = torch.sin(position * div_term)
pe[0, :, 1::2] = torch.cos(position * div_term)
self.pe = pe
class ContinuousSinCosPositionalEncoding(Module):
# Modified https://pytorch.org/tutorials/beginner/transformer_tutorial.html
def __init__(self, d_model: int, t: Tensor, max_tokens: int, max_time: float, dropout: float = 0.0, **kwargs):
assert d_model % 2 == 0, "d_model must be even."
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.d_model = d_model
self.max_tokens = max_tokens
self.max_time = max_time
self.update_time_grid(t)
def forward(self, x: Tensor) -> Tensor:
# x: Tensor, shape (S, M, K).
x = x + self.pe
return self.dropout(x)
def update_time_grid(self, t: Tensor) -> None:
# t: Tensor, shape (S, M, 1).
S, M, _ = t.shape
position = t / self.max_time * (self.max_tokens - 1)
div_term = torch.exp(torch.arange(0, self.d_model, 2, device=t.device) * (-np.log(self.max_tokens) / self.d_model)) # (K/2,)
pe = torch.zeros(S, M, self.d_model, device=t.device)
pe[:, :, 0::2] = torch.sin(position * div_term)
pe[:, :, 1::2] = torch.cos(position * div_term)
self.pe = pe
class RelativePositionalEncodingNN(Module):
def __init__(self, f: Union[Module, Sequential], t: Tensor, delta_r: float, **kwargs):
super().__init__()
self.f = f
self.delta_r = delta_r
self.squish_fn = nn.Hardtanh()
self.update_time_grid(t)
def forward(self) -> Tensor:
rpe = self.f(self.dt_prime_mat)
return rpe
def update_time_grid(self, t: Tensor) -> None:
# t: Tensor, shape (S, M, 1).
dt_mat = self._get_dt_matrix(t)
self.dt_prime_mat = self.squish_fn(dt_mat / self.delta_r).float()
def _get_dt_matrix(self, t: Tensor) -> Tensor:
"""Calculates the matrix of relative distances between all time points in `t`."""
dist_mat = torch.cdist(t, t, p=1) # (S, M, M)
dir_mat = torch.ones_like(dist_mat).triu() - torch.ones_like(dist_mat).tril() # (S, M, M)
dt_mat = (dir_mat * dist_mat).unsqueeze(-1) # (S, M, M, 1)
return dt_mat
class RelativePositionalEncodingInterp(Module):
def __init__(self, d_model: int, t: Tensor, delta_r: float, **kwargs):
super().__init__()
self.delta_r = delta_r
self.squish_fn = nn.Hardtanh()
self._set_random_vectors(d_model)
self.update_time_grid(t)
def forward(self) -> Tensor:
return self.pe
def update_time_grid(self, t: Tensor) -> None:
# t: Tensor, shape (S, M, 1).
dt_mat = self._get_dt_matrix(t)
dt_prime_mat = self.squish_fn(dt_mat / self.delta_r).float()
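# Map the squashed offsets from [-1, 1] to interpolation weights in [0, 1]; the encoding is a
# linear interpolation between the two fixed random vectors va and vb.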
self.lm = (dt_prime_mat + 1) / 2
pe = ((1 - self.lm) * self.va + self.lm * self.vb)
self.pe = pe
def _set_random_vectors(self, d_model: int) -> None:
va_ = (torch.rand((1, d_model)) - 0.5) * 2
va = va_ / torch.linalg.norm(va_, ord=np.inf)
vb = -va
self.register_buffer("va", va)
self.register_buffer("vb", vb)
def _get_dt_matrix(self, t: Tensor) -> Tensor:
"""Calculates the matrix of relative distances between all time points in `t`."""
dist_mat = torch.cdist(t, t, p=1) # (S, M, M)
dir_mat = torch.ones_like(dist_mat).triu() - torch.ones_like(dist_mat).tril() # (S, M, M)
dt_mat = (dir_mat * dist_mat).unsqueeze(-1) # (S, M, M, 1)
return dt_mat
| 4,793 | 35.045113 | 133 | py |
msvi | msvi-main/msvi/posterior.py | from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from msvi.trans_func import ITransitionFunction
from msvi.rec_net import RecognitionNet
Tensor = torch.Tensor
Module = nn.Module
ParameterDict = nn.ParameterDict
class IVariationalPosterior(ABC, Module):
@property
@abstractmethod
def posterior_param(self) -> nn.ParameterDict:
"""Returns parameters of the posterior distribution."""
pass
@abstractmethod
def sample_s(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
) -> tuple[Tensor, Tensor]:
"""Samples shooting variables (s_1, ..., s_B) from the posterior q(s|y).
Also returns states (x_1, ..., x_M).
Args:
t: Time points at which to evaluate the latent states. Has shape (S, M, 1).
y: Observations corresponding to the latent states. Used only for
amortized variational inference. Has shape (S, M, N, D).
batch_ids: Indices of the trajectories for which to sample the shooting variables.
Has shape (S, ).
block_size: Size of the blocks.
Returns:
A sample of the shooting variables with shape (S, B, K)
and the corresponding latent states with shape (S, M, K).
"""
pass
@abstractmethod
def sample_theta(self) -> dict[str, Tensor]:
"""Samples parameters of g and F from the posterior.
Returns:
Dictionary with a sample of the parameters.
"""
pass
class VariationalPosteriorBase(IVariationalPosterior):
def __init__(self, posterior_param_dict: ParameterDict):
super().__init__()
self._check_param_shapes(posterior_param_dict)
self._posterior_param = posterior_param_dict
@property
def posterior_param(self):
return self._posterior_param
def sample_theta(self):
mu_g, sig_g = self.posterior_param["mu_theta_g"], torch.exp(self.posterior_param["log_sig_theta_g"])
mu_F, sig_F = self.posterior_param["mu_theta_F"], torch.exp(self.posterior_param["log_sig_theta_F"])
theta = {
"theta_g": mu_g + sig_g * torch.randn_like(sig_g),
"theta_F": mu_F + sig_F * torch.randn_like(sig_F),
}
return theta
def _check_param_shapes(self, p: ParameterDict) -> None:
raise NotImplementedError()
def sample_s(self, t: Tensor, y: Tensor, batch_ids: Tensor, block_size: int) -> tuple[Tensor, Tensor]:
raise NotImplementedError()
class SingleShootingPosterior(VariationalPosteriorBase):
def __init__(
self,
posterior_param_dict: ParameterDict,
F: ITransitionFunction,
) -> None:
super().__init__(posterior_param_dict)
self.F = F
def sample_s(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
) -> tuple[Tensor, Tensor]:
gamma_0 = self.posterior_param["gamma"][batch_ids]
tau = torch.exp(self.posterior_param["log_tau"][batch_ids])
S, M, K = batch_ids.shape[0], t.shape[1], gamma_0.shape[2]
s = torch.zeros((S, M-1, K), device=tau.device)
x = torch.zeros((S, M, K), device=tau.device)
s[:, [0], :] = gamma_0 + tau[:, [0], :] * torch.randn((S, 1, K), device=tau.device)
x[:, [0], :] = s[:, [0], :]
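# Single-shooting rollout: each state x_i is obtained by pushing the previous shooting variable
# s_{i-1} through F over one time step; the next shooting variable s_i is a noisy copy of x_i.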
for i in range(1, M):
x_i = self.F(s[:, [i-1], :], t=extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
x[:, [i], :] = x_i
if i < (M - 1):
s[:, [i], :] = x_i + tau[:, [i], :] * torch.randn((S, 1, K), device=tau.device)
return s, x
def _check_param_shapes(self, p: dict[str, Tensor]) -> None:
model_parameter_names = ["mu_theta_g", "mu_theta_F", "log_sig_theta_g", "log_sig_theta_F"]
for param_name in model_parameter_names:
assert len(p[param_name].shape) == 1, f"{param_name} must have shape (num_parameters, ) but has {p[param_name].shape}."
assert len(p["gamma"].shape) == 3 and p["gamma"].shape[1] == 1, f"gamma must have shape (S, 1, K) but has {p['gamma'].shape}."
assert len(p["log_tau"].shape) == 3, f"log_tau must have shape (S, M-1, K) but has {p['log_tau'].shape}."
class MultipleShootingPosterior(VariationalPosteriorBase):
def __init__(
self,
posterior_param_dict: ParameterDict,
F: ITransitionFunction
) -> None:
super().__init__(posterior_param_dict)
self.F = F
def sample_s(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
) -> tuple[Tensor, Tensor]:
gamma = self.posterior_param["gamma"][batch_ids, ::block_size, :]
tau = torch.exp(self.posterior_param["log_tau"][batch_ids, ::block_size, :])
s = gamma + tau * torch.randn_like(gamma)
S, M, B, K = batch_ids.shape[0], t.shape[1], gamma.shape[1], gamma.shape[2]
x = torch.zeros((S, M, K), device=tau.device)
x[:, [0], :] = s[:, [0], :]
x[:, 1:, :] = self.F(s, t=extract_time_grids(t, n_blocks=B))
return s, x
def _check_param_shapes(self, p: dict[str, Tensor]) -> None:
model_parameter_names = ["mu_theta_g", "mu_theta_F", "log_sig_theta_g", "log_sig_theta_F"]
for param_name in model_parameter_names:
assert len(p[param_name].shape) == 1, f"{param_name} must have shape (num_parameters, ) but has {p[param_name].shape}."
assert len(p["gamma"].shape) == 3, f"gamma must have shape (S, M, K) but has {p['gamma'].shape}."
assert p["gamma"].shape == p["log_tau"].shape, f"shapes of gamma ({p['gamma'].shape}) and log_tau ({p['log_tau'].shape}) must be the same."
class AmortizedMultipleShootingPosterior(VariationalPosteriorBase):
def __init__(
self,
posterior_param_dict: ParameterDict,
F: ITransitionFunction,
rec_net: RecognitionNet,
) -> None:
super().__init__(posterior_param_dict)
self.F = F
self.rec_net = rec_net
self.gamma: Tensor
self.tau: Tensor
def sample_s(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
) -> tuple[Tensor, Tensor]:
assert y is not None, "Amortized posterior requires data y."
gamma, tau = self.rec_net(y)
self.gamma, self.tau = gamma[:, :-1, :], tau[:, :-1, :]
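# The recognition net returns per-time-point variational parameters; the last time point is
# dropped (no shooting variable starts there) and only every block_size-th entry below is used
# to sample the shooting variables.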
gamma = self.gamma[:, ::block_size, :]
tau = self.tau[:, ::block_size, :]
s = gamma + tau * torch.randn_like(tau)
S, M, B, K = batch_ids.shape[0], t.shape[1], gamma.shape[1], gamma.shape[2]
x = torch.zeros((S, M, K), device=tau.device)
x[:, [0], :] = s[:, [0], :]
x[:, 1:, :] = self.F(s, t=extract_time_grids(t, n_blocks=B))
return s, x
def _check_param_shapes(self, p: dict[str, Tensor]) -> None:
model_parameter_names = ["mu_theta_g", "mu_theta_F", "log_sig_theta_g", "log_sig_theta_F"]
for param_name in model_parameter_names:
assert len(p[param_name].shape) == 1, f"{param_name} must have shape (num_parameters, ) but has {p[param_name].shape}."
def extract_time_grids(t: Tensor, n_blocks: int) -> Tensor:
"""Extracts overlapping sub-grids from `t` for the given number of blocks.
Args:
t: Full time grids. Has shape (S, M, 1).
n_blocks: Number of blocks.
Returns:
sub_t: Overlapping sub-grids. Has shape (S, n_blocks, grid_size).
Simplified example:
For t=(t1, t2, t3, t4, t5) and n_blocks=2 returns (t1, t2, t3), (t3, t4, t5).
"""
S, M = t.shape[0:2]
assert (M - 1) % n_blocks == 0, "All blocks must be of equal size."
grid_size = int((M - 1) / n_blocks) + 1
sub_t = torch.empty((S, n_blocks, grid_size), dtype=t.dtype, device=t.device)
for b, i in enumerate(range(0, M-grid_size+1, grid_size-1)):
sub_t[:, [b], :] = torch.transpose(t[:, i:i+grid_size, :], 1, 2)
return sub_t
| 8,083 | 33.4 | 147 | py |
msvi | msvi-main/msvi/rec_net.py | import torch
import torch.nn as nn
from einops import rearrange
Tensor = torch.Tensor
Module = nn.Module
class RecognitionNet(Module):
def __init__(
self,
phi_enc: Module,
phi_agg: Module,
phi_gamma: Module,
phi_tau: Module,
tau_min: float,
) -> None:
"""This class is used to convert observations to variational parameters.
There are four main components:
phi_enc: a point-wise encoder which maps y:(S, M, N, D) to a:(S, M, K').
phi_agg: a sequence to sequence function which maps a:(S, M, K') to b:(S, M, K').
phi_gamma/phi_tau: a point-wise function which maps b:(S, M, K') to gamma/tau:(S, M, K).
First, observations `y` are converted to a lower-dimensional form `a` by the encoder `phi_enc`.
Then, sequence `a` is aggregated into another sequence `b` by `phi_agg`.
Finally, variational parameters are extracted from `b` by `phi_gamma` and `phi_tau`.
"""
super().__init__()
self.phi_enc = phi_enc
self.phi_agg = phi_agg
self.phi_gamma = phi_gamma
self.phi_tau = phi_tau
self.tau_min = tau_min
def forward(self, y: Tensor) -> tuple[Tensor, Tensor]:
"""Converts observations to variational parameters.
Args:
y: Observations. Has shape (S, M, N, D).
Returns:
gamma: Variational parameters. Has shape (S, M, K).
tau: Variational parameters. Has shape (S, M, K).
"""
a = self.phi_enc(y)
b = self.phi_agg(a)
gamma = self.phi_gamma(b)
tau = torch.exp(self.phi_tau(b)) + self.tau_min
return gamma, tau
def apply_batch_norm(self, gamma, bn):
S, M, _ = gamma.shape
gamma = rearrange(gamma, "s m k -> (s m) k")
gamma = bn(gamma)
gamma = rearrange(gamma, "(s m) k -> s m k", s=S, m=M)
return gamma
def update_time_grids(self, t: Tensor) -> None:
"""Updates all parts of aggregation net that depend on time grids."""
for module in self.phi_agg.modules():
if not hasattr(module, "update_time_grid"):
continue
if callable(getattr(module, "update_time_grid")):
module.update_time_grid(t) # type: ignore
class RecognitionNetSecondOrder(RecognitionNet):
"""Same as RecognitionNet but splits variational parameters into two groups."""
def __init__(
self,
phi_enc: Module,
phi_agg: Module,
phi_gamma: Module,
phi_tau: Module,
phi_agg_dyn: Module,
phi_gamma_dyn: Module,
phi_tau_dyn: Module,
tau_min: float,
) -> None:
super().__init__(phi_enc, phi_agg, phi_gamma, phi_tau, tau_min)
self.phi_agg_dyn = phi_agg_dyn
self.phi_gamma_dyn = phi_gamma_dyn
self.phi_tau_dyn = phi_tau_dyn
def forward(self, y: Tensor) -> tuple[Tensor, Tensor]:
a = self.phi_enc(y)
b_stat = self.phi_agg(a)
b_dyn = self.phi_agg_dyn(a)
gamma_stat = self.phi_gamma(b_stat)
tau_stat = torch.exp(self.phi_tau(b_stat)) + self.tau_min
gamma_dyn = self.phi_gamma_dyn(b_dyn)
tau_dyn = torch.exp(self.phi_tau_dyn(b_dyn))
gamma = torch.cat((gamma_stat, gamma_dyn), dim=2)
tau = torch.cat((tau_stat, tau_dyn), dim=2)
return gamma, tau
def update_time_grids(self, t: Tensor) -> None:
for agg_net in [self.phi_agg, self.phi_agg_dyn]:
for module in agg_net.modules():
if not hasattr(module, "update_time_grid"):
continue
if callable(getattr(module, "update_time_grid")):
module.update_time_grid(t) # type: ignore
def set_module_requires_grad(m: Module, value: bool):
for p in m.parameters():
p.requires_grad = value
| 3,937 | 31.01626 | 103 | py |
msvi | msvi-main/msvi/utils/utils.py | from types import SimpleNamespace
import torch
import torch.nn as nn
from einops import rearrange
from msvi.pos_enc import (
DiscreteSinCosPositionalEncoding,
ContinuousSinCosPositionalEncoding,
RelativePositionalEncodingInterp,
RelativePositionalEncodingNN
)
from msvi.attention import (
DotProductAttention,
TemporalAttention,
TemporalDotProductAttention,
TemporalDotProductAttentionBaseline,
)
from msvi.tf_encoder import TFEncoder
Tensor = torch.Tensor
Module = torch.nn.Module
Sequential = torch.nn.Sequential
def create_agg_net(param: SimpleNamespace, net_type: str) -> Sequential:
"""Constructs aggregation network."""
pos_enc_layers = {
"dsc": DiscreteSinCosPositionalEncoding,
"csc": ContinuousSinCosPositionalEncoding,
"rpeNN": RelativePositionalEncodingNN,
"rpeInterp": RelativePositionalEncodingInterp,
"none": None,
}
attn_layers = {
"dp": DotProductAttention,
"t": TemporalAttention,
"tdp": TemporalDotProductAttention,
"tdp_b": TemporalDotProductAttentionBaseline,
}
attn_key, pos_enc_key = param.h_agg_attn, param.h_agg_pos_enc
assert pos_enc_key in pos_enc_layers.keys(), f"Wrong position encoding name: {pos_enc_key}."
assert attn_key in attn_layers.keys(), f"Wrong attention layer name: {attn_key}."
t_init = torch.linspace(0, 1, 3).view(1, -1, 1) # update it later
pos_enc_args = {
"d_model": param.m_h*param.K,
"t": t_init,
"max_tokens": param.h_agg_max_tokens,
"max_time": param.h_agg_max_time,
"delta_r": param.h_agg_delta_r,
"f": nn.Linear(1, param.m_h*param.K, bias=False),
}
attn_args = {
"d_model": param.m_h*param.K,
"t": t_init,
"eps": 1e-2,
"delta_r": param.h_agg_delta_r,
"p": param.h_agg_p,
"n": param.n,
"drop_prob": param.drop_prob,
}
if net_type == "static":
param.h_agg_layers = param.h_agg_stat_layers
elif net_type == "dynamic":
param.h_agg_layers = param.h_agg_dyn_layers
modules = []
if pos_enc_key in ["dsc", "csc"]: # absolute positional encodings
pos_enc = pos_enc_layers[pos_enc_key](**pos_enc_args)
tf_enc_blocks = []
for _ in range(param.h_agg_layers):
tf_enc_block = TFEncoder(
d_model=param.m_h*param.K,
self_attn=attn_layers[attn_key](**attn_args),
t=t_init,
dim_feedforward=2*param.m_h*param.K,
)
tf_enc_blocks.append(tf_enc_block)
modules.extend([pos_enc, *tf_enc_blocks])
else: # relative positional encodings
if pos_enc_key == "none":
print("Using no positional encodings!")
pos_enc = None
else:
pos_enc = pos_enc_layers[pos_enc_key](**pos_enc_args)
tf_enc_blocks = []
for i in range(param.h_agg_layers):
if i == 0:
self_attn = attn_layers["t"](rpe=pos_enc, **attn_args)
else:
self_attn = attn_layers[attn_key](rpe=pos_enc, **attn_args)
tf_enc_block = TFEncoder(
d_model=param.m_h*param.K,
self_attn=self_attn,
t=t_init,
dim_feedforward=2*param.m_h*param.K,
)
tf_enc_blocks.append(tf_enc_block)
modules.extend(tf_enc_blocks)
return nn.Sequential(*modules)
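# Usage sketch (illustrative; the values mirror the pendulum argparser defaults, and note that
# create_agg_net also sets param.h_agg_layers internally):
# param = SimpleNamespace(m_h=4, K=32, h_agg_attn="tdp", h_agg_pos_enc="rpeNN",
#                         h_agg_max_tokens=51, h_agg_max_time=3.0, h_agg_delta_r=0.45,
#                         h_agg_p=-1, n=1, drop_prob=0.1,
#                         h_agg_stat_layers=4, h_agg_dyn_layers=8)
# phi_agg = create_agg_net(param, "static")  # nn.Sequential of TFEncoder blocks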
class CNNEncoder(Module):
"""Mapping from R^{NxD} to R^K."""
def __init__(self, K: int, N: int, D: int, n_channels: int) -> None:
super().__init__()
self.K = K
self.N = N
self.D = D
self.n_channels = n_channels
self.img_size = int(N**0.5)
self.n_feat = (self.img_size//16)**2 * (8 * n_channels)
self.f = nn.Sequential(
nn.Conv2d(D, n_channels, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
nn.BatchNorm2d(n_channels), # img_size/2
nn.Conv2d(n_channels, 2*n_channels, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
nn.BatchNorm2d(2*n_channels), # img_size/4
nn.Conv2d(2*n_channels, 4*n_channels, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
nn.BatchNorm2d(4*n_channels), # img_size/8
nn.Conv2d(4*n_channels, 8*n_channels, kernel_size=2, stride=2, padding=0),
nn.ReLU(),
nn.BatchNorm2d(8*n_channels), # img_size/16
nn.Flatten(),
nn.Linear(self.n_feat, K),
)
def forward(self, x: Tensor) -> Tensor:
# x: Tensor, shape (S, M, N, D)
S, M, _, _ = x.shape
x = rearrange(x, "s m (h w) d -> (s m) d h w", h=self.img_size, w=self.img_size)
x = self.f(x)
x = rearrange(x, "(s m) k -> s m k", s=S, m=M)
return x
class CNNDecoder(Module):
"""Mapping from R^K to R^{NxDxn_param}."""
def __init__(self, K: int, N: int, D: int, n_param: int, n_channels: int) -> None:
super().__init__()
self.K = K
self.N = N
self.D = D
self.n_param = n_param
self.n_channels = n_channels
self.img_size = int(N**0.5)
self.n_feat = (self.img_size//16)**2 * (8 * n_channels)
self.lin_layer = nn.Linear(K, self.n_feat)
self.f = nn.Sequential(
nn.ConvTranspose2d(8*n_channels, 4*n_channels, kernel_size=2, stride=2),
nn.ReLU(),
nn.BatchNorm2d(4*n_channels), # img_size/8
nn.ConvTranspose2d(4*n_channels, 2*n_channels, kernel_size=2, stride=2),
nn.ReLU(),
nn.BatchNorm2d(2*n_channels), # img_size/4
nn.ConvTranspose2d(2*n_channels, n_channels, kernel_size=2, stride=2),
nn.ReLU(),
nn.BatchNorm2d(n_channels), # img_size/2
nn.ConvTranspose2d(n_channels, n_channels, kernel_size=2, stride=2),
nn.ReLU(),
nn.BatchNorm2d(n_channels), # img_size
nn.Conv2d(n_channels, D*n_param, kernel_size=5, padding=2),
)
def forward(self, x: Tensor) -> Tensor:
# x: Tensor, shape (S, M, K)
S, M, _ = x.shape
nc, h = 8*self.n_channels, self.img_size//16
x = rearrange(self.lin_layer(x), "s m (nc h w) -> (s m) nc h w", nc=nc, h=h, w=h)
x = self.f(x)
x = rearrange(x, "(s m) (d npar) h w -> s m (h w) d npar", s=S, m=M, d=self.D, npar=self.n_param)
return x
class Sine(nn.Module):
def __init__(self, w=1.0):
super().__init__()
self.weight = nn.parameter.Parameter(torch.tensor(w), True)
self.bias = nn.parameter.Parameter(torch.tensor(0.0), False)
def forward(self, x):
return torch.sin(self.weight * x)
| 6,830 | 31.221698 | 105 | py |
msvi | msvi-main/msvi/utils/pendulum.py | import os
import pickle
import argparse
from typing import Union
from types import SimpleNamespace
import torch
import torch.nn as nn
import torchvision.transforms
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from einops import rearrange
from msvi.model import ModelNormal, ModelNormalSecondOrder
from msvi.posterior import AmortizedMultipleShootingPosterior
from msvi.elbo import AmortizedMultipleShootingELBO
from msvi.decoder import NeuralDecoder
from msvi.trans_func import ODETransitionFunction, ODETransitionFunctionSecondOrder
from msvi.rec_net import RecognitionNet, RecognitionNetSecondOrder
from msvi.dataset import TrajectoryDataset
from msvi.utils.utils import create_agg_net, Sine, CNNEncoder, CNNDecoder
plt.style.use("seaborn") # type: ignore
sns.set_style("whitegrid")
ndarray = np.ndarray
Tensor = torch.Tensor
Sequential = nn.Sequential
DataDict = dict[str, dict[str, list]]
TensorDataDict = dict[str, dict[str, list[Tensor]]]
Module = nn.Module
DATASET_NAME = "PENDULUM"
def create_argparser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# Data.
parser.add_argument("--data_folder", type=str, default="./experiments/data/datasets/pendulum/", help="Path to the dataset.")
parser.add_argument("--N", type=int, default=1024, help="Number of observation points.")
parser.add_argument("--D", type=int, default=1, help="Dimensionality of observation.")
parser.add_argument("--max_len", type=int, default=None, help="Truncation length for trajectories.")
parser.add_argument("--sigY", type=float, default=1e-3, help="Observation noise.")
# Model (common).
parser.add_argument("--K", type=int, default=32, help="Latent space dimension.")
parser.add_argument("--Xi", type=float, default=1e-4, help="Used to set variance for the continuity prior.")
parser.add_argument("--block_size", type=int, default=1, help="Number of time points in each block.")
# Model (g).
parser.add_argument("--g_cnn_channels", type=int, default=8, help="Channels in CNNDecoder.")
# Model (F).
parser.add_argument("--m_F", type=int, default=8, help="Dimensionality scaler for F.")
parser.add_argument("--F_nonlin", type=str, default="relu", help="Nonlinearity for F.")
parser.add_argument("--dyn_order", type=int, default=2, help="Order of the dynamcis function, must be 1 or 2.")
# Model (h).
parser.add_argument("--m_h", type=int, default=4, help="Dimensionality scaler for h.")
parser.add_argument("--h_enc_cnn_channels", type=int, default=8, help="Channels in CNNEncoder.")
parser.add_argument("--h_agg_attn", type=str, default="tdp", help="Attention type (dp, t, tdp, tdp_b).")
parser.add_argument("--h_agg_pos_enc", type=str, default="rpeNN", help="Position encoding type (csc, rpeNN, rpeInterp).")
parser.add_argument("--h_agg_stat_layers", type=int, default=4, help="Number of TFEncoder layers in static aggregation net.")
parser.add_argument("--h_agg_dyn_layers", type=int, default=8, help="Number of TFEncoder layers in dynamic aggregation net.")
parser.add_argument("--h_agg_max_tokens", type=int, default=51, help="Maximum expected number of tokens.")
parser.add_argument("--h_agg_max_time", type=float, default=3.0, help="Maximum expected observation time.")
parser.add_argument("--h_agg_delta_r", type=float, default=0.45, help="Attention time span at training time.")
parser.add_argument("--h_agg_p", type=float, default=-1, help="Exponent for temporal attention (use -1 for p=inf).")
parser.add_argument("--n", type=int, default=1, help="Number of nearest neighbors used for baseline aggregation net.")
parser.add_argument("--drop_prob", type=float, default=0.1, help="Attention dropout probability.") # 0.1
parser.add_argument("--tau_min", type=float, default=2e-2, help="Lower bound on the variance of q(s_i).") # 2e-2
parser.add_argument("--sigT", type=float, default=0.0, help="Scale of the noise added to the time grids for temporal neighborhood adjustment.") # 0.00025
# Training/validation/testing.
parser.add_argument("--scaler", type=float, default=1, help="Scaler for ELBO L2 term.")
parser.add_argument("--n_iters", type=int, default=300000, help="Number of training iterations.")
parser.add_argument("--lr", type=float, default=3e-4, help="Learning rate.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size.")
parser.add_argument("--solver", type=str, default="dopri5", help="Name of the ODE solver (see torchdiffeq).")
parser.add_argument("--rtol", type=float, default=1e-5, help="Relative tolerance for ODE solver.")
parser.add_argument("--atol", type=float, default=1e-5, help="Absolute tolerance for ODE solver.")
parser.add_argument("--adjoint", type=int, default=0, help="Use adjoint to evaluate gradient flag (0 - no, 1 - yes).")
parser.add_argument("--device", type=str, default="cuda", help="Device (cpu or cuda)")
parser.add_argument("--seed", type=int, default=13, help="Random seed.")
parser.add_argument("--group", default="None", help="Group for wandb.")
parser.add_argument("--tags", default=["no_tag"], nargs="+", help="Tags for wandb.")
parser.add_argument("--name", type=str, default="tmp", help="Name of the run.")
parser.add_argument("--visualize", type=int, default=1, help="Visualize predictions on validation set flag (0 - no, 1 - yes).")
parser.add_argument("--n_mc_samples", type=int, default=10, help="Number of samples for Monte Carlo integration.")
parser.add_argument("--delta_inf", type=float, default=0.45, help="Fraction of obsevations used for x0 inference at test time.")
parser.add_argument("--model_folder", type=str, default="./models/pendulum/", help="Folder for saving/loading models.")
return parser
def create_datasets(param: SimpleNamespace, device=None) -> tuple[TrajectoryDataset, ...]:
data = read_data(param.data_folder)
data = preprocess_data(data)
data = to_tensors(data, device)
train_dataset = TrajectoryDataset(data["train"]["t"], data["train"]["y"], param.max_len)
val_dataset = TrajectoryDataset(data["val"]["t"], data["val"]["y"], param.max_len)
test_dataset = TrajectoryDataset(data["test"]["t"], data["test"]["y"], param.max_len)
return train_dataset, val_dataset, test_dataset
def read_data(path: str) -> DataDict:
"""Reads data from folder `path` which contains subfolders train, val and test.
Each subfolder contains ndarrays with time grids and trajectories stored as
t.pkl and y.pkl files."""
data = {}
data["train"] = read_pickle(["t", "y"], path+"train/")
data["val"] = read_pickle(["t", "y"], path+"val/")
data["test"] = read_pickle(["t", "y"], path+"test/")
return data
def preprocess_data(data: DataDict) -> DataDict:
data["train"], train_stats = _preprocess_data(data["train"])
data["val"], _ = _preprocess_data(data["val"], train_stats)
data["test"], _ = _preprocess_data(data["test"], train_stats)
return data
def add_noise(data: DataDict, sig: float, seed: int) -> DataDict:
np.random.seed(seed)
for i in range(len(data["train"]["y"])):
data["train"]["y"][i] += np.random.randn(*data["train"]["y"][i].shape) * sig
for i in range(len(data["val"]["y"])):
data["val"]["y"][i] += np.random.randn(*data["val"]["y"][i].shape) * sig
for i in range(len(data["test"]["y"])):
data["test"]["y"][i] += np.random.randn(*data["test"]["y"][i].shape) * sig
return data
def to_tensors(data: DataDict, device=None) -> TensorDataDict:
tensor_data = {}
tensor_data["train"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["train"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["train"]["y"]],
}
tensor_data["val"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["val"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["val"]["y"]],
}
tensor_data["test"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["test"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["test"]["y"]],
}
return tensor_data
def create_dataloaders(
param: SimpleNamespace,
train_dataset: TrajectoryDataset,
val_dataset: TrajectoryDataset,
test_dataset: TrajectoryDataset
) -> tuple[DataLoader, ...]:
train_loader = DataLoader(
train_dataset,
batch_size=param.batch_size,
shuffle=True,
pin_memory=False,
)
val_loader = DataLoader(val_dataset, batch_size=param.batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=param.batch_size, shuffle=False)
return train_loader, val_loader, test_loader
def _preprocess_data(
data: dict[str, list],
stats: Union[None, dict] = None
) -> tuple[dict[str, list], Union[None, dict]]:
is_train = stats is None
if is_train:
stats = {
"T_max": np.max([np.max(ti) for ti in data["t"]]),
"y_max": np.max([np.max(yi) for yi in data["y"]]),
}
for i in range(len(data["t"])):
# Normalize time grid.
# data["t"][i] = data["t"][i].astype(np.float64) / stats["T_max"]
# Normalize images.
data["y"][i] = data["y"][i].astype(np.float32) / stats["y_max"]
# Swap last two dimensions for compatibility with (S, M, N, D) dimensions.
data["y"][i] = np.transpose(data["y"][i], (0, 2, 1))
if is_train:
return data, stats
else:
return data, None
def read_pickle(keys: list[str], path: str = "./") -> dict[str, ndarray]:
data_dict = {}
for key in keys:
with open(path+key+".pkl", "rb") as f:
data_dict[key] = pickle.load(f)
return data_dict
def get_model_components(
param: SimpleNamespace,
) -> tuple[NeuralDecoder, ODETransitionFunction, RecognitionNet]:
nonlins = {
"relu": nn.ReLU,
"tanh": nn.Tanh,
"gelu": nn.GELU,
"mish": nn.Mish,
"sine": Sine,
}
# Decoder.
g = NeuralDecoder(
decoder=nn.Sequential(
CNNDecoder(param.K, param.N, param.D, 2, param.g_cnn_channels),
ToNormalParameters(param.sigY),
),
)
# Transition function and recognition network.
solver_kwargs = {
"method": param.solver,
"rtol": param.rtol,
"atol": param.atol,
"adjoint": param.adjoint,
"options": {"step_size": 0.2},
}
if param.dyn_order == 1:
F = ODETransitionFunction(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNet(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K),
phi_tau=nn.Linear(param.m_h*param.K, param.K),
tau_min=param.tau_min,
)
elif param.dyn_order == 2:
assert param.K % 2 == 0, "Latent dimension `K` must be divisible by 2."
F = ODETransitionFunctionSecondOrder(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K//2)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNetSecondOrder(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_agg_dyn=create_agg_net(param, "dynamic"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K//2),
phi_gamma_dyn=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau_dyn=nn.Linear(param.m_h*param.K, param.K//2),
tau_min=param.tau_min,
)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
return g, F, h
def create_elbo(
g: NeuralDecoder,
F: ODETransitionFunction,
h: RecognitionNet,
param: SimpleNamespace
) -> AmortizedMultipleShootingELBO:
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([param.K]), False),
"sig0": Parameter(1.0 * torch.ones([param.K]), False),
"sigXi": Parameter(param.Xi / param.K**0.5 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(torch.cat([par.detach().reshape(-1) for par in g.parameters()])),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(torch.cat([par.detach().reshape(-1) for par in F.parameters()])),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
})
if param.dyn_order == 1:
p = ModelNormal(prior_param_dict, g, F)
elif param.dyn_order == 2:
p = ModelNormalSecondOrder(prior_param_dict, g, F)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
q = AmortizedMultipleShootingPosterior(posterior_param_dict, F, h)
elbo = AmortizedMultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
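# Minimal assembly sketch, assuming `param` comes from `create_argparser()` (values illustrative only):
#   param = SimpleNamespace(**vars(create_argparser().parse_args([])))
#   g, F, h = get_model_components(param)
#   elbo = create_elbo(g, F, h, param)  # AmortizedMultipleShootingELBO, ready for training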
def visualize_trajectories(
traj: list[ndarray],
vis_inds: list[int],
title: str,
path: str,
img_name: str,
) -> None:
if not os.path.isdir(path):
os.makedirs(path)
img_size = 32
panel_size = 5
n_row = len(traj)
n_col = len(vis_inds)
fig, ax = plt.subplots(n_row, n_col, figsize=(panel_size*n_col, panel_size*n_row), squeeze=False)
for i in range(n_row):
for j in range(n_col):
ax[i, j].imshow(traj[i][0, vis_inds[j], :, 0].reshape(img_size, img_size)) # type: ignore
ax[i, j].grid(False) # type: ignore
# fig.colorbar(im, ax=ax[i, j], orientation='vertical') # type: ignore
fig.suptitle(title, fontsize=45)
fig.tight_layout()
plt.savefig(path+img_name)
plt.close()
class ToNormalParameters(Module):
"""Converts output of CNNDecoder to parameters of p(y|x)."""
def __init__(self, sigY) -> None:
super().__init__()
self.sigY = sigY
def forward(self, x):
x[..., 0] = torch.sigmoid(x[..., 0]) # to keep mean \in (0, 1)
x[..., 1] = self.sigY # fix standard deviation
return x
def get_data_transform():
transform = torchvision.transforms.Compose(
[
torchvision.transforms.RandomHorizontalFlip(p=0.5),
]
)
def apply_transform(y: Tensor) -> Tensor:
_, m, n, d = y.shape
h, w = 32, 32
y = rearrange(y, "s m (h w) d -> s (m d) h w", h=h, w=w)
y = transform(y)
y = rearrange(y, "s (m d) h w -> s m (h w) d", m=m, d=d)
return y
return apply_transform
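# Shape contract: `y` is (S, M, N, D) with N = 32*32 flattened pixels. Frames are reshaped to
# (S, M*D, 32, 32) so a single flip decision is shared by every frame, then flattened back.
# A minimal sketch with hypothetical sizes:
#   transform = get_data_transform()
#   y_aug = transform(torch.rand(2, 10, 1024, 1))  # -> (2, 10, 1024, 1)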
def get_scheduler(optimizer, n_iters, lr):
sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=(1e-5/lr)**(1.0/n_iters))
return sched
| 15,776 | 39.557841 | 158 | py |
msvi | msvi-main/msvi/utils/bballs.py | import os
import pickle
import argparse
from typing import Union
from types import SimpleNamespace
import torch
import torch.nn as nn
import torchvision.transforms
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from msvi.model import ModelNormal, ModelNormalSecondOrder
from msvi.posterior import AmortizedMultipleShootingPosterior
from msvi.elbo import AmortizedMultipleShootingELBO
from msvi.decoder import NeuralDecoder
from msvi.trans_func import ODETransitionFunction, ODETransitionFunctionSecondOrder
from msvi.rec_net import RecognitionNet, RecognitionNetSecondOrder
from msvi.dataset import TrajectoryDataset
from msvi.utils.utils import create_agg_net, Sine, CNNEncoder, CNNDecoder
from einops import rearrange
plt.style.use("seaborn") # type: ignore
sns.set_style("whitegrid")
ndarray = np.ndarray
Tensor = torch.Tensor
Sequential = nn.Sequential
DataDict = dict[str, dict[str, list]]
TensorDataDict = dict[str, dict[str, list[Tensor]]]
Module = nn.Module
DATASET_NAME = "BOUNCING_BALLS"
def create_argparser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# Data.
parser.add_argument("--data_folder", type=str, default="./experiments/data/datasets/bballs/", help="Path to the dataset.")
parser.add_argument("--N", type=int, default=1024, help="Number of observation points.")
parser.add_argument("--D", type=int, default=1, help="Dimensionality of observation.")
parser.add_argument("--max_len", type=int, default=None, help="Truncation length for trajectories.")
parser.add_argument("--sigY", type=float, default=1e-3, help="Observation noise.")
# Model (common).
parser.add_argument("--K", type=int, default=32, help="Latent space dimension.")
parser.add_argument("--Xi", type=float, default=1e-3, help="Used to set variance for the continuity prior.")
parser.add_argument("--block_size", type=int, default=5, help="Number of time points in each block.")
# Model (g).
parser.add_argument("--g_cnn_channels", type=int, default=32, help="Channels in CNNDecoder.")
# Model (F).
parser.add_argument("--m_F", type=int, default=32, help="Dimensionality scaler for F.")
parser.add_argument("--F_nonlin", type=str, default="relu", help="Nonlinearity for F.")
parser.add_argument("--dyn_order", type=int, default=2, help="Order of the dynamcis function, must be 1 or 2.")
# Model (h).
parser.add_argument("--m_h", type=int, default=4, help="Dimensionality scaler for h.")
parser.add_argument("--h_enc_cnn_channels", type=int, default=32, help="Channels in CNNEncoder.")
parser.add_argument("--h_agg_attn", type=str, default="tdp", help="Attention type (dp, t, tdp, tdp_b).")
parser.add_argument("--h_agg_pos_enc", type=str, default="rpeNN", help="Position encoding type (csc, rpeNN, rpeInterp).")
parser.add_argument("--h_agg_stat_layers", type=int, default=4, help="Number of TFEncoder layers in static aggregation net.")
parser.add_argument("--h_agg_dyn_layers", type=int, default=8, help="Number of TFEncoder layers in dynamic aggregation net.")
parser.add_argument("--h_agg_max_tokens", type=int, default=51, help="Maximum expected number of tokens.")
parser.add_argument("--h_agg_max_time", type=float, default=20.0, help="Maximum expected observation time.")
parser.add_argument("--h_agg_delta_r", type=float, default=3.0, help="Attention time span at training time.")
parser.add_argument("--h_agg_p", type=float, default=-1, help="Exponent for temporal attention (use -1 for p=inf).")
parser.add_argument("--n", type=int, default=1, help="Number of nearest neighbors used for baseline aggregation net.")
parser.add_argument("--drop_prob", type=float, default=0.1, help="Attention dropout probability.") # 0.1
parser.add_argument("--tau_min", type=float, default=2e-2, help="Lower bound on the variance of q(s_i).") # 2e-2
parser.add_argument("--sigT", type=float, default=0.0, help="Scale of the noise added to the time grids for temporal neighborhood adjustment.") # TODO: X for regular grids
# Training/validation/testing.
parser.add_argument("--scaler", type=float, default=1, help="Scaler for ELBO L2 term.")
parser.add_argument("--n_iters", type=int, default=300000, help="Number of training iterations.")
parser.add_argument("--lr", type=float, default=3e-4, help="Learning rate.")
parser.add_argument("--batch_size", type=int, default=64, help="Batch size.") # 16 at least for block_size=1
parser.add_argument("--solver", type=str, default="dopri5", help="Name of the ODE solver (see torchdiffeq).")
parser.add_argument("--rtol", type=float, default=1e-5, help="Relative tolerance for ODE solver.")
parser.add_argument("--atol", type=float, default=1e-5, help="Absolute tolerance for ODE solver.")
parser.add_argument("--adjoint", type=int, default=0, help="Use adjoint to evaluate gradient flag (0 - no, 1 - yes).")
parser.add_argument("--device", type=str, default="cuda", help="Device (cpu or cuda)")
parser.add_argument("--seed", type=int, default=13, help="Random seed.")
parser.add_argument("--group", default="None", help="Group for wandb.")
parser.add_argument("--tags", default=["no_tag"], nargs="+", help="Tags for wandb.")
parser.add_argument("--name", type=str, default="tmp", help="Name of the run.")
parser.add_argument("--visualize", type=int, default=1, help="Visualize predictions on validation set flag (0 - no, 1 - yes).")
parser.add_argument("--n_mc_samples", type=int, default=10, help="Number of samples for Monte Carlo integration.")
parser.add_argument("--delta_inf", type=float, default=3.0, help="Attention time span at test time.")
parser.add_argument("--model_folder", type=str, default="./models/bballs/", help="Folder for saving/loading models.")
return parser
def create_datasets(param: SimpleNamespace, device=None) -> tuple[TrajectoryDataset, ...]:
data = read_data(param.data_folder)
data = preprocess_data(data)
data = to_tensors(data, device)
train_dataset = TrajectoryDataset(data["train"]["t"], data["train"]["y"], param.max_len)
val_dataset = TrajectoryDataset(data["val"]["t"], data["val"]["y"], param.max_len)
test_dataset = TrajectoryDataset(data["test"]["t"], data["test"]["y"], param.max_len)
return train_dataset, val_dataset, test_dataset
def read_data(path: str) -> DataDict:
"""Reads data from folder `path` which contains subfolders train, val and test.
Each subfolder contains ndarrays with time grids and trajectories stored as
t.pkl and y.pkl files."""
data = {}
data["train"] = read_pickle(["t", "y"], path+"train/")
data["val"] = read_pickle(["t", "y"], path+"val/")
data["test"] = read_pickle(["t", "y"], path+"test/")
return data
def preprocess_data(data: DataDict) -> DataDict:
data["train"], train_stats = _preprocess_data(data["train"])
data["val"], _ = _preprocess_data(data["val"], train_stats)
data["test"], _ = _preprocess_data(data["test"], train_stats)
return data
def add_noise(data: DataDict, sig: float, seed: int) -> DataDict:
np.random.seed(seed)
for i in range(len(data["train"]["y"])):
data["train"]["y"][i] += np.random.randn(*data["train"]["y"][i].shape) * sig
for i in range(len(data["val"]["y"])):
data["val"]["y"][i] += np.random.randn(*data["val"]["y"][i].shape) * sig
for i in range(len(data["test"]["y"])):
data["test"]["y"][i] += np.random.randn(*data["test"]["y"][i].shape) * sig
return data
def to_tensors(data: DataDict, device=None) -> TensorDataDict:
tensor_data = {}
tensor_data["train"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["train"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["train"]["y"]],
}
tensor_data["val"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["val"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["val"]["y"]],
}
tensor_data["test"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["test"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["test"]["y"]],
}
return tensor_data
def create_dataloaders(
param: SimpleNamespace,
train_dataset: TrajectoryDataset,
val_dataset: TrajectoryDataset,
test_dataset: TrajectoryDataset
) -> tuple[DataLoader, ...]:
train_loader = DataLoader(
train_dataset,
batch_size=param.batch_size,
shuffle=True,
pin_memory=False,
)
val_loader = DataLoader(val_dataset, batch_size=param.batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=param.batch_size, shuffle=False)
return train_loader, val_loader, test_loader
def _preprocess_data(
data: dict[str, list],
stats: Union[None, dict] = None
) -> tuple[dict[str, list], Union[None, dict]]:
is_train = stats is None
if is_train:
stats = {
"T_max": np.max([np.max(ti) for ti in data["t"]]),
"y_max": np.max([np.max(yi) for yi in data["y"]]),
}
for i in range(len(data["t"])):
# Normalize time grid.
# data["t"][i] = data["t"][i].astype(np.float64) / stats["T_max"]
# Normalize images.
data["y"][i] = data["y"][i].astype(np.float32) / stats["y_max"]
# Swap last two dimensions for compatibility with (S, M, N, D) dimensions.
data["y"][i] = np.transpose(data["y"][i], (0, 2, 1))
if is_train:
return data, stats
else:
return data, None
def read_pickle(keys: list[str], path: str = "./") -> dict[str, ndarray]:
data_dict = {}
for key in keys:
with open(path+key+".pkl", "rb") as f:
data_dict[key] = pickle.load(f)
return data_dict
def get_model_components(
param: SimpleNamespace,
) -> tuple[NeuralDecoder, ODETransitionFunction, RecognitionNet]:
nonlins = {
"relu": nn.ReLU,
"tanh": nn.Tanh,
"gelu": nn.GELU,
"mish": nn.Mish,
"sine": Sine,
}
# Decoder.
g = NeuralDecoder(
decoder=nn.Sequential(
CNNDecoder(param.K, param.N, param.D, 2, param.g_cnn_channels),
ToNormalParameters(param.sigY),
),
)
# Transition function and recognition network.
solver_kwargs = {
"method": param.solver,
"rtol": param.rtol,
"atol": param.atol,
"adjoint": param.adjoint,
"options": {"step_size": 0.2},
}
if param.dyn_order == 1:
F = ODETransitionFunction(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNet(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K),
phi_tau=nn.Linear(param.m_h*param.K, param.K),
tau_min=param.tau_min,
)
elif param.dyn_order == 2:
assert param.K % 2 == 0, "Latent dimension `K` must be divisible by 2."
F = ODETransitionFunctionSecondOrder(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K//2)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNetSecondOrder(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_agg_dyn=create_agg_net(param, "dynamic"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K//2),
phi_gamma_dyn=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau_dyn=nn.Linear(param.m_h*param.K, param.K//2),
tau_min=param.tau_min,
)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
return g, F, h
def create_elbo(
g: NeuralDecoder,
F: ODETransitionFunction,
h: RecognitionNet,
param: SimpleNamespace
) -> AmortizedMultipleShootingELBO:
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([param.K]), False),
"sig0": Parameter(1.0 * torch.ones([param.K]), False),
"sigXi": Parameter(param.Xi / param.K**0.5 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(torch.cat([par.detach().reshape(-1) for par in g.parameters()])),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(torch.cat([par.detach().reshape(-1) for par in F.parameters()])),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
})
if param.dyn_order == 1:
p = ModelNormal(prior_param_dict, g, F)
elif param.dyn_order == 2:
p = ModelNormalSecondOrder(prior_param_dict, g, F)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
q = AmortizedMultipleShootingPosterior(posterior_param_dict, F, h)
elbo = AmortizedMultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def visualize_trajectories(
traj: list[ndarray],
vis_inds: list[int],
title: str,
path: str,
img_name: str,
) -> None:
if not os.path.isdir(path):
os.makedirs(path)
img_size = 32
panel_size = 5
n_row = len(traj)
n_col = len(vis_inds)
fig, ax = plt.subplots(n_row, n_col, figsize=(panel_size*n_col, panel_size*n_row), squeeze=False)
for i in range(n_row):
for j in range(n_col):
ax[i, j].imshow(traj[i][0, vis_inds[j], :, 0].reshape(img_size, img_size)) # type: ignore
ax[i, j].grid(False) # type: ignore
# fig.colorbar(im, ax=ax[i, j], orientation='vertical') # type: ignore
fig.suptitle(title, fontsize=45)
fig.tight_layout()
plt.savefig(path+img_name)
plt.close()
class ToNormalParameters(Module):
"""Converts output of CNNDecoder to parameters of p(y|x)."""
def __init__(self, sigY) -> None:
super().__init__()
self.sigY = sigY
def forward(self, x):
x[..., 0] = torch.sigmoid(x[..., 0]) # to keep mean \in (0, 1)
x[..., 1] = self.sigY # fix standard deviation
return x
def get_data_transform():
transform = torchvision.transforms.Compose(
[
torchvision.transforms.RandomVerticalFlip(p=0.5),
torchvision.transforms.RandomHorizontalFlip(p=0.5),
]
)
def apply_transform(y: Tensor) -> Tensor:
_, m, n, d = y.shape
h, w = int(n**0.5), int(n**0.5)
y = rearrange(y, "s m (h w) d -> s (m d) h w", h=h, w=w)
y = transform(y)
y = rearrange(y, "s (m d) h w -> s m (h w) d", m=m, d=d)
return y
return apply_transform
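# Shape note: the N observation points are assumed to form a square image, h = w = sqrt(N)
# (e.g. N = 1024 -> 32x32); the same random flips are applied to every frame in the batch
# before reshaping back to (S, M, N, D).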
def get_scheduler(optimizer, n_iters, lr):
sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=(1e-5/lr)**(1.0/n_iters))
return sched
| 16,068 | 39.992347 | 176 | py |
msvi | msvi-main/msvi/utils/rmnist.py | import os
import pickle
import argparse
from typing import Union
from types import SimpleNamespace
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from msvi.model import ModelNormal, ModelNormalSecondOrder
from msvi.posterior import AmortizedMultipleShootingPosterior
from msvi.elbo import AmortizedMultipleShootingELBO
from msvi.decoder import NeuralDecoder
from msvi.trans_func import ODETransitionFunction, ODETransitionFunctionSecondOrder
from msvi.rec_net import RecognitionNet, RecognitionNetSecondOrder
from msvi.dataset import TrajectoryDataset
from msvi.utils.utils import create_agg_net, Sine, CNNEncoder, CNNDecoder
plt.style.use("seaborn") # type: ignore
sns.set_style("whitegrid")
ndarray = np.ndarray
Tensor = torch.Tensor
Sequential = nn.Sequential
DataDict = dict[str, dict[str, list]]
TensorDataDict = dict[str, dict[str, list[Tensor]]]
Module = nn.Module
DATASET_NAME = "RMNIST"
def create_argparser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# Data.
parser.add_argument("--data_folder", type=str, default="./experiments/data/datasets/rmnist/", help="Path to the dataset.")
parser.add_argument("--N", type=int, default=1024, help="Number of observation points.")
parser.add_argument("--D", type=int, default=1, help="Dimensionality of observation.")
parser.add_argument("--max_len", type=int, default=None, help="Truncation length for trajectories.")
parser.add_argument("--sigY", type=float, default=1e-3, help="Observation noise.")
# Model (common).
parser.add_argument("--K", type=int, default=32, help="Latent space dimension.")
parser.add_argument("--Xi", type=float, default=1e-4, help="Used to set variance for the continuity prior.")
parser.add_argument("--block_size", type=int, default=1, help="Number of time points in each block.")
# Model (g).
parser.add_argument("--g_cnn_channels", type=int, default=16, help="Channels in CNNDecoder.")
# Model (F).
parser.add_argument("--m_F", type=int, default=16, help="Dimensionality scaler for F.")
parser.add_argument("--F_nonlin", type=str, default="relu", help="Nonlinearity for F.")
parser.add_argument("--dyn_order", type=int, default=2, help="Order of the dynamcis function, must be 1 or 2.")
# Model (h).
parser.add_argument("--m_h", type=int, default=4, help="Dimensionality scaler for h.")
parser.add_argument("--h_enc_cnn_channels", type=int, default=16, help="Channels in CNNEncoder.")
parser.add_argument("--h_agg_attn", type=str, default="tdp", help="Attention type (dp, t, tdp, tdp_b).")
parser.add_argument("--h_agg_pos_enc", type=str, default="rpeNN", help="Position encoding type (csc, rpeNN, rpeInterp).")
parser.add_argument("--h_agg_stat_layers", type=int, default=4, help="Number of TFEncoder layers in static aggregation net.")
parser.add_argument("--h_agg_dyn_layers", type=int, default=8, help="Number of TFEncoder layers in dynamic aggregation net.")
parser.add_argument("--h_agg_max_tokens", type=int, default=51, help="Maximum expected number of tokens.")
parser.add_argument("--h_agg_max_time", type=float, default=2.0, help="Maximum expected observation time.")
parser.add_argument("--h_agg_delta_r", type=float, default=0.3, help="Attention time span at training time.")
parser.add_argument("--h_agg_p", type=float, default=-1, help="Exponent for temporal attention (use -1 for p=inf).")
parser.add_argument("--n", type=int, default=1, help="Number of nearest neighbors used for baseline aggregation net.")
parser.add_argument("--drop_prob", type=float, default=0.1, help="Attention dropout probability.") # 0.1
parser.add_argument("--tau_min", type=float, default=2e-2, help="Lower bound on the variance of q(s_i).") # 2e-2
parser.add_argument("--sigT", type=float, default=0.0, help="Scale of the noise added to the time grids for temporal neighborhood adjustment.") # 0.0004
# Training/validation/testing.
parser.add_argument("--scaler", type=float, default=1, help="Scaler for ELBO L2 term.")
parser.add_argument("--n_iters", type=int, default=300000, help="Number of training iterations.")
parser.add_argument("--lr", type=float, default=3e-4, help="Learning rate.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size.")
parser.add_argument("--solver", type=str, default="dopri5", help="Name of the ODE solver (see torchdiffeq).")
parser.add_argument("--rtol", type=float, default=1e-5, help="Relative tolerance for ODE solver.")
parser.add_argument("--atol", type=float, default=1e-5, help="Absolute tolerance for ODE solver.")
parser.add_argument("--adjoint", type=int, default=0, help="Use adjoint to evaluate gradient flag (0 - no, 1 - yes).")
parser.add_argument("--device", type=str, default="cuda", help="Device (cpu or cuda)")
parser.add_argument("--seed", type=int, default=13, help="Random seed.")
parser.add_argument("--group", default="None", help="Group for wandb.")
parser.add_argument("--tags", default=["no_tag"], nargs="+", help="Tags for wandb.")
parser.add_argument("--name", type=str, default="tmp", help="Name of the run.")
parser.add_argument("--visualize", type=int, default=1, help="Visualize predictions on validation set flag (0 - no, 1 - yes).")
parser.add_argument("--n_mc_samples", type=int, default=10, help="Number of samples for Monte Carlo integration.")
parser.add_argument("--delta_inf", type=float, default=0.3, help="Attention time span at test time.")
parser.add_argument("--model_folder", type=str, default="./models/rmnist/", help="Folder for saving/loading models.")
return parser
def create_datasets(param: SimpleNamespace, device=None) -> tuple[TrajectoryDataset, ...]:
data = read_data(param.data_folder)
data = preprocess_data(data)
data = to_tensors(data, device)
train_dataset = TrajectoryDataset(data["train"]["t"], data["train"]["y"], param.max_len)
val_dataset = TrajectoryDataset(data["val"]["t"], data["val"]["y"], param.max_len)
test_dataset = TrajectoryDataset(data["test"]["t"], data["test"]["y"], param.max_len)
return train_dataset, val_dataset, test_dataset
def read_data(path: str) -> DataDict:
"""Reads data from folder `path` which contains subfolders train, val and test.
Each subfolder contains ndarrays with time grids and trajectories stored as
t.pkl and y.pkl files."""
data = {}
data["train"] = read_pickle(["t", "y"], path+"train/")
data["val"] = read_pickle(["t", "y"], path+"val/")
data["test"] = read_pickle(["t", "y"], path+"test/")
return data
def preprocess_data(data: DataDict) -> DataDict:
data["train"], train_stats = _preprocess_data(data["train"])
data["val"], _ = _preprocess_data(data["val"], train_stats)
data["test"], _ = _preprocess_data(data["test"], train_stats)
return data
def add_noise(data: DataDict, sig: float, seed: int) -> DataDict:
np.random.seed(seed)
for i in range(len(data["train"]["y"])):
data["train"]["y"][i] += np.random.randn(*data["train"]["y"][i].shape) * sig
for i in range(len(data["val"]["y"])):
data["val"]["y"][i] += np.random.randn(*data["val"]["y"][i].shape) * sig
for i in range(len(data["test"]["y"])):
data["test"]["y"][i] += np.random.randn(*data["test"]["y"][i].shape) * sig
return data
def to_tensors(data: DataDict, device=None) -> TensorDataDict:
tensor_data = {}
tensor_data["train"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["train"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["train"]["y"]],
}
tensor_data["val"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["val"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["val"]["y"]],
}
tensor_data["test"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["test"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["test"]["y"]],
}
return tensor_data
def create_dataloaders(
param: SimpleNamespace,
train_dataset: TrajectoryDataset,
val_dataset: TrajectoryDataset,
test_dataset: TrajectoryDataset
) -> tuple[DataLoader, ...]:
train_loader = DataLoader(
train_dataset,
batch_size=param.batch_size,
shuffle=True,
pin_memory=False,
)
val_loader = DataLoader(val_dataset, batch_size=param.batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=param.batch_size, shuffle=False)
return train_loader, val_loader, test_loader
def _preprocess_data(
data: dict[str, list],
stats: Union[None, dict] = None
) -> tuple[dict[str, list], Union[None, dict]]:
is_train = stats is None
if is_train:
stats = {
"T_max": np.max([np.max(ti) for ti in data["t"]]),
"y_max": np.max([np.max(yi) for yi in data["y"]]),
}
for i in range(len(data["t"])):
# Normalize time grid.
# data["t"][i] = data["t"][i].astype(np.float64) / stats["T_max"]
# Normalize images.
data["y"][i] = data["y"][i].astype(np.float32) / stats["y_max"]
# Swap last two dimensions for compatibility with (S, M, N, D) dimensions.
data["y"][i] = np.transpose(data["y"][i], (0, 2, 1))
if is_train:
return data, stats
else:
return data, None
def read_pickle(keys: list[str], path: str = "./") -> dict[str, ndarray]:
data_dict = {}
for key in keys:
with open(path+key+".pkl", "rb") as f:
data_dict[key] = pickle.load(f)
return data_dict
def get_model_components(
param: SimpleNamespace,
) -> tuple[NeuralDecoder, ODETransitionFunction, RecognitionNet]:
nonlins = {
"relu": nn.ReLU,
"tanh": nn.Tanh,
"gelu": nn.GELU,
"mish": nn.Mish,
"sine": Sine,
}
# Decoder.
g = NeuralDecoder(
decoder=nn.Sequential(
CNNDecoder(param.K, param.N, param.D, 2, param.g_cnn_channels),
ToNormalParameters(param.sigY),
),
)
# Transition function and recognition network.
solver_kwargs = {
"method": param.solver,
"rtol": param.rtol,
"atol": param.atol,
"adjoint": param.adjoint,
"options": {"step_size": 0.2},
}
if param.dyn_order == 1:
F = ODETransitionFunction(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNet(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K),
phi_tau=nn.Linear(param.m_h*param.K, param.K),
tau_min=param.tau_min,
)
elif param.dyn_order == 2:
assert param.K % 2 == 0, "Latent dimension `K` must be divisible by 2."
F = ODETransitionFunctionSecondOrder(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K//2)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNetSecondOrder(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_agg_dyn=create_agg_net(param, "dynamic"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K//2),
phi_gamma_dyn=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau_dyn=nn.Linear(param.m_h*param.K, param.K//2),
tau_min=param.tau_min,
)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
return g, F, h
def create_elbo(
g: NeuralDecoder,
F: ODETransitionFunction,
h: RecognitionNet,
param: SimpleNamespace
) -> AmortizedMultipleShootingELBO:
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([param.K]), False),
"sig0": Parameter(1.0 * torch.ones([param.K]), False),
"sigXi": Parameter(param.Xi / param.K**0.5 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(torch.cat([par.detach().reshape(-1) for par in g.parameters()])),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(torch.cat([par.detach().reshape(-1) for par in F.parameters()])),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
})
if param.dyn_order == 1:
p = ModelNormal(prior_param_dict, g, F)
elif param.dyn_order == 2:
p = ModelNormalSecondOrder(prior_param_dict, g, F)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
q = AmortizedMultipleShootingPosterior(posterior_param_dict, F, h)
elbo = AmortizedMultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def visualize_trajectories(
traj: list[ndarray],
vis_inds: list[int],
title: str,
path: str,
img_name: str,
) -> None:
if not os.path.isdir(path):
os.makedirs(path)
img_size = 32
panel_size = 5
n_row = len(traj)
n_col = len(vis_inds)
fig, ax = plt.subplots(n_row, n_col, figsize=(panel_size*n_col, panel_size*n_row), squeeze=False)
for i in range(n_row):
for j in range(n_col):
ax[i, j].imshow(traj[i][0, vis_inds[j], :, 0].reshape(img_size, img_size)) # type: ignore
ax[i, j].grid(False) # type: ignore
# fig.colorbar(im, ax=ax[i, j], orientation='vertical') # type: ignore
fig.suptitle(title, fontsize=45)
fig.tight_layout()
plt.savefig(path+img_name)
plt.close()
class ToNormalParameters(Module):
"""Converts output of CNNDecoder to parameters of p(y|x)."""
def __init__(self, sigY) -> None:
super().__init__()
self.sigY = sigY
def forward(self, x):
x[..., 0] = torch.sigmoid(x[..., 0]) # to keep mean \in (0, 1)
x[..., 1] = self.sigY # fix standard deviation
return x
def get_data_transform():
def apply_transform(y: Tensor) -> Tensor:
return y
return apply_transform
def get_scheduler(optimizer, n_iters, lr):
sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=(1e-5/lr)**(1.0/n_iters))
return sched
| 15,338 | 40.013369 | 157 | py |
msvi | msvi-main/msvi/utils/__init__.py | from msvi.utils import utils # noqa
from msvi.utils import pendulum # noqa
from msvi.utils import rmnist # noqa
from msvi.utils import bballs # noqa | 152 | 37.25 | 39 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/setup.py | from setuptools import setup
setup(name='util',
version='0.1',
description='Common functions shared by all projects',
url='#',
author='LishinC',
author_email='NA',
license='NA',
packages=['util'],
zip_safe=False) | 212 | 20.3 | 54 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/__init__.py | 0 | 0 | 0 | py |
|
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/__init__.py | 0 | 0 | 0 | py |
|
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/backbone/Backbone.py | import os
import torch
import torch.nn as nn
from util.model.initialize_load_r21d import initialize_load_model
from util.loader.LUMC_A4C.loader_vid import create_dataloader
from util.checkpoint.checkpoint_train import checkpoint_train
from util.checkpoint.checkpoint_test import checkpoint_test
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
import sys, traceback
from shutil import copyfile
class Backbone():
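    """Generic training/validation/testing backbone shared by the projects in this repo.

    The caller supplies a `forward(batch, model, device, return_one_batch, **kwargs)` function
    together with dataloader/model factories; `run()` then performs optimization with optional
    gradient accumulation (`loss_accu_period`), periodic validation, CSV/TensorBoard logging,
    checkpointing of the best-validation model, and final testing.
    """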
def __init__(self, save_folder, epo_iter, forward, task='clas',
create_dataloader=create_dataloader, initialize_load_model=initialize_load_model,
checkpoint_train=checkpoint_train, checkpoint_test = checkpoint_test,
optimizer=torch.optim.Adam, lr=1e-4, wd=1e-8, loss_accu_period=1,
log_val_only=True, eval_per_iter=False):
assert task in ['clas', 'seg', 'regres']
# Variables
self.save_folder = save_folder
self.epo_iter = epo_iter
self.task = task
self.optimizer = optimizer
self.lr = lr
self.wd = wd
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.loss_accu_period = loss_accu_period
self.log_val_only = log_val_only
self.eval_per_iter = eval_per_iter
# Functions
self.forward = forward
self.create_dataloader = create_dataloader
self.initialize_load_model = initialize_load_model
self.checkpoint_train = checkpoint_train
self.checkpoint_test = checkpoint_test
def batch_train(self, model, batch, i, opt, loss_running_accu, loss_accu_period, **kwargs):
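        """Single training step with gradient accumulation.

        The loss of every mini-batch is backpropagated immediately, but `opt.step()` is only
        called once every `loss_accu_period` mini-batches; until then the running loss is kept
        in `loss_running_accu` and `one_batch` is returned as 0.
        """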
loss, _ = self.forward(batch, model, self.device, return_one_batch=False, **kwargs)
loss.backward()
loss_running_accu += loss.item()
one_batch = 0
if (i + 1) % loss_accu_period == 0:
loss_running_accu = loss_running_accu / loss_accu_period
opt.step()
opt.zero_grad()
one_batch = loss_running_accu
loss_running_accu = 0
return model, one_batch, loss_running_accu, opt
def whole_eval(self, model, dataloader, **kwargs):
model.eval()
one_epoch = []
for i, batch in enumerate(dataloader):
loss, one_batch = self.forward(batch, model, self.device, return_one_batch=True, **kwargs)
one_epoch.append(one_batch)
return one_epoch
def run_val(self, i, itr, epo, num_batch, num_epo, subfolder, one_epoch_train,
model, dataloader_val, dataloader_test, **kwargs):
one_epoch_val = self.whole_eval(model, dataloader_val, **kwargs)
one_epoch_test = [] if self.log_val_only else self.whole_eval(model, dataloader_test, **kwargs)
is_first_update = (self.eval_per_iter & (itr == 0)) | ((not self.eval_per_iter) & (epo == 0))
is_last_update = itr == ((num_batch // self.loss_accu_period) * num_epo) - 1
self.checkpoint_train(itr, one_epoch_train, one_epoch_val, one_epoch_test, model,
self.save_folder, subfolder, epo, is_first_update, is_last_update,
self.writer, self.log_val_only, self.task, **kwargs)
def run_test(self, dataloader_test, subfolder, **kwargs):
model, _ = self.initialize_load_model(mode='test', model_path=self.save_folder+'train/model_val_min.pth', device=self.device, **kwargs)
one_epoch = self.whole_eval(model, dataloader_test, **kwargs)
self.checkpoint_test(one_epoch, model, self.save_folder, subfolder, self.task, **kwargs)
def run(self, workflow='complete', subfolder='default', verbose=False, **kwargs):
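        """Run the requested workflow: 'train', 'test', or 'complete' (train then test).

        Training validates periodically (per update or per epoch depending on `eval_per_iter`);
        testing reloads `train/model_val_min.pth`. On KeyboardInterrupt the current best model
        is still evaluated on the test set before exiting.
        """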
try:
n = datetime.now()
assert workflow in ['complete', 'train', 'test']
dataloader_train = self.create_dataloader(mode='train', **kwargs) #Have to be initialized here since kwargs are needed
dataloader_val = self.create_dataloader(mode='val', **kwargs)
dataloader_test = self.create_dataloader(mode='test', **kwargs)
## [Training]
if (workflow == 'complete') | (workflow == 'train'):
num_batch, num_epo = len(dataloader_train), self.epo_iter.stop
assert num_batch % self.loss_accu_period == 0
model, param = self.initialize_load_model(mode='train', device=self.device, **kwargs)
opt = self.optimizer(param, lr=self.lr, weight_decay=self.wd)
opt.zero_grad() # Do zero_grad() here because of the gradient accumulation feature
self.writer = SummaryWriter(self.save_folder)
if verbose: print('Training initialization time: ', datetime.now() - n,'='*100)
n = datetime.now()
for epo in tqdm(self.epo_iter, ncols=0):
one_epoch_train, loss_running_accu = [], 0
for i, batch in enumerate(dataloader_train):
wt = (datetime.now() - n).total_seconds()
if verbose&(wt>2): print('\n Batch loading waiting time ', wt)
# itr counts the number of updates. When loss accumulation is used, itr would be different to i.
itr = i//self.loss_accu_period + epo * (num_batch//self.loss_accu_period)
model, one_batch_train, loss_running_accu, opt = self.batch_train(model, batch, i, opt,
loss_running_accu, self.loss_accu_period, **kwargs)
# Log training loss of one (full) batch for calculation of averaged training loss later on.
if (i+1)%self.loss_accu_period == 0:
one_epoch_train.append(one_batch_train)
## [Validation]:
# Run validation if eval_per_iter & end of a batch; Or NOT eval_per_iter & end of a epoch
if (
(self.eval_per_iter & ((i+1)%self.loss_accu_period == 0)) or
((not self.eval_per_iter) & ((i+1)/self.loss_accu_period == num_batch//self.loss_accu_period))
):
self.run_val(i, itr, epo, num_batch, num_epo, subfolder, one_epoch_train,
model, dataloader_val, dataloader_test, **kwargs)
one_epoch_train = []
n = datetime.now()
self.writer.flush()
self.writer.close()
## [Testing]
if (workflow == 'complete') | (workflow == 'test'):
self.run_test(dataloader_test, subfolder, **kwargs)
except KeyboardInterrupt:
## [Test on current best if interrupted]
print('Interrupted at epo ', epo, )
# copyfile(self.save_folder + 'train/log_tmp.csv', self.save_folder + 'train/log_' + str(epo) + '.csv')
# epo_iter = range(epo+1)
self.run_test(dataloader_test, subfolder, **kwargs)
sys.exit(0) | 7,196 | 48.979167 | 143 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/backbone/__init__.py | 0 | 0 | 0 | py |
|
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/checkpoint/checkpoint_test.py | import os
import numpy as np
import torch
from util.checkpoint.create_header import create_header_clas, create_header_seg, create_header_regres
from util.eval.eval import one_epoch_avg_clas, one_epoch_avg_seg, one_epoch_avg_regres
def checkpoint_test(one_epoch, model, save_folder, subfolder, task,
header_train=None, header_eval=None, one_epoch_avg=None, **kwargs):
mode = 'test'
create_header = globals()['create_header_'+task]
if one_epoch_avg is None:
one_epoch_avg = globals()['one_epoch_avg_'+task]
if subfolder == 'default': subfolder = mode
save_subfolder = save_folder + subfolder
os.makedirs(save_subfolder, exist_ok=True)
    epo = find_epo_test(save_folder, subfolder, **kwargs)  # Here epo might actually be itr since the log might be per update rather than per epoch
one_epoch_avg = one_epoch_avg(one_epoch)
multi_epo = create_header(mode, None, header_train, header_eval)
multi_epo = np.concatenate([multi_epo, one_epoch_avg], axis=0)
np.savetxt(save_subfolder + '/prediction_' + str(epo) + '.csv', np.asarray(one_epoch), fmt='%s', delimiter=',')
np.savetxt(save_subfolder + '/performance_' + str(epo) + '.csv', np.asarray(multi_epo), fmt='%s', delimiter=',')
print('Epoch: ', epo, '| ', mode, ' | performance: ', one_epoch_avg, '\n')
def find_epo_test(save_folder, subfolder, **kwargs):
# The columns of multi_epo are [itr, train_loss, val_loss_or_early_stop_metric, other_val_metrics_if_any]
multi_epo = np.genfromtxt(save_folder + '/train/log.csv', dtype='str', delimiter=',')
multi_epo = multi_epo[1:,2].astype('float')
epo_test = np.argmin(multi_epo)
min_loss = multi_epo[epo_test]
os.makedirs(save_folder + '/' + subfolder, exist_ok=True)
np.savetxt(save_folder + '/' + subfolder + '/minLoss_'+str(min_loss)+'.txt',[]) # Just to indicate val-loss. Empty file
return epo_test | 1,894 | 50.216216 | 131 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/checkpoint/minLoss.py | import os
import numpy as np
def find_epo_test(save_folder, epo_iter, **kwargs):
csv_name = save_folder + '/train/log_'+str(epo_iter.stop-1)+'.csv'
multi_epo = np.genfromtxt(csv_name, dtype='str', delimiter=',')
multi_epo = multi_epo[1:,2].astype('float')
epo_test = np.argmin(multi_epo)
min_loss = multi_epo[epo_test]
    os.makedirs(save_folder + '/test', exist_ok=True)  # TODO: this is to be modified to accommodate custom train/test folder
np.savetxt(save_folder + '/test/minLoss_'+str(min_loss)+'.txt',[])
return epo_test
def find_epo_test_from_val_log(save_folder, epo_iter, val_folder='val', test_folder='test', del_after_val=True, **kwargs):
# Validation log must start with loss
# Allows assigning special validation folder with kwargs
csv_name = save_folder + val_folder + '/log_'+str(epo_iter.stop-1)+'.csv'
multi_epo = np.genfromtxt(csv_name, dtype='str', delimiter=',')
multi_epo = multi_epo[1:,0].astype('float')
epo_test = np.argmin(multi_epo)
min_loss = multi_epo[epo_test]
np.savetxt(save_folder + test_folder +'/minLoss_'+str(min_loss)+'.txt',[])
if del_after_val:
for epo in epo_iter:
if epo != epo_test:
os.remove(save_folder + 'train/model_' + str(epo) + '.pth')
return epo_test | 1,303 | 43.965517 | 122 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/checkpoint/create_header.py | import numpy as np
def create_header_clas(mode, log_val_only, header_train=None, header_eval=None):
if mode == 'train':
if header_train is None:
if log_val_only:
header_train = ['itr', 'Loss/train', 'Loss/val', 'Accuracy/val']
else:
header_train = ['itr', 'Loss/train', 'Loss/val', 'Accuracy/val', 'Loss/test', 'Accuracy/test']
multi_epo = np.asarray(header_train)
else:
if header_eval is None:
header_eval = ['loss', 'accuracy', 'precision', 'recall', ' F1-score']
multi_epo = np.asarray(header_eval)
return multi_epo.reshape(1,-1)
def create_header_seg(mode, log_val_only, header_train=None, header_eval=None):
if mode == 'train':
if header_train is None:
if log_val_only:
header_train = ['itr', 'Loss/train', 'Loss/val', 'Dice/val']
else:
header_train = ['itr', 'Loss/train', 'Loss/val', 'Dice/val', 'Loss/test', 'Dice/test']
multi_epo = np.asarray(header_train)
else:
if header_eval is None:
header_eval = ['loss', 'dice', 'iou', 'precision', 'recall']
multi_epo = np.asarray(header_eval)
return multi_epo.reshape(1,-1)
def create_header_regres(mode, log_val_only, header_train=None, header_eval=None):
if mode == 'train':
if header_train is None:
if log_val_only:
header_train = ['itr', 'Loss/train', 'Loss_MSE/val', 'l1/val']
else:
header_train = ['itr', 'Loss/train', 'Loss_MSE/val', 'l1/val', 'Loss_MSE/test', 'l1/test']
multi_epo = np.asarray(header_train)
else:
if header_eval is None:
header_eval = ['Loss_MSE', 'l1', 'rmse', 'r2']
multi_epo = np.asarray(header_eval)
return multi_epo.reshape(1,-1) | 1,854 | 36.857143 | 110 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/checkpoint/__init__.py | 0 | 0 | 0 | py |
|
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/checkpoint/checkpoint_train.py | import os
import numpy as np
import torch
from util.checkpoint.create_header import create_header_clas, create_header_seg, create_header_regres
from util.eval.eval import one_epoch_avg_clas, one_epoch_avg_seg, one_epoch_avg_regres
def checkpoint_train(itr, one_epoch_train, one_epoch_val, one_epoch_test, model, save_folder, subfolder, epo, is_first_update, is_last_update, writer, log_val_only,
task, header_train=None, header_eval=None, one_epoch_avg=None, **kwargs):
mode = 'train'
create_header = globals()['create_header_'+task]
if one_epoch_avg is None:
one_epoch_avg = globals()['one_epoch_avg_'+task]
    save_subfolder = save_folder + mode  # From now on the training log is always stored in the folder "train"
os.makedirs(save_subfolder, exist_ok=True)
train_avg = np.mean(one_epoch_train)
header = create_header(mode, log_val_only, header_train, header_eval)
NUM_METRICS_TO_LOG = len(header[0]) - 2
if log_val_only:
# For clas, one_epoch contains appended [ID[0], loss.item(), y_true, y_pred]
# one_epoch_avg returns numpy array of shape (1,-1) containing [loss, acc, prec, rec, f1]
        # For seg, one_epoch contains appended [ID[0], loss, 'dice', 'iou', 'precision', 'recall']
# one_epoch_avg returns its average, with shape (1, -1)
val_avg = one_epoch_avg(one_epoch_val)
one_epoch_log = [itr, train_avg] + list(val_avg.reshape(-1,))[:NUM_METRICS_TO_LOG]
else:
val_avg = one_epoch_avg(one_epoch_val)
test_avg = one_epoch_avg(one_epoch_test)
        one_epoch_log = [itr, train_avg] + list(val_avg.reshape(-1,))[:NUM_METRICS_TO_LOG // 2] + list(test_avg.reshape(-1,))[:NUM_METRICS_TO_LOG // 2]  # integer division: slice indices must be int
logging(one_epoch_log, header, writer, save_subfolder, is_first_update, log_val_only, model, **kwargs)
# if is_last_update:
# os.rename(save_subfolder + '/log_tmp.csv', save_subfolder + '/log_' + str(epo) + '.csv')
# # os.rename(save_subfolder + '/individual_pred_tmp.csv', save_subfolder + '/individual_pred_' + str(epo) + '.csv')
print('Epoch: ', epo, '| training | performance: ', one_epoch_log, '\n')
def logging(one_epoch_log, header, writer, save_subfolder, is_first_update, log_val_only, model, **kwargs):
"""
1) Log performance to csv & tensorboard.
2) Determine if has validation loss minimum.
"""
def compare(one_epoch_log, multi_epo):
current = one_epoch_log[2]
history_min = min(multi_epo[1:,2].astype('float'))
if current < history_min:
has_min_val = True
else:
has_min_val = False
return has_min_val
# Write to tensorboard
itr = one_epoch_log[0]
assert len(header[0]) == len(one_epoch_log)
for i in range(1,len(header[0])):
writer.add_scalar(header[0,i], one_epoch_log[i], itr)
# Write to csv file & Save model if has val-loss minimum
csv_name = save_subfolder+'/log.csv'
if is_first_update:
multi_epo = header
has_min_val = True
else:
multi_epo = np.genfromtxt(csv_name, dtype='str', delimiter=',')
has_min_val = compare(one_epoch_log, multi_epo)
one_epoch_log = np.asarray(one_epoch_log).reshape(1, -1)
multi_epo = np.concatenate([multi_epo, one_epoch_log], axis=0)
np.savetxt(csv_name, np.asarray(multi_epo), fmt='%s', delimiter=',')
if has_min_val: torch.save(model.state_dict(), save_subfolder + '/model_val_min.pth')
| 3,481 | 43.641026 | 164 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/eval/eval.py | import numpy as np
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
def performance_seg(pred, true):
overlap = pred & true # TP
union = pred | true # TP + FN + FP
misclassified = overlap != union # FN + FP
FP = (misclassified & pred).sum()
FN = (misclassified & true).sum()
TP = overlap.sum()
TN = (~pred & ~true).sum()
UN = union.sum()
if UN == 0:
dice = iou = precision = recall = accuracy = 1
elif TP == 0:
dice = iou = precision = recall = accuracy = 0
else:
dice = (TP*2) / (UN + TP)
iou = TP / UN
precision = TP / (TP + FP)
recall = TP / (TP + FN)
accuracy = (TP + TN) / (UN + TN)
return [dice, iou, precision, recall]
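# Sanity-check sketch with hypothetical 2x2 boolean masks:
#   pred = np.array([[True, False], [False, False]])
#   true = np.array([[True, True], [False, False]])
#   performance_seg(pred, true)  # -> [dice=2/3, iou=1/2, precision=1.0, recall=1/2]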
def one_epoch_avg_seg(one_epoch):
    one_epoch = np.asarray(one_epoch)[:, 1:].astype(float)
avg = np.mean(one_epoch, axis=0, keepdims=True)
return avg
def performance_clas(Y_true, Y_pred):
acc = accuracy_score(Y_true, Y_pred)
prec, rec, f1, _ = precision_recall_fscore_support(Y_true, Y_pred, average='weighted')
return [acc, prec, rec, f1]
def one_epoch_avg_clas(one_epoch):
one_epoch = np.asarray(one_epoch)[:,:4]
    Y_true = one_epoch[:, 2].astype(float)
    Y_pred = one_epoch[:, 3].astype(float)
    loss = one_epoch[:, 1].astype(float).mean()
acc, prec, rec, f1 = performance_clas(Y_true, Y_pred)
return np.asarray([loss, acc, prec, rec, f1]).reshape(1,-1)
def performance_regres(Y_true, Y_pred):
l1 = mean_absolute_error(Y_true, Y_pred)
rmse = np.sqrt(mean_squared_error(Y_true, Y_pred))
r2 = r2_score(Y_true, Y_pred)
return [l1, rmse, r2]
def one_epoch_avg_regres(one_epoch):
one_epoch = np.asarray(one_epoch)[:,:4]
    Y_true = one_epoch[:, 2].astype(float)
    Y_pred = one_epoch[:, 3].astype(float)
    loss = one_epoch[:, 1].astype(float).mean()
l1, rmse, r2 = performance_regres(Y_true, Y_pred)
return np.asarray([loss, l1, rmse, r2]).reshape(1,-1)
| 2,135 | 30.411765 | 90 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/eval/__init__.py | 0 | 0 | 0 | py |
|
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/loader/__init__.py | 0 | 0 | 0 | py |
|
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/loader/LUMC_A4C/loader_vid.py | import numpy as np
import torch
from torch.utils.data.dataset import Dataset
import random
from skimage.transform import rotate
class loader(Dataset):
def __init__(self, X_list, aug=False, rgb_channel=3, **kwargs):
self.X_list = X_list
self.aug = aug
self.rgb_channel = rgb_channel
def __getitem__(self, index):
filepath = self.X_list[index]
X = np.load(filepath)
        # Replace with own loader. Output X should have size [channel=3, num_frame=30, x_dimension=112, y_dimension=112]
        X = torch.from_numpy(X).float()
        # NOTE: Y (label) and ID (sample identifier) are not produced by this stub; the
        # placeholders below are assumptions so the stub runs, replace them with your own labels.
        Y = torch.zeros(1)
        ID = filepath
        return X, Y, ID
def __len__(self):
return len(self.X_list)
def create_dataloader(mode, batch_size=16, num_workers=[4, 4], data_folder='../data/LUMC_A4C/ver3/',
split_folder='split_000all_400_401/', **kwargs):
X_list = np.load(data_folder + split_folder + '/' + mode + '_list_RGB.npy').tolist()
if mode == 'train':
data = loader(X_list, aug=True, **kwargs)
dataloader = torch.utils.data.DataLoader(dataset=data, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=num_workers[0], pin_memory=True)
elif (mode == 'val') | (mode == 'test'):
data = loader(X_list, aug=False, **kwargs)
dataloader = torch.utils.data.DataLoader(dataset=data, batch_size=1, shuffle=False, drop_last=False, num_workers=num_workers[1], pin_memory=True)
return dataloader
# if __name__ == '__main__':
# dataloader = create_dataloader('train')
# print(len(dataloader))
# for i, batch in enumerate(dataloader):
# print(batch[0].shape, batch[1].shape, batch[2][0]) | 1,640 | 36.295455 | 160 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/model/initialize_load_r21d.py | import torch
import torch.nn as nn
import torchvision
def initialize_load_model(mode, model_path='scratch', in_channel=3, out_channel=3, device="cuda", **kwargs):
def r21d(in_channel, out_channel, pretrain=False, echo_pretrain=False):
model = torchvision.models.video.__dict__["r2plus1d_18"](pretrained=pretrain)
if in_channel == 1: model.stem[0] = nn.Conv3d(1, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False)
model.fc = nn.Linear(model.fc.in_features, out_channel)
return model
if model_path == 'pretrain':
model = r21d(in_channel, out_channel, pretrain=True)
elif model_path == 'scratch':
model = r21d(in_channel, out_channel)
else:
model = r21d(in_channel, out_channel)
model.load_state_dict(torch.load(model_path))
model.to(device)
param = model.parameters()
if mode == 'train':
model.train()
else:
model.eval()
return model, param | 984 | 34.178571 | 132 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/model/__init__.py | 0 | 0 | 0 | py |
|
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/projectDDDIF/main.py | import os
import torch
import torch.nn as nn
# import numpy as np
from util.backbone.Backbone import Backbone
from util.loader.LUMC_A4C.loader_vid import create_dataloader
from util.model.initialize_load_r21d import initialize_load_model
from analyze import analyze
def forward(batch, model, device, return_one_batch, criterion=nn.CrossEntropyLoss(), **kwargs):
X, Y, ID = batch
X = X.to(device)
Y = Y.to(device).view(-1).long()
out_logit = model(X)
loss = criterion(out_logit, Y)
if return_one_batch:
sf = nn.Softmax(1)
output = sf(out_logit)
y_pred = output.argmax(1).item()
y_true = Y.item()
one_batch = [ID[0], loss.item(), y_true, y_pred, output[:, 1].item()]
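        # one_batch record (per sample): [sample ID, loss, true label, predicted label, softmax probability of class 1]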
# analyze(X, [y_true], model, 1, 'model/DeepLIFT/', ID[0])
return loss, one_batch
else:
return loss, []
if __name__ == '__main__':
root = './model/regurg/'
kwargs = {'in_channel': 3, 'out_channel': 3}
b = Backbone(root, range(200), forward,
create_dataloader=create_dataloader, initialize_load_model=initialize_load_model)
b.run(**kwargs)
| 1,139 | 26.804878 | 98 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/projectDDDIF/analyze.py | import os
import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
from skimage.transform import resize
import numpy as np
import cv2
# import cmapy
from pydicom import dcmread
from pydicom.uid import ExplicitVRLittleEndian
from captum.attr import GradientShap, DeepLift, DeepLiftShap, IntegratedGradients, GuidedGradCam, NoiseTunnel, Saliency, GuidedBackprop
def to_0_255(x):
return (x-x.min())/(x.max()-x.min())*255
def write_dcm(raw, x, path):
# Requires x of shape (t,row,col,3)
x = to_0_255(x)
x = x.astype('uint8')
raw.NumberOfFrames = x.shape[0]
raw.Rows = x.shape[1]
raw.Columns = x.shape[2]
raw.PixelData = x.tobytes()
raw.save_as(path)
def show_save_mov(video, save_path, file_type='mp4', norm=False, boundary=None, gray2color=None, fps=5, show=False, insert_text=None):
if norm:
if boundary is not None:
video[video > boundary[0]] = boundary[0]
video[video < boundary[1]] = boundary[1]
video = ((video - np.min(video)) / (np.max(video) - np.min(video))) * 255
video = np.asarray(video, dtype='uint8')
frame_delay = int(1000 / fps)
if file_type == 'mp4':
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
elif file_type == 'avi':
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
save_path = save_path + '.' + file_type
out = cv2.VideoWriter(save_path, fourcc, fps, (video.shape[2],video.shape[1]))
for frame in video:
if gray2color is not None:
frame = cv2.applyColorMap(frame, gray2color)
if insert_text is not None:
cv2.putText(frame, insert_text, (2, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
cv2.putText(frame, 'ILV'+' '*21+'AVR', (2, 18), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
out.write(frame)
if show:
cv2.imshow('frame', frame)
key = cv2.waitKey(frame_delay)
out.release()
cv2.destroyAllWindows()
def get_analyzer(methodID, model, if_smoothGrad):
assert methodID in range(5)
if methodID == 0:
analyzer = Saliency(model)
methodname = '_Saliency'
if methodID == 1:
analyzer = DeepLift(model)
methodname = '_DL'
if methodID == 2:
analyzer = DeepLiftShap(model)
methodname = '_DLshap'
if methodID == 3:
analyzer = GuidedBackprop(model)
methodname = '_GB'
if methodID == 4:
analyzer = GuidedGradCam(model, model.layer4)
methodname = '_GradCAM'
if if_smoothGrad:
analyzer = NoiseTunnel(analyzer)
methodname = methodname+'smo'
return analyzer, methodname
def run_analyze(analyzer, inputs, target):
return analyzer.attribute(inputs=inputs, target=target, baselines=inputs*0)
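# Note (added for clarity; verify against your captum version): the `baselines` argument is
# meaningful for DeepLift/DeepLiftShap-style attributions, while purely gradient-based methods
# such as Saliency or GuidedBackprop may not accept it.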
def post_process(attributions, threshold):
"""Post-process the generated attributions"""
assert threshold in ['abs', 'pos']
if threshold == 'abs':
attributions = abs(attributions)
elif threshold == 'pos':
attributions[attributions<0] = 0
attributions = attributions.cpu().detach().numpy()[0, 0, ...] # remove batch & channel dimension -> [t,x,y]
attributions = np.uint8(to_0_255(attributions))
attributions_color = []
for i, att in enumerate(attributions):
# att = cv2.applyColorMap(att, cv2.COLORMAP_JET) #After this step the shape changes from (112,112) to (112,112,3)
att = cv2.applyColorMap(att, cv2.COLORMAP_HOT)
attributions_color.append(att)
attributions_color = np.stack(attributions_color, axis=0)
assert attributions_color.shape == (30, 112, 112, 3)
return attributions_color
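# Shape flow in post_process, for reference: captum returns (batch=1, channel=1, t, x, y);
# batch/channel are dropped, values are rescaled to 0-255, and each frame is color-mapped,
# yielding the (30, 112, 112, 3) uint8 array asserted above.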
def analyze(X, target_classes, model, methodID, save_dir=None, file_name=None, tail='',
save_vid_type='mp4', save_att_vid=True, save_input_vid=False, save_render_vid=False, save_render_npy=False,
save_dcm=False, save_figs=False, threshold='pos', if_smoothGrad=False):
os.makedirs(save_dir, exist_ok=True)
# First, process and save the input X if needed
if save_input_vid | save_render_vid | save_render_npy | save_figs: # Then we would need X
Xrgb = to_0_255(X.cpu().detach().numpy()[0, 0, ...]) # (b,c,t,x,y) -> (t,x,y)
Xrgb = np.stack([Xrgb] * 3, axis=3)
if save_input_vid:
show_save_mov(video=Xrgb, save_path=save_dir + file_name, file_type=save_vid_type)
# Second, run analyze and save if needed
for c in target_classes:
classname = '_class'+str(c)
analyzer, methodname = get_analyzer(methodID, model, if_smoothGrad)
attributions = run_analyze(analyzer, X, c)
attributions_color = post_process(attributions, threshold)
if save_render_vid | save_render_npy | save_figs: # Then we would need "render"
render = attributions_color * 0.7 + Xrgb * 0.3
if save_att_vid:
show_save_mov(video=attributions_color, save_path=save_dir+file_name+tail+methodname+classname, file_type=save_vid_type)
if save_render_vid:
            show_save_mov(video=render, save_path=save_dir+file_name+tail+methodname+classname+'_overlay', file_type=save_vid_type)
if save_render_npy:
np.save(save_dir+file_name+tail+methodname+classname+'_overlay.npy', render)
if save_figs:
for i, (img, att, rnd) in enumerate(zip(Xrgb, attributions_color, render)):
cv2.imwrite(save_dir + file_name + '_' + str(i) + '.png', img)
cv2.imwrite(save_dir + file_name+tail+methodname+classname+'_heatmap_'+str(i)+'.png', att)
cv2.imwrite(save_dir + file_name+tail+methodname+classname+'_render_'+str(i)+'.png', rnd)
| 5,747 | 39.478873 | 135 | py |
MetaHIN | MetaHIN-master/code/main.py | # coding: utf-8
# author: lu yf
# create date: 2019-11-21 17:27
import gc
import glob
import random
import time
import numpy as np
import torch
from HeteML_new import HML
from DataHelper import DataHelper
from tqdm import tqdm
from Config import states
# random.seed(13)
np.random.seed(13)
torch.manual_seed(13)
def training(model, model_save=True, model_file=None, device='cpu'):
print('training model...')
if config['use_cuda']:
model.cuda()
model.train()
batch_size = config['batch_size']
num_epoch = config['num_epoch']
for _ in range(num_epoch): # 20
loss, mae, rmse = [], [], []
ndcg_at_5 = []
start = time.time()
random.shuffle(train_data)
num_batch = int(len(train_data) / batch_size) # ~80
supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s = zip(*train_data) # supp_um_s:(list,list,...,2553)
for i in range(num_batch): # each batch contains some tasks (each task contains a support set and a query set)
support_xs = list(supp_xs_s[batch_size * i:batch_size * (i + 1)])
support_ys = list(supp_ys_s[batch_size * i:batch_size * (i + 1)])
support_mps = list(supp_mps_s[batch_size * i:batch_size * (i + 1)])
query_xs = list(query_xs_s[batch_size * i:batch_size * (i + 1)])
query_ys = list(query_ys_s[batch_size * i:batch_size * (i + 1)])
query_mps = list(query_mps_s[batch_size * i:batch_size * (i + 1)])
_loss, _mae, _rmse, _ndcg_5 = model.global_update(support_xs,support_ys,support_mps,
query_xs,query_ys,query_mps,device)
loss.append(_loss)
mae.append(_mae)
rmse.append(_rmse)
ndcg_at_5.append(_ndcg_5)
print('epoch: {}, loss: {:.6f}, cost time: {:.1f}s, mae: {:.5f}, rmse: {:.5f}, ndcg@5: {:.5f}'.
format(_, np.mean(loss), time.time() - start,
np.mean(mae), np.mean(rmse), np.mean(ndcg_at_5)))
if _ % 10 == 0 and _ != 0:
testing(model, device)
model.train()
if model_save:
print('saving model...')
torch.save(model.state_dict(), model_file)
def testing(model, device='cpu'):
# testing
print('evaluating model...')
if config['use_cuda']:
model.cuda()
model.eval()
for state in states:
if state == 'meta_training':
continue
print(state + '...')
evaluate(model, state, device)
def evaluate(model, state, device='cpu'):
test_data = data_helper.load_data(data_set=data_set, state=state,
load_from_file=True)
supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s = zip(*test_data) # supp_um_s:(list,list,...,2553)
loss, mae, rmse = [], [], []
ndcg_at_5 = []
for i in range(len(test_data)): # each task
_mae, _rmse, _ndcg_5 = model.evaluation(supp_xs_s[i], supp_ys_s[i], supp_mps_s[i],
query_xs_s[i], query_ys_s[i], query_mps_s[i],device)
mae.append(_mae)
rmse.append(_rmse)
ndcg_at_5.append(_ndcg_5)
print('mae: {:.5f}, rmse: {:.5f}, ndcg@5: {:.5f}'.
format(np.mean(mae), np.mean(rmse),np.mean(ndcg_at_5)))
# print('fine tuning...')
# model.train()
# for i in range(len(test_data)):
# model.fine_tune(supp_xs_s[i], supp_ys_s[i], supp_mps_s[i])
# model.eval()
# for i in range(len(test_data)): # each task
# _mae, _rmse, _ndcg_5 = model.evaluation(supp_xs_s[i], supp_ys_s[i], supp_mps_s[i],
# query_xs_s[i], query_ys_s[i], query_mps_s[i],device)
# mae.append(_mae)
# rmse.append(_rmse)
# ndcg_at_5.append(_ndcg_5)
# print('mae: {:.5f}, rmse: {:.5f}, ndcg@5: {:.5f}'.
# format(np.mean(mae), np.mean(rmse), np.mean(ndcg_at_5)))
if __name__ == "__main__":
# data_set = 'dbook'
data_set = 'movielens'
# data_set = 'yelp'
input_dir = '../data/'
output_dir = '../data/'
res_dir = '../res/'+data_set
load_model = False
if data_set == 'movielens':
from Config import config_ml as config
elif data_set == 'yelp':
from Config import config_yelp as config
elif data_set == 'dbook':
from Config import config_db as config
cuda_or_cpu = torch.device("cuda" if config['use_cuda'] else "cpu")
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
print(config)
model_filename = "{}/hml.pkl".format(res_dir)
data_helper = DataHelper(input_dir, output_dir, config)
# training model.
model_name = 'mp_update'
# model_name = 'mp_MAML'
# model_name = 'mp_update_multi_MAML'
# model_name = 'mp_update_no_f'
# model_name = 'no_MAML'
# model_name = 'no_MAML_with_finetuning'
hml = HML(config, model_name)
print('--------------- {} ---------------'.format(model_name))
if not load_model:
# Load training dataset
print('loading train data...')
train_data = data_helper.load_data(data_set=data_set,state='meta_training',load_from_file=True)
# print('loading warm data...')
# warm_data = data_helper.load_data(data_set=data_set, state='warm_up',load_from_file=True)
training(hml, model_save=True, model_file=model_filename,device=cuda_or_cpu)
else:
trained_state_dict = torch.load(model_filename)
hml.load_state_dict(trained_state_dict)
# testing
testing(hml, device=cuda_or_cpu)
print('--------------- {} ---------------'.format(model_name))
| 5,711 | 35.615385 | 130 | py |
MetaHIN | MetaHIN-master/code/MetaLearner_new.py | # coding: utf-8
# author: lu yf
# create date: 2019-12-10 14:25
import torch
from torch.nn import functional as F
class MetaLearner(torch.nn.Module):
def __init__(self,config):
super(MetaLearner, self).__init__()
self.embedding_dim = config['embedding_dim']
self.fc1_in_dim = 32 + config['item_embedding_dim']
self.fc2_in_dim = config['first_fc_hidden_dim']
self.fc2_out_dim = config['second_fc_hidden_dim']
self.use_cuda = config['use_cuda']
self.config = config
# prediction parameters
self.vars = torch.nn.ParameterDict()
self.vars_bn = torch.nn.ParameterList()
w1 = torch.nn.Parameter(torch.ones([self.fc2_in_dim,self.fc1_in_dim])) # 64, 96
torch.nn.init.xavier_normal_(w1)
self.vars['ml_fc_w1'] = w1
self.vars['ml_fc_b1'] = torch.nn.Parameter(torch.zeros(self.fc2_in_dim))
w2 = torch.nn.Parameter(torch.ones([self.fc2_out_dim,self.fc2_in_dim]))
torch.nn.init.xavier_normal_(w2)
self.vars['ml_fc_w2'] = w2
        self.vars['ml_fc_b2'] = torch.nn.Parameter(torch.zeros(self.fc2_out_dim))  # bias size matches the output dim of w2
w3 = torch.nn.Parameter(torch.ones([1, self.fc2_out_dim]))
torch.nn.init.xavier_normal_(w3)
self.vars['ml_fc_w3'] = w3
self.vars['ml_fc_b3'] = torch.nn.Parameter(torch.zeros(1))
def forward(self, user_emb, item_emb, user_neigh_emb, vars_dict=None):
"""
"""
if vars_dict is None:
vars_dict = self.vars
x_i = item_emb
x_u = user_neigh_emb # movielens: loss:12.14... up! ; dbook 20epoch: user_cold: mae 0.6051;
x = torch.cat((x_i, x_u), 1) # ?, item_emb_dim+user_emb_dim+user_emb_dim
x = F.relu(F.linear(x, vars_dict['ml_fc_w1'], vars_dict['ml_fc_b1']))
x = F.relu(F.linear(x, vars_dict['ml_fc_w2'], vars_dict['ml_fc_b2']))
x = F.linear(x, vars_dict['ml_fc_w3'], vars_dict['ml_fc_b3'])
return x.squeeze()
def zero_grad(self, vars_dict=None):
with torch.no_grad():
if vars_dict is None:
for p in self.vars.values():
if p.grad is not None:
p.grad.zero_()
else:
for p in vars_dict.values():
if p.grad is not None:
p.grad.zero_()
def update_parameters(self):
return self.vars
class MetapathLearner(torch.nn.Module):
def __init__(self,config):
super(MetapathLearner, self).__init__()
self.config = config
# meta-path parameters
self.vars = torch.nn.ParameterDict()
neigh_w = torch.nn.Parameter(torch.ones([32,config['item_embedding_dim']])) # dim=32, movielens 0.81006
torch.nn.init.xavier_normal_(neigh_w)
self.vars['neigh_w'] = neigh_w
self.vars['neigh_b'] = torch.nn.Parameter(torch.zeros(32))
def forward(self, user_emb, item_emb, neighs_emb, mp, index_list, vars_dict=None):
"""
"""
if vars_dict is None:
vars_dict = self.vars
agg_neighbor_emb = F.linear(neighs_emb, vars_dict['neigh_w'], vars_dict['neigh_b']) # (#neighbors, item_emb_dim)
output_emb = F.leaky_relu(torch.mean(agg_neighbor_emb, 0)).repeat(user_emb.shape[0], 1) # (#sample, user_emb_dim)
#
# # each mean, then att agg
# _emb = []
# start = 0
# for idx in index_list:
# end = start+idx
# _emb.append(F.leaky_relu(torch.mean(agg_neighbor_emb[start:end],0)))
# start = end
# output_emb = torch.stack(_emb, 0) # (#sample, dim)
return output_emb
def zero_grad(self, vars_dict=None):
with torch.no_grad():
if vars_dict is None:
for p in self.vars.values():
if p.grad is not None:
p.grad.zero_()
else:
for p in vars_dict.values():
if p.grad is not None:
p.grad.zero_()
def update_parameters(self):
return self.vars
| 4,108 | 35.6875 | 122 | py |
MetaHIN | MetaHIN-master/code/DataHelper.py | # coding: utf-8
# author: lu yf
# create date: 2019-11-24 13:16
import gc
import glob
import os
import pickle
# from DataProcessor import Movielens
from tqdm import tqdm
from multiprocessing import Process, Pool
from multiprocessing.pool import ThreadPool
import numpy as np
import torch
class DataHelper:
def __init__(self, input_dir, output_dir, config):
self.input_dir = input_dir # ../data/movielens_1m/original/
self.output_dir = output_dir # ../data/movielens_1m
self.config = config
self.mp_list = self.config['mp']
def load_data(self, data_set, state, load_from_file=True):
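        """
        Load every task of one split (docstring added for reference). Each task is a tuple
        (support_x, support_y, support_mps, query_x, query_y, query_mps), where the *_mps
        entries are dicts keyed by meta-path name.
        """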
data_dir = os.path.join(self.output_dir,data_set)
supp_xs_s = []
supp_ys_s = []
supp_mps_s = []
query_xs_s = []
query_ys_s = []
query_mps_s = []
if data_set == 'yelp':
training_set_size = int(
len(glob.glob("{}/{}/*.npy".format(data_dir, state))) / self.config['file_num']) # support, query
# load all data
for idx in tqdm(range(training_set_size)):
supp_xs_s.append(torch.from_numpy(np.load("{}/{}/support_x_{}.npy".format(data_dir, state, idx))))
supp_ys_s.append(torch.from_numpy(np.load("{}/{}/support_y_{}.npy".format(data_dir, state, idx))))
query_xs_s.append(torch.from_numpy(np.load("{}/{}/query_x_{}.npy".format(data_dir, state, idx))))
query_ys_s.append(torch.from_numpy(np.load("{}/{}/query_y_{}.npy".format(data_dir, state, idx))))
supp_mp_data, query_mp_data = {}, {}
for mp in self.mp_list:
_cur_data = np.load("{}/{}/support_{}_{}.npy".format(data_dir, state, mp, idx), encoding='latin1')
supp_mp_data[mp] = [torch.from_numpy(x) for x in _cur_data]
_cur_data = np.load("{}/{}/query_{}_{}.npy".format(data_dir, state, mp, idx), encoding='latin1')
query_mp_data[mp] = [torch.from_numpy(x) for x in _cur_data]
supp_mps_s.append(supp_mp_data)
query_mps_s.append(query_mp_data)
else:
# if not load_from_file:
# ml = Movielens(os.path.join(self.input_dir,data_set), os.path.join(self.output_dir,data_set))
# ml.support_query_data()
training_set_size = int(len(glob.glob("{}/{}/*.pkl".format(data_dir,state))) / self.config['file_num']) # support, query
# load all data
for idx in tqdm(range(training_set_size)):
support_x = pickle.load(open("{}/{}/support_x_{}.pkl".format(data_dir, state, idx), "rb"))
if support_x.shape[0] > 5:
continue
del support_x
supp_xs_s.append(pickle.load(open("{}/{}/support_x_{}.pkl".format(data_dir, state, idx), "rb")))
supp_ys_s.append(pickle.load(open("{}/{}/support_y_{}.pkl".format(data_dir, state, idx), "rb")))
query_xs_s.append(pickle.load(open("{}/{}/query_x_{}.pkl".format(data_dir, state, idx), "rb")))
query_ys_s.append(pickle.load(open("{}/{}/query_y_{}.pkl".format(data_dir, state, idx), "rb")))
supp_mp_data, query_mp_data = {}, {}
for mp in self.mp_list:
supp_mp_data[mp] = pickle.load(open("{}/{}/support_{}_{}.pkl".format(data_dir, state, mp, idx), "rb"))
query_mp_data[mp] = pickle.load(open("{}/{}/query_{}_{}.pkl".format(data_dir, state, mp, idx), "rb"))
supp_mps_s.append(supp_mp_data)
query_mps_s.append(query_mp_data)
print('#support set: {}, #query set: {}'.format(len(supp_xs_s), len(query_xs_s)))
total_data = list(zip(supp_xs_s, supp_ys_s, supp_mps_s,
query_xs_s, query_ys_s, query_mps_s)) # all training tasks
del (supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s)
gc.collect()
return total_data
def load_batch_data(self, data_set, state, batch_indices, load_from_file=True):
data_dir = os.path.join(self.output_dir,data_set)
supp_xs_s = []
supp_ys_s = []
supp_mps_s = []
query_xs_s = []
query_ys_s = []
query_mps_s = []
for idx in batch_indices:
supp_xs_s.append(pickle.load(open("{}/{}/support_x_{}.pkl".format(data_dir, state, idx), "rb")))
supp_ys_s.append(pickle.load(open("{}/{}/support_y_{}.pkl".format(data_dir, state, idx), "rb")))
query_xs_s.append(pickle.load(open("{}/{}/query_x_{}.pkl".format(data_dir, state, idx), "rb")))
query_ys_s.append(pickle.load(open("{}/{}/query_y_{}.pkl".format(data_dir, state, idx), "rb")))
supp_mp_data, query_mp_data = {}, {}
for mp in self.mp_list:
supp_mp_data[mp] = pickle.load(open("{}/{}/support_{}_{}.pkl".format(data_dir, state, mp, idx), "rb"))
query_mp_data[mp] = pickle.load(open("{}/{}/query_{}_{}.pkl".format(data_dir, state, mp, idx), "rb"))
supp_mps_s.append(supp_mp_data)
query_mps_s.append(query_mp_data)
return supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s
def load_data_multiprocess(self, data_set, state, batch_indices, load_from_file=True):
data_dir = os.path.join(self.output_dir, data_set)
global cur_state
cur_state = state
supp_xs_s = []
supp_ys_s = []
supp_mps_s = []
query_xs_s = []
query_ys_s = []
query_mps_s = []
pool = ThreadPool(processes=20)
res = pool.map(self.load_single_data, batch_indices)
for r in res:
supp_xs_s.append(r[0])
supp_ys_s.append(r[1])
supp_mps_s.append(r[2])
query_xs_s.append(r[3])
query_ys_s.append(r[4])
query_mps_s.append(r[5])
return supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s
def load_single_data(self, idx):
data_dir = os.path.join(self.output_dir, self.config['dataset'])
supp_xs = pickle.load(open("{}/{}/support_x_{}.pkl".format(data_dir, cur_state, idx), "rb"))
supp_ys = pickle.load(open("{}/{}/support_y_{}.pkl".format(data_dir, cur_state, idx), "rb"))
query_xs = pickle.load(open("{}/{}/query_x_{}.pkl".format(data_dir, cur_state, idx), "rb"))
query_ys = pickle.load(open("{}/{}/query_y_{}.pkl".format(data_dir, cur_state, idx), "rb"))
supp_mp_data = {}
query_mp_data = {}
for mp in self.config['mp']:
supp_mp_data[mp] = pickle.load(open("{}/{}/support_{}_{}.pkl".format(data_dir, cur_state, mp, idx), "rb"))
query_mp_data[mp] = pickle.load(open("{}/{}/query_{}_{}.pkl".format(data_dir, cur_state, mp, idx), "rb"))
return supp_xs, supp_ys, supp_mp_data, query_xs, query_ys, query_mp_data
# if __name__ == "__main__":
# from Config import config_ml
# data_set = 'movielens_1m'
# input_dir = '../data/'
# output_dir = '../data/'
#
# data_helper = DataHelper(input_dir, output_dir, config_ml)
#
# training_set_size = int(len(glob.glob("../data/{}/{}/*.pkl".format(data_set, 'meta_training'))) / config_ml['file_num'])
# indices = list(range(training_set_size))
# random.shuffle(indices)
# num_batch = int(training_set_size / 32)
# start_time = time.time()
# for idx, i in tqdm(enumerate(range(num_batch))):
# cur_indices = indices[32*i:32*(i+1)]
# support_xs, support_ys, support_mps, query_xs, query_ys, query_mps = \
# data_helper.load_data_multiprocess(data_set, 'meta_training', cur_indices)
#
# print(time.time()-start_time)
| 7,817 | 45.814371 | 133 | py |
MetaHIN | MetaHIN-master/code/test.py | # coding: utf-8
# author: lu yf
# create date: 2019-12-25 11:23
import math
import os
import pickle
import numpy as np
import multiprocessing as mp
# def dcg_at_k(scores):
# # assert scores
# return scores[0] + sum(sc / math.log(ind+1, 2) for sc, ind in zip(scores[1:], range(2, len(scores) + 1))) # ind+1!!!
#
#
# def ndcg_at_k(real_scores, predicted_scores):
# assert len(predicted_scores) == len(real_scores)
# idcg = dcg_at_k(sorted(real_scores, reverse=True))
# return (dcg_at_k(predicted_scores) / idcg) if idcg > 0.0 else 0.0
#
#
# def ranking(real_score, pred_score, k_list):
# # ndcg@k
# ndcg = {}
# for k in k_list:
# sorted_idx = sorted(np.argsort(real_score)[::-1][:k])
# r_s_at_k = real_score[sorted_idx]
# p_s_at_k = pred_score[sorted_idx]
#
# ndcg[k] = ndcg_at_k(r_s_at_k, p_s_at_k)
# return ndcg
#
#
# predicted1 = [.4, .1, .8]
# predicted2 = [.0, .1, .4]
# predicted3 = [.4, .1, .0]
# actual = [.8, .4, .1, .0]
#
# print(ranking(np.array(actual), np.array(predicted1), [1,3]))
# print(ranking(np.array(actual), np.array(predicted2), [1,3]))
# print(ranking(np.array(actual), np.array(predicted3), [1,3]))
#
# print(dcg_at_k([3,2,3,0,1,2]))
# print(ranking(np.array([3,3,2,2,1,0]), np.array([3,2,3,0,1,2]), [6]))
def job(x):
return x*x, x+x
def multicore():
l = []
pool = mp.Pool()
res = pool.map(job, range(10))
for r in res:
l.append(r[0])
print(res)
print(l)
if __name__ == '__main__':
# multicore()
data_dir = os.path.join('../data', 'yelp')
    supp_xs = pickle.load(open("{}/{}/support_ubtb_0.pkl".format(data_dir, 'meta_training'), "rb"))  # pickle files must be opened in binary mode
print(supp_xs) | 1,696 | 24.328358 | 123 | py |
MetaHIN | MetaHIN-master/code/EmbeddingInitializer.py | # coding: utf-8
# author: lu yf
# create date: 2019-12-10 14:22
import torch
from torch.autograd import Variable
# Movielens dataset
class UserEmbeddingML(torch.nn.Module):
def __init__(self, config):
super(UserEmbeddingML, self).__init__()
self.num_gender = config['num_gender']
self.num_age = config['num_age']
self.num_occupation = config['num_occupation']
self.num_zipcode = config['num_zipcode']
self.embedding_dim = config['embedding_dim']
self.embedding_gender = torch.nn.Embedding(
num_embeddings=self.num_gender,
embedding_dim=self.embedding_dim
)
self.embedding_age = torch.nn.Embedding(
num_embeddings=self.num_age,
embedding_dim=self.embedding_dim
)
self.embedding_occupation = torch.nn.Embedding(
num_embeddings=self.num_occupation,
embedding_dim=self.embedding_dim
)
self.embedding_area = torch.nn.Embedding(
num_embeddings=self.num_zipcode,
embedding_dim=self.embedding_dim
)
def forward(self, user_fea):
"""
:param user_fea:
:return:
"""
gender_idx = Variable(user_fea[:, 0], requires_grad=False)
age_idx = Variable(user_fea[:, 1], requires_grad=False)
occupation_idx = Variable(user_fea[:, 2], requires_grad=False)
area_idx = Variable(user_fea[:, 3], requires_grad=False)
gender_emb = self.embedding_gender(gender_idx)
age_emb = self.embedding_age(age_idx)
occupation_emb = self.embedding_occupation(occupation_idx)
area_emb = self.embedding_area(area_idx)
return torch.cat((gender_emb, age_emb, occupation_emb, area_emb), 1) # (1, 4*32)
class ItemEmbeddingML(torch.nn.Module):
def __init__(self, config):
super(ItemEmbeddingML, self).__init__()
self.num_rate = config['num_rate']
self.num_genre = config['num_genre']
self.embedding_dim = config['embedding_dim']
self.embedding_rate = torch.nn.Embedding(
num_embeddings=self.num_rate,
embedding_dim=self.embedding_dim
)
self.embedding_genre = torch.nn.Linear(
in_features=self.num_genre,
out_features=self.embedding_dim,
bias=False
)
def forward(self, item_fea):
"""
:param item_fea:
:return:
"""
rate_idx = Variable(item_fea[:, 0], requires_grad=False)
genre_idx = Variable(item_fea[:, 1:26], requires_grad=False)
rate_emb = self.embedding_rate(rate_idx) # (1,32)
genre_emb = self.embedding_genre(genre_idx.float()) / torch.sum(genre_idx.float(), 1).view(-1, 1) # (1,32)
return torch.cat((rate_emb, genre_emb), 1) # (1, 2*32)
# Yelp dataset
class UserEmbeddingYelp(torch.nn.Module):
def __init__(self, config):
super(UserEmbeddingYelp, self).__init__()
self.num_fans = config['num_fans']
self.num_avgrating = config['num_avgrating']
self.embedding_dim = config['embedding_dim']
self.embedding_fans = torch.nn.Embedding(
num_embeddings=self.num_fans,
embedding_dim=self.embedding_dim
)
self.embedding_avgrating = torch.nn.Embedding(
num_embeddings=self.num_avgrating,
embedding_dim=self.embedding_dim
)
def forward(self, user_fea):
fans_idx = Variable(user_fea[:, 0], requires_grad=False) # [#sample]
avgrating_idx = Variable(user_fea[:, 1], requires_grad=False) # [#sample]
fans_emb = self.embedding_fans(fans_idx)
avgrating_emb = self.embedding_avgrating(avgrating_idx)
return torch.cat((fans_emb, avgrating_emb), 1) # (1, 1*32)
class ItemEmbeddingYelp(torch.nn.Module):
def __init__(self, config):
super(ItemEmbeddingYelp, self).__init__()
self.num_stars = config['num_stars']
self.num_postalcode = config['num_postalcode']
self.embedding_dim = config['embedding_dim']
self.embedding_stars = torch.nn.Embedding(
num_embeddings=self.num_stars,
embedding_dim=self.embedding_dim,
)
self.embedding_postalcode = torch.nn.Embedding(
num_embeddings=self.num_postalcode,
embedding_dim=self.embedding_dim,
)
def forward(self, item_fea):
stars_idx = Variable(item_fea[:, 0], requires_grad=False)
postalcode_idx = Variable(item_fea[:, 1], requires_grad=False)
stars_emb = self.embedding_stars(stars_idx) # (1,32)
postalcode_emb = self.embedding_postalcode(postalcode_idx) # (1,32)
return torch.cat((stars_emb, postalcode_emb), 1)
# DBook dataset
class UserEmbeddingDB(torch.nn.Module):
def __init__(self, config):
super(UserEmbeddingDB, self).__init__()
self.num_location = config['num_location']
self.embedding_dim = config['embedding_dim']
self.embedding_location = torch.nn.Embedding(
num_embeddings=self.num_location,
embedding_dim=self.embedding_dim
)
def forward(self, user_fea):
"""
:param user_fea: tensor, shape = [#sample, #user_fea]
:return:
"""
location_idx = Variable(user_fea[:, 0], requires_grad=False) # [#sample]
location_emb = self.embedding_location(location_idx)
return location_emb # (1, 1*32)
class ItemEmbeddingDB(torch.nn.Module):
def __init__(self, config):
super(ItemEmbeddingDB, self).__init__()
self.num_publisher = config['num_publisher']
self.embedding_dim = config['embedding_dim']
self.embedding_publisher = torch.nn.Embedding(
num_embeddings=self.num_publisher,
embedding_dim=self.embedding_dim
)
def forward(self, item_fea):
"""
:param item_fea:
:return:
"""
publisher_idx = Variable(item_fea[:, 0], requires_grad=False)
publisher_emb = self.embedding_publisher(publisher_idx) # (1,32)
return publisher_emb # (1, 1*32)
| 6,165 | 32.51087 | 115 | py |
MetaHIN | MetaHIN-master/code/HeteML_new.py | # coding: utf-8
# author: lu yf
# create date: 2019-12-02 11:25
import numpy as np
import torch
from torch.nn import functional as F
from Evaluation import Evaluation
from MetaLearner_new import MetapathLearner, MetaLearner
class HML(torch.nn.Module):
def __init__(self, config, model_name):
super(HML, self).__init__()
self.config = config
self.use_cuda = self.config['use_cuda']
self.device = torch.device("cuda" if config['use_cuda'] else "cpu")
self.model_name = model_name
if self.config['dataset'] == 'movielens':
from EmbeddingInitializer import UserEmbeddingML, ItemEmbeddingML
self.item_emb = ItemEmbeddingML(config)
self.user_emb = UserEmbeddingML(config)
elif self.config['dataset'] == 'yelp':
from EmbeddingInitializer import UserEmbeddingYelp, ItemEmbeddingYelp
self.item_emb = ItemEmbeddingYelp(config)
self.user_emb = UserEmbeddingYelp(config)
elif self.config['dataset'] == 'dbook':
from EmbeddingInitializer import UserEmbeddingDB, ItemEmbeddingDB
self.item_emb = ItemEmbeddingDB(config)
self.user_emb = UserEmbeddingDB(config)
self.mp_learner = MetapathLearner(config)
self.meta_learner = MetaLearner(config)
self.mp_lr = config['mp_lr']
self.local_lr = config['local_lr']
self.emb_dim = self.config['embedding_dim']
self.cal_metrics = Evaluation()
self.ml_weight_len = len(self.meta_learner.update_parameters())
self.ml_weight_name = list(self.meta_learner.update_parameters().keys())
self.mp_weight_len = len(self.mp_learner.update_parameters())
self.mp_weight_name = list(self.mp_learner.update_parameters().keys())
self.transformer_liners = self.transform_mp2task()
self.meta_optimizer = torch.optim.Adam(self.parameters(), lr=config['lr'])
def transform_mp2task(self):
liners = {}
ml_parameters = self.meta_learner.update_parameters()
# output_dim_of_mp = self.config['user_embedding_dim']
output_dim_of_mp = 32 # movielens: lr=0.001, avg mp, 0.8081
for w in self.ml_weight_name:
liners[w.replace('.', '-')] = torch.nn.Linear(output_dim_of_mp,
np.prod(ml_parameters[w].shape))
return torch.nn.ModuleDict(liners)
def forward(self, support_user_emb, support_item_emb, support_set_y, support_mp_user_emb, vars_dict=None):
"""
"""
if vars_dict is None:
vars_dict = self.meta_learner.update_parameters()
support_set_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_mp_user_emb, vars_dict)
loss = F.mse_loss(support_set_y_pred, support_set_y)
grad = torch.autograd.grad(loss, vars_dict.values(), create_graph=True)
fast_weights = {}
for i, w in enumerate(vars_dict.keys()):
fast_weights[w] = vars_dict[w] - self.local_lr * grad[i]
for idx in range(1, self.config['local_update']): # for the current task, locally update
support_set_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_mp_user_emb, vars_dict=fast_weights)
loss = F.mse_loss(support_set_y_pred, support_set_y) # calculate loss on support set
grad = torch.autograd.grad(loss, fast_weights.values(),
create_graph=True) # calculate gradients w.r.t. model parameters
for i, w in enumerate(fast_weights.keys()):
fast_weights[w] = fast_weights[w] - self.local_lr * grad[i]
return fast_weights
def mp_update(self, support_set_x, support_set_y, support_set_mps, query_set_x, query_set_y, query_set_mps):
"""
Mete-update the parameters of MetaPathLearner, AggLearner and MetaLearner.
"""
# each mp
support_mp_enhanced_user_emb_s, query_mp_enhanced_user_emb_s = [], []
mp_task_fast_weights_s = {}
mp_task_loss_s = {}
mp_initial_weights = self.mp_learner.update_parameters()
ml_initial_weights = self.meta_learner.update_parameters()
support_user_emb = self.user_emb(support_set_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_set_x[:, 0:self.config['item_fea_len']])
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
support_set_mp = list(support_set_mps[mp])
query_set_mp = list(query_set_mps[mp])
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = list(map(lambda _: _.shape[0], support_set_mp))
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = list(map(lambda _: _.shape[0], query_set_mp))
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp, support_index_list)
support_set_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_mp_enhanced_user_emb)
loss = F.mse_loss(support_set_y_pred, support_set_y)
grad = torch.autograd.grad(loss, mp_initial_weights.values(), create_graph=True)
fast_weights = {}
for i in range(self.mp_weight_len):
weight_name = self.mp_weight_name[i]
fast_weights[weight_name] = mp_initial_weights[weight_name] - self.mp_lr * grad[i]
for idx in range(1, self.config['mp_update']):
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp, support_index_list,
vars_dict=fast_weights)
support_set_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_mp_enhanced_user_emb)
loss = F.mse_loss(support_set_y_pred, support_set_y)
grad = torch.autograd.grad(loss, fast_weights.values(), create_graph=True)
for i in range(self.mp_weight_len):
weight_name = self.mp_weight_name[i]
fast_weights[weight_name] = fast_weights[weight_name] - self.mp_lr * grad[i]
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp, support_index_list, vars_dict=fast_weights)
support_mp_enhanced_user_emb_s.append(support_mp_enhanced_user_emb)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp, query_index_list, vars_dict=fast_weights)
query_mp_enhanced_user_emb_s.append(query_mp_enhanced_user_emb)
f_fast_weights = {}
for w, liner in self.transformer_liners.items():
w = w.replace('-', '.')
f_fast_weights[w] = ml_initial_weights[w] * \
torch.sigmoid(liner(support_mp_enhanced_user_emb.mean(0))). \
view(ml_initial_weights[w].shape)
# f_fast_weights = None
# # the current mp ---> task update
mp_task_fast_weights = self.forward(support_user_emb, support_item_emb, support_set_y,
support_mp_enhanced_user_emb,vars_dict=f_fast_weights)
mp_task_fast_weights_s[mp] = mp_task_fast_weights
query_set_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_mp_enhanced_user_emb,
vars_dict=mp_task_fast_weights)
q_loss = F.mse_loss(query_set_y_pred, query_set_y)
mp_task_loss_s[mp] = q_loss.data # movielens: 0.8126 dbook 0.6084
# mp_task_loss_s[mp] = loss.data # dbook 0.6144
# mp_att = torch.FloatTensor([l/sum(mp_task_loss_s.values()) for l in mp_task_loss_s.values()]).to(self.device) # movielens: 0.81
mp_att = F.softmax(-torch.stack(list(mp_task_loss_s.values())), dim=0) # movielens: 0.80781 lr0.001
# mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device)
agg_task_fast_weights = self.aggregator(mp_task_fast_weights_s, mp_att)
agg_mp_emb = torch.stack(query_mp_enhanced_user_emb_s, 1)
# agg_mp_emb = torch.stack(support_mp_enhanced_user_emb_s, 1)
query_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_agg_enhanced_user_emb, vars_dict=agg_task_fast_weights)
loss = F.mse_loss(query_y_pred, query_set_y)
query_y_real = query_set_y.data.cpu().numpy()
query_y_pred = query_y_pred.data.cpu().numpy()
mae, rmse = self.cal_metrics.prediction(query_y_real, query_y_pred)
ndcg_5 = self.cal_metrics.ranking(query_y_real, query_y_pred, k=5)
return loss, mae, rmse, ndcg_5
def mp_update_mp_MAML(self, support_set_x, support_set_y, support_set_mps, query_set_x, query_set_y, query_set_mps):
"""
MeLU + multiple meta-paths aggregation
"""
support_mp_enhanced_user_emb_s, query_mp_enhanced_user_emb_s = [], []
support_user_emb = self.user_emb(support_set_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_set_x[:, 0:self.config['item_fea_len']])
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
mp_task_loss_s = {}
for mp in self.config['mp']:
support_set_mp = list(support_set_mps[mp])
query_set_mp = list(query_set_mps[mp])
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = map(lambda _: _.shape[0], support_set_mp)
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = map(lambda _: _.shape[0], query_set_mp)
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp,
support_index_list)
support_mp_enhanced_user_emb_s.append(support_mp_enhanced_user_emb)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp,
query_index_list)
query_mp_enhanced_user_emb_s.append(query_mp_enhanced_user_emb)
# query_set_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_mp_enhanced_user_embs)
# q_loss = F.mse_loss(query_set_y_pred, query_set_y)
# mp_task_loss_s[mp] = q_loss.data
# mp_att = F.softmax(-torch.stack(list(mp_task_loss_s.values())), dim=0)
mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device) # mean
agg_mp_emb = torch.stack(support_mp_enhanced_user_emb_s, 1)
support_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
agg_mp_emb = torch.stack(query_mp_enhanced_user_emb_s, 1)
query_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
task_fast_weights = self.forward(support_user_emb, support_item_emb, support_set_y,
support_agg_enhanced_user_emb)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_agg_enhanced_user_emb, vars_dict=task_fast_weights)
loss = F.mse_loss(query_y_pred, query_set_y)
query_y_real = query_set_y.data.cpu().numpy()
query_y_pred = query_y_pred.data.cpu().numpy()
mae, rmse = self.cal_metrics.prediction(query_y_real, query_y_pred)
ndcg_5 = self.cal_metrics.ranking(query_y_real, query_y_pred, k=5)
return loss, mae, rmse, ndcg_5
def mp_update_multi_MAML(self, support_set_x, support_set_y, support_set_mps, query_set_x, query_set_y, query_set_mps):
"""
multiple MAML for multiple meta-paths
"""
loss_s = []
mae_s, rmse_s = [], []
ndcg_at_5 = []
support_user_emb = self.user_emb(support_set_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_set_x[:, 0:self.config['item_fea_len']])
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
support_set_mp = list(support_set_mps[mp])
query_set_mp = list(query_set_mps[mp])
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = map(lambda _: _.shape[0], support_set_mp)
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = map(lambda _: _.shape[0], query_set_mp)
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp,
support_index_list)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp,
query_index_list)
task_fast_weights = self.forward(support_user_emb, support_item_emb, support_set_y,
support_mp_enhanced_user_emb)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_mp_enhanced_user_emb,
vars_dict=task_fast_weights)
loss = F.mse_loss(query_y_pred, query_set_y)
mae, rmse = self.cal_metrics.prediction(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy())
ndcg_5 = self.cal_metrics.ranking(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy(), 5)
loss_s.append(loss)
mae_s.append(mae)
rmse_s.append(rmse)
ndcg_at_5.append(ndcg_5)
        return torch.stack(loss_s).mean(0), np.mean(mae_s), np.mean(rmse_s), np.mean(ndcg_at_5)
def no_MAML(self, support_set_x, support_set_y, support_set_mps, query_set_x, query_set_y, query_set_mps):
# each mp
support_mp_enhanced_user_emb_s, query_mp_enhanced_user_emb_s = [], []
support_user_emb = self.user_emb(support_set_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_set_x[:, 0:self.config['item_fea_len']])
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
support_set_mp = list(support_set_mps[mp])
query_set_mp = list(query_set_mps[mp])
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = map(lambda _: _.shape[0], support_set_mp)
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = map(lambda _: _.shape[0], query_set_mp)
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp,
support_index_list)
support_mp_enhanced_user_emb_s.append(support_mp_enhanced_user_emb)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp,
query_index_list)
query_mp_enhanced_user_emb_s.append(query_mp_enhanced_user_emb)
mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device) # mean
agg_mp_emb = torch.stack(support_mp_enhanced_user_emb_s, 1)
support_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
agg_mp_emb = torch.stack(query_mp_enhanced_user_emb_s, 1)
query_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
support_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_agg_enhanced_user_emb)
support_loss = F.mse_loss(support_y_pred, support_set_y)
support_mae, support_rmse = self.cal_metrics.prediction(support_set_y.data.cpu().numpy(),
support_y_pred.data.cpu().numpy())
support_ndcg_5 = self.cal_metrics.ranking(support_set_y.data.cpu().numpy(),
support_y_pred.data.cpu().numpy(), 5)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_agg_enhanced_user_emb)
query_loss = F.mse_loss(query_y_pred, query_set_y)
query_mae, query_rmse = self.cal_metrics.prediction(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy())
query_ndcg_5 = self.cal_metrics.ranking(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy(), 5)
return (support_loss + query_loss) / 2.0, (support_mae + query_mae) / 2.0, (support_rmse + query_rmse) / 2.0, \
(support_ndcg_5 + query_ndcg_5) / 2.0
def global_update(self, support_xs, support_ys, support_mps, query_xs, query_ys, query_mps, device='cpu'):
"""
"""
batch_sz = len(support_xs)
loss_s = []
mae_s = []
rmse_s = []
ndcg_at_5_s = []
for i in range(batch_sz): # each task in a batch
support_mp = dict(support_mps[i]) # must be dict!!!
query_mp = dict(query_mps[i])
for mp in self.config['mp']:
support_mp[mp] = map(lambda x: x.to(device), support_mp[mp])
query_mp[mp] = map(lambda x: x.to(device), query_mp[mp])
_loss, _mae, _rmse, _ndcg_5 = self.mp_update(support_xs[i].to(device), support_ys[i].to(device), support_mp,
query_xs[i].to(device), query_ys[i].to(device), query_mp)
# _loss, _mae, _rmse, _ndcg_5 = self.mp_update_mp_MAML(support_xs[i].to(device), support_ys[i].to(device), support_mp,
# query_xs[i].to(device), query_ys[i].to(device), query_mp)
# _loss, _mae, _rmse, _ndcg_5 = self.mp_update_multi_MAML(support_xs[i].to(device), support_ys[i].to(device), support_mp,
# query_xs[i].to(device), query_ys[i].to(device), query_mp)
# _loss, _mae, _rmse, _ndcg_5 = self.no_MAML(support_xs[i].to(device), support_ys[i].to(device), support_mp,
# query_xs[i].to(device), query_ys[i].to(device), query_mp)
loss_s.append(_loss)
mae_s.append(_mae)
rmse_s.append(_rmse)
ndcg_at_5_s.append(_ndcg_5)
loss = torch.stack(loss_s).mean(0)
mae = np.mean(mae_s)
rmse = np.mean(rmse_s)
ndcg_at_5 = np.mean(ndcg_at_5_s)
self.meta_optimizer.zero_grad()
loss.backward()
self.meta_optimizer.step()
return loss.cpu().data.numpy(), mae, rmse, ndcg_at_5
def evaluation(self, support_x, support_y, support_mp, query_x, query_y, query_mp, device='cpu'):
"""
"""
support_mp = dict(support_mp) # must be dict!!!
query_mp = dict(query_mp)
for mp in self.config['mp']:
support_mp[mp] = map(lambda x: x.to(device), support_mp[mp])
query_mp[mp] = map(lambda x: x.to(device), query_mp[mp])
_, mae, rmse, ndcg_5 = self.mp_update(support_x.to(device), support_y.to(device), support_mp,
query_x.to(device), query_y.to(device), query_mp)
# _, mae, rmse, ndcg_5 = self.mp_update_mp_MAML(support_x.to(device), support_y.to(device), support_mp,
# query_x.to(device), query_y.to(device), query_mp)
# _, mae, rmse, ndcg_5 = self.mp_update_multi_MAML(support_x.to(device), support_y.to(device), support_mp,
# query_x.to(device), query_y.to(device), query_mp)
# mae, rmse, ndcg_5 = self.eval_no_MAML(query_x.to(device), query_y.to(device), query_mp)
return mae, rmse, ndcg_5
def aggregator(self, task_weights_s, att):
for idx, mp in enumerate(self.config['mp']):
if idx == 0:
att_task_weights = dict({k: v * att[idx] for k, v in task_weights_s[mp].items()})
continue
tmp_att_task_weights = dict({k: v * att[idx] for k, v in task_weights_s[mp].items()})
att_task_weights = dict(zip(att_task_weights.keys(),
list(map(lambda x: x[0] + x[1],
zip(att_task_weights.values(), tmp_att_task_weights.values())))))
return att_task_weights
def eval_no_MAML(self, query_set_x, query_set_y, query_set_mps):
# each mp
query_mp_enhanced_user_emb_s = []
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
query_set_mp = list(query_set_mps[mp])
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = map(lambda _: _.shape[0], query_set_mp)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp,
query_index_list)
query_mp_enhanced_user_emb_s.append(query_mp_enhanced_user_emb)
mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device) # mean
agg_mp_emb = torch.stack(query_mp_enhanced_user_emb_s, 1)
query_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_agg_enhanced_user_emb)
query_mae, query_rmse = self.cal_metrics.prediction(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy())
query_ndcg_5 = self.cal_metrics.ranking(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy(), 5)
return query_mae, query_rmse, query_ndcg_5
def fine_tune(self, support_x,support_y,support_mp):
        if self.use_cuda:  # move the task tensors to GPU when CUDA is enabled
support_x = support_x.cuda()
support_y = support_y.cuda()
support_mp = dict(support_mp) # must be dict!!!
for mp, mp_data in support_mp.items():
support_mp[mp] = list(map(lambda x: x.cuda(), mp_data))
support_mp_enhanced_user_emb_s = []
support_user_emb = self.user_emb(support_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
support_set_mp = support_mp[mp]
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = map(lambda _: _.shape[0], support_set_mp)
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp,
support_index_list)
support_mp_enhanced_user_emb_s.append(support_mp_enhanced_user_emb)
mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device) # mean
agg_mp_emb = torch.stack(support_mp_enhanced_user_emb_s, 1)
support_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
support_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_agg_enhanced_user_emb)
support_loss = F.mse_loss(support_y_pred, support_y)
# fine-tune
self.meta_optimizer.zero_grad()
support_loss.backward()
self.meta_optimizer.step()
| 24,851 | 55.869565 | 162 | py |
MetaHIN | MetaHIN-master/code/Config.py | # coding: utf-8
# author: lu yf
# create date: 2019-11-20 19:46
config_db = {
'dataset': 'dbook',
# 'mp': ['ub'],
# 'mp': ['ub','ubab'],
'mp': ['ub','ubab','ubub'],
'use_cuda': True,
'file_num': 10, # each task contains 10 files
# user
'num_location': 453,
'num_fea_item': 2,
# item
'num_publisher': 1698,
'num_fea_user': 1,
'item_fea_len': 1,
# model setting
# 'embedding_dim': 32,
# 'user_embedding_dim': 32*1, # 1 features
# 'item_embedding_dim': 32*1, # 1 features
'embedding_dim': 32,
'user_embedding_dim': 32*1, # 1 features
'item_embedding_dim': 32*1, # 1 features
'first_fc_hidden_dim': 64,
'second_fc_hidden_dim': 64,
'mp_update': 1,
'local_update': 1,
'lr': 5e-4,
'mp_lr': 5e-3,
'local_lr': 5e-3,
'batch_size': 32, # for each batch, the number of tasks
'num_epoch': 50,
'neigh_agg': 'mean',
# 'neigh_agg': 'attention',
'mp_agg': 'mean',
# 'mp_agg': 'attention',
}
config_ml = {
'dataset': 'movielens',
# 'mp': ['um'],
# 'mp': ['um','umdm'],
# 'mp': ['um','umam','umdm'],
'mp': ['um','umum','umam','umdm'],
'use_cuda': True,
'file_num': 12, # each task contains 12 files for movielens
# item
'num_rate': 6,
'num_genre': 25,
'num_fea_item': 2,
'item_fea_len': 26,
# user
'num_gender': 2,
'num_age': 7,
'num_occupation': 21,
'num_zipcode': 3402,
'num_fea_user': 4,
# model setting
'embedding_dim': 32,
'user_embedding_dim': 32*4, # 4 features
'item_embedding_dim': 32*2, # 2 features
'first_fc_hidden_dim': 64,
'second_fc_hidden_dim': 64,
'mp_update': 1,
'local_update': 1,
'lr': 5e-4,
'mp_lr': 5e-3,
'local_lr': 5e-3,
'batch_size': 32, # for each batch, the number of tasks
'num_epoch': 100,
'neigh_agg': 'mean',
# 'neigh_agg': 'max',
'mp_agg': 'mean',
# 'mp_agg': 'attention',
}
config_yelp = {
'dataset': 'yelp',
# 'mp': ['ubub'],
'mp': ['ub','ubcb','ubtb','ubub'],
'use_cuda': True,
'file_num': 12, # each task contains 12 files
# item
'num_stars': 9,
'num_postalcode': 6127,
'num_fea_item': 2,
'item_fea_len': 2,
# user
'num_fans': 412,
'num_avgrating': 359,
'num_fea_user': 2,
# model setting
'embedding_dim': 32,
    'user_embedding_dim': 32*2, # 2 features
    'item_embedding_dim': 32*2, # 2 features
'first_fc_hidden_dim': 64,
'second_fc_hidden_dim': 64,
'mp_update': 1,
'local_update': 1,
'lr': 5e-4,
'mp_lr': 1e-3,
'local_lr': 1e-3,
'batch_size': 32, # for each batch, the number of tasks
'num_epoch': 50,
'neigh_agg': 'mean',
# 'neigh_agg': 'attention',
'mp_agg': 'mean',
# 'mp_agg': 'attention',
}
states = ["meta_training","warm_up", "user_cold_testing", "item_cold_testing", "user_and_item_cold_testing"]
| 2,953 | 21.210526 | 108 | py |
MetaHIN | MetaHIN-master/code/Evaluation.py | # coding: utf-8
# author: lu yf
# create date: 2019-11-27 13:14
import math
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
class Evaluation:
def __init__(self):
self.k = 5
def prediction(self, real_score, pred_score):
MAE = mean_absolute_error(real_score, pred_score)
RMSE = math.sqrt(mean_squared_error(real_score, pred_score))
return MAE, RMSE
def dcg_at_k(self,scores):
# assert scores
return scores[0] + sum(sc / math.log(ind+1, 2) for sc, ind in zip(scores[1:], range(2, len(scores) + 1)))
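    # As implemented above: DCG = rel_1 + sum_{i>=2} rel_i / log2(i+1), with i the 1-based rank.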
def ndcg_at_k(self, real_scores, predicted_scores):
idcg = self.dcg_at_k(sorted(real_scores, reverse=True))
return (self.dcg_at_k(predicted_scores) / idcg) if idcg > 0.0 else 0.0
def ranking(self, real_score, pred_score, k):
# ndcg@k
sorted_idx = sorted(np.argsort(real_score)[::-1][:k]) # get the index of the top k real score
r_s_at_k = real_score[sorted_idx]
p_s_at_k = pred_score[sorted_idx]
ndcg_5 = self.ndcg_at_k(r_s_at_k, p_s_at_k)
#
# ndcg = {}
# for k in k_list:
# sorted_idx = sorted(np.argsort(real_score)[::-1][:k])
# r_s_at_k = real_score[sorted_idx]
# p_s_at_k = pred_score[sorted_idx]
#
# ndcg[k] = self.ndcg_at_k(r_s_at_k, p_s_at_k)
return ndcg_5
| 1,426 | 28.122449 | 113 | py |
galIMF | galIMF-master/galimf.py | # A python3 code
# This is the main module operating the other two modules IGIMF and OSGIMF.
# The IGIMF model calculates an analytically integrated galaxy-wide IMF;
# The OSGIMF model samples all the star cluster masses and all the stellar masses in each star cluster
# and then combines the stars in all star clusters to give the galaxy stellar population.
# --------------------------------------------------------------------------------------------------------------------------------
# importing modules and libraries
import math
import csv # csv and izip/zip are used to create output files
try:
from itertools import izip as zip
except ImportError: # will be python 3.x series
pass
# --------------------------------------------------------------------------------------------------------------------------------
# The star mass resolution is the lower resolution among
# the resolution of histogram (resolution_histogram_relative)
# and the resolution of star generation (resolution_star_... in the file IMF_schulz.py)
resolution_histogram_relative = 0.01 # The star mass resolution of histogram, star mass * resolution_histogram_relative
# also re-defined in a test file, it scales automatically with the SFR
# function_galimf takes in I/OS-GMF parameters and create output files
def function_galimf(IorS, R14orNOT, SFR, alpha3_model, delta_t, M_over_H, I_ecl, M_ecl_U, M_ecl_L, beta_model,
I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_str_U, printout=False):
if IorS == "I":
global List_xi, List_M_str_for_xi_str
function_draw_igimf(R14orNOT, SFR, alpha3_model, beta_model, delta_t, M_over_H,
I_ecl, M_ecl_U, M_ecl_L, I_str, M_str_L, alpha_1, alpha1_model,
M_turn, alpha_2, alpha2_model, M_turn2, M_str_U)
if printout is True:
# write data for GalIMF_Result/IGIMF_shape
with open('Galaxy_wide_IMF.txt', 'w') as f:
writer = csv.writer(f, delimiter=' ')
f.write("# Galaxy-wide IMF output file.\n# The columns are:\n# mass xi\n# where xi=dN/dm ("
"see Yan et.al 2017 A&A...607A.126Y)\n\n")
writer.writerows(
zip(List_M_str_for_xi_str, List_xi))
print("\n ### Galaxy-wide IMF data saved in the file Galaxy_wide_IMF.txt ###\n")
return
elif IorS == "OS":
global mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number
sample_for_one_epoch(R14orNOT, SFR, alpha3_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_model,
I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_over_H, M_str_U)
function_draw(SFR, M_str_L, M_str_U, M_ecl_L, resolution_histogram_relative)
function_make_drop_line()
# write data for GalIMF_Result/histogram
function_draw_histogram()
if printout is True:
with open('Galaxy_stellar_mass_histogram.txt', 'w') as f:
writer = csv.writer(f, delimiter=' ')
f.write(
"# Stellar mass histogram output file. It gives the generated number of stars in different "
"mass range.\n# The columns are:\n# mass_range_center mass_range mass_range_upper_limit mass_"
"range_lower_limit star_number_in_the_mass_range\n\n")
writer.writerows(
zip(mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number))
print("\n ### Stellar mass histogram data saved in the file Galaxy_stellar_mass_histogram.txt ###\n")
return
else:
print("Input wrong parameter for 'IorS'!")
return
######## IGIMF #########
# This module compute IGIMF as described in Yan et al 2017
# --------------------------------------------------------------------------------------------------------------------------------
# initialization of variable-length lists
List_M_ecl_for_xi_ecl = []
List_xi_ecl = []
List_M_str_for_xi_str = []
List_xi_str = []
List_xi = []
# --------------------------------------------------------------------------------------------------------------------------------
# function_draw_igimf computes the IGIMF by combining function_ecmf (embedded cluster mass
# function) and function_IMF (stellar mass function in individual embedded clusters),
# i.e. equation (1) from Yan et al. 2017.
# The function fills the values of the following global lists:
# List_M_ecl_for_xi_ecl - list of masses, M_ecl, of embedded clusters for the ECMF
# List_xi - IGIMF values (xi_IGIMF = dN/dm, where dN is the number of stars in the mass bin dm),
# by default normalized to the total mass in Msun units (= SFR * 10 Myr)
# List_M_str_for_xi_str - list of stellar masses for the stellar IMF in Msun units
# List_xi_L - logarithmic IGIMF (xi_IGIMF_L = dN/d log_10 m)
# List_Log_M_str - log_10 of the stellar masses
def function_draw_igimf(R14orNOT, SFR, alpha3_model, beta_model, delta_t, M_over_H, I_ecl, M_ecl_U, M_ecl_L,
I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_str_U):
if SFR != 0:
global List_M_ecl_for_xi_ecl, List_xi, List_M_str_for_xi_str, List_xi_L, List_Log_M_str, x_IMF, y_IMF, List_xi_str
function_ecmf(R14orNOT, SFR, beta_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, M_over_H)
x_IMF = []
y_IMF = []
alpha_1_change = function_alpha_1_change(alpha_1, alpha1_model, M_over_H)
alpha_2_change = function_alpha_2_change(alpha_2, alpha2_model, M_over_H)
alpha_3_change = function_alpha_3_change(alpha3_model, List_M_ecl_for_xi_ecl[-1], M_over_H)
function_draw_xi_str(M_str_L, List_M_ecl_for_xi_ecl[-1], I_str, M_str_L, alpha_1_change,
M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)
maximum_number_of_mass_grid = function_maximum_number_of_mass_grid(M_str_L, M_str_U)
List_xi = [1e-10] * maximum_number_of_mass_grid
List_M_str_for_xi_str = [M_str_U] * maximum_number_of_mass_grid
List_xi_str = [1e-10] * maximum_number_of_mass_grid
number_of_ecl = len(List_M_ecl_for_xi_ecl) - 1
function_IMF(alpha3_model, M_over_H, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, M_str_U,
number_of_ecl, 0)
x_IMF = []
y_IMF = []
function_draw_xi_str(M_str_L, List_M_ecl_for_xi_ecl[-1], I_str, M_str_L, alpha_1_change,
M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)
for i in range(len(x_IMF)):
List_M_str_for_xi_str[i] = x_IMF[i]
lenth = len(List_M_str_for_xi_str)
List_xi_L = [0] * lenth
List_Log_M_str = [0] * lenth
function_xi_to_xiL(lenth - 1, List_xi[0])
for eee in range(len(List_M_str_for_xi_str)):
if List_M_str_for_xi_str[eee] == M_str_U:
List_xi[eee] = List_xi[eee-1]
List_M_str_for_xi_str[eee] = List_M_str_for_xi_str[eee-1]
List_xi_str[eee] = List_xi_str[eee-1]
else:
List_M_str_for_xi_str = [0, 1000]
List_xi = [0, 0]
return
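# For reference, the discretised form of equation (1) implemented below by function_IMF
# together with function_update_list_xi is, schematically,
# xi_IGIMF(m) ~= sum_i xi_str(m | M_ecl_i) * xi_ecl(M_ecl_i) * (M_ecl_(i+1) - M_ecl_i).
# A minimal stand-alone sketch of this quadrature (hypothetical helper, not called anywhere in this module):
def igimf_discrete_sum_sketch(xi_str_of_m_given_M_ecl, xi_ecl_values, M_ecl_grid, m):
    total = 0.0
    for i in range(len(M_ecl_grid) - 1):
        # stellar IMF of one cluster type, weighted by the number of such clusters in the M_ecl bin
        total += xi_str_of_m_given_M_ecl(m, M_ecl_grid[i]) * xi_ecl_values[i] * (M_ecl_grid[i + 1] - M_ecl_grid[i])
    return total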
# function_ecmf computes the mass function of star clusters (i.e. the ECMF - embedded cluster mass function).
# The assumed shape of the ECMF is a single power law with index beta (a function of the SFR).
# The empirical lower limit for the star cluster mass is 5 Msun;
# the hypothetical upper mass limit is 10^9 Msun, but the actual M_ecl^max is computed with eq. (12) in Yan et al. 2017.
def function_ecmf(R14orNOT, SFR, beta_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, M_over_H):
global List_M_ecl_for_xi_ecl, List_xi_ecl, x_ECMF, y_ECMF
x_ECMF = []
y_ECMF = []
if R14orNOT == True:
beta_change = 2
else:
beta_change = function_beta_change(beta_model, SFR, M_over_H)
function_draw_xi_ecl(R14orNOT, M_ecl_L, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
List_M_ecl_for_xi_ecl = x_ECMF
del List_M_ecl_for_xi_ecl[0]
del List_M_ecl_for_xi_ecl[-1]
List_xi_ecl = y_ECMF
del List_xi_ecl[0]
del List_xi_ecl[-1]
return
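# The single power law assumed above, xi_ecl(M_ecl) = k_ecl * M_ecl**(-beta), is the same
# form evaluated later by function_draw_xi_ecl_loop.
# Minimal sketch for an arbitrary (hypothetical) normalisation k_ecl_norm, not used elsewhere:
def xi_ecl_power_law_sketch(M_ecl, k_ecl_norm, beta):
    # dN/dM_ecl of the embedded cluster mass function
    return k_ecl_norm * M_ecl ** (-beta)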
# function_IMF computes the stellar IMF in the individual embedded star clusters
def function_IMF(alpha3_model, M_over_H, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, M_str_U,
number_of_ecl, i):
while i < number_of_ecl:
global List_M_str_for_xi_str, List_xi_str, List_M_ecl_for_xi_ecl, x_IMF, y_IMF
x_IMF = []
y_IMF = []
M_ecl = List_M_ecl_for_xi_ecl[i]
alpha_3_change = function_alpha_3_change(alpha3_model, M_ecl, M_over_H)
# Here only alpha_3_change is recalculated as alpha1(2)_change do not depend on M_ecl thus do not change.
function_draw_xi_str(M_str_L, M_ecl, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2,
alpha_3_change, M_str_U)
for qqq in range(min(len(x_IMF), len(List_M_str_for_xi_str))):
List_M_str_for_xi_str[qqq] = x_IMF[qqq]
for www in range(min(len(y_IMF), len(List_xi_str))):
List_xi_str[www] = y_IMF[www]
number_of_str = len(List_M_str_for_xi_str)
function_update_list_xi(i, number_of_str, 0)
(i) = (i+1)
return
def function_update_list_xi(i, number_of_str, j):
while j < number_of_str:
global List_xi, List_xi_str, List_xi_ecl, List_M_ecl_for_xi_ecl
List_xi[j] += List_xi_str[j] * List_xi_ecl[i] * (List_M_ecl_for_xi_ecl[i+1] - List_M_ecl_for_xi_ecl[i])
(j) = (j+1)
return
def function_xi_to_xiL(i, unit):
global List_xi_L, List_xi, List_M_str_for_xi_str, List_Log_M_str
while i > -1:
if List_xi[i] == 0:
List_xi[i] = 10**(-5)
List_xi_L[i] = math.log((List_xi[i] * math.log(10) * List_M_str_for_xi_str[i] / unit * 1800), 10)
List_Log_M_str[i] = math.log(List_M_str_for_xi_str[i] , 10)
(i) = (i-1)
return
############ OSGIMF #############
# -----------------------------------------------------------------------------------------
# initialization of variable-length lists
# -----------------------------------------------------------------------------------------
List_M_str_all_i = []
List_n_str_all_i = []
List_mass_grid_x_axis = []
List_star_number_in_mass_grid_y_axis = []
List_star_number_in_mass_grid_y_axis2 = []
List_star_number_in_mass_grid_y_axis3 = []
List_star_number_in_mass_grid_y_axis4 = []
List_mass_grid = []
List_star_number_in_mass_grid = []
# -----------------------------------------------------------------------------------------
# This function gives the stellar masses in the entire galaxy in an unsorted manner,
# i.e. the stars are still grouped by their parent clusters.
def sample_for_one_epoch(R14orNOT, SFR, alpha3_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_model,
I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_over_H, M_str_U):
global List_M_str_all_i, List_n_str_all_i, list_M_ecl_i
beta_change = function_beta_change(beta_model, SFR, M_over_H)
function_sample_cluster(R14orNOT, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
len_of_M_ecl_list = len(list_M_ecl_i)
List_M_str_all_i = []
List_n_str_all_i = []
function_sample_star_from_clusters(alpha3_model, I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model,
M_turn2, M_over_H, M_str_U, len_of_M_ecl_list, 0)
return
# Masses of formed clusters
def function_sample_cluster(R14orNOT, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change):
global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i, M_max_ecl
list_m_ecl_i = []
list_n_ecl_i = []
list_M_ecl_i = []
M_max_ecl = 0
function_sample_from_ecmf(R14orNOT, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
return
# Stellar masses in a given star cluster
def function_sample_star_from_clusters(alpha3_model, I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model,
M_turn2, M_over_H, M_str_U, len_of_M_ecl_list, i):
while i < len_of_M_ecl_list: # sample a total number of i clusters
global List_M_str_all_i, List_n_str_all_i, list_m_str_i, list_n_str_i, list_M_str_i
list_m_str_i = []
list_n_str_i = []
list_M_str_i = []
alpha_1_change = function_alpha_1_change(alpha_1, alpha1_model, M_over_H)
alpha_2_change = function_alpha_2_change(alpha_2, alpha2_model, M_over_H)
alpha_3_change = function_alpha_3_change(alpha3_model, list_M_ecl_i[i], M_over_H)
function_sample_from_imf(list_M_ecl_i[i],
I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)
List_M_str_all_i += [list_M_str_i] # save all i clusters in "all_i" list
List_n_str_all_i += [list_n_str_i]
(i) = (i+1)
return
##################################################################################
## The sampling is finished here. Below are just sorting, binning, and plotting.##
##################################################################################
# Now the stellar masses are recorded for the individual star clusters in "List_M_str_all_i" and "List_n_str_all_i".
# For the whole galaxy we have: the cluster masses and the number of clusters with each mass;
# and for each cluster: the stellar masses and the number of stars with each mass.
# Sort all stellar masses in an epoch into a mass grid.
# The main purpose here is to sort the stellar masses and prepare the plotting output.
def function_draw(SFR, M_str_low, M_str_up, M_ecl_low, resolution_histogram_relative):
M_low = min(M_str_low, M_ecl_low)
global List_mass_grid, List_star_number_in_mass_grid, List_mass_grid_x_axis, List_star_number_in_mass_grid_y_axis
# for all stars
List_mass_grid = []
function_mass_grid(SFR, M_str_up, M_low, resolution_histogram_relative)
List_mass_grid += [M_low]
List_star_number_in_mass_grid = [0] * (len(List_mass_grid) - 1)
function_sort_out_star_mass(0)
##########
List_mass_grid_x_axis = [M_str_up]
make_mass_grid_x_axis(1)
List_mass_grid_x_axis += [M_low]
List_star_number_in_mass_grid_y_axis = []
make_star_number_in_mass_grid_y_axis(0)
List_mass_grid_x_axis = [List_mass_grid_x_axis[0]] + List_mass_grid_x_axis
List_mass_grid_x_axis += [List_mass_grid_x_axis[-1]]
List_star_number_in_mass_grid_y_axis = [0.0000001] + List_star_number_in_mass_grid_y_axis
List_star_number_in_mass_grid_y_axis += [0.0000001]
# for most massive star
global List_mass_grid2, List_star_number_in_mass_grid2, List_mass_grid_x_axis2, List_star_number_in_mass_grid_y_axis2
List_mass_grid2 = List_mass_grid
List_star_number_in_mass_grid2 = [0] * (len(List_mass_grid2) - 1)
function_sort_out_star_mass2(0)
##########
List_star_number_in_mass_grid_y_axis2 = []
make_star_number_in_mass_grid_y_axis2(0)
List_star_number_in_mass_grid_y_axis2 = [0.0000001] + List_star_number_in_mass_grid_y_axis2
List_star_number_in_mass_grid_y_axis2 += [0.0000001]
###################################
global List_mass_grid3, List_star_number_in_mass_grid3, List_mass_grid_x_axis3, List_star_number_in_mass_grid_y_axis3
List_mass_grid3 = List_mass_grid
List_star_number_in_mass_grid3 = [0] * (len(List_mass_grid3) - 1)
function_sort_out_star_mass3(0)
##########
List_star_number_in_mass_grid_y_axis3 = []
make_star_number_in_mass_grid_y_axis3(0)
List_star_number_in_mass_grid_y_axis3 = [0.0000001] + List_star_number_in_mass_grid_y_axis3
List_star_number_in_mass_grid_y_axis3 += [0.0000001]
###################################
global List_mass_grid4, List_star_number_in_mass_grid4, List_mass_grid_x_axis4, List_star_number_in_mass_grid_y_axis4
List_mass_grid4 = List_mass_grid
List_star_number_in_mass_grid4 = [0] * (len(List_mass_grid4) - 1)
function_sort_out_star_mass4(0)
##########
List_star_number_in_mass_grid_y_axis4 = []
make_star_number_in_mass_grid_y_axis4(0)
List_star_number_in_mass_grid_y_axis4 = [0.0000001] + List_star_number_in_mass_grid_y_axis4
List_star_number_in_mass_grid_y_axis4 += [0.0000001]
return
### make a mass grid ###
def function_mass_grid(SFR, mass, M_str_low, resolution_histogram_relative):
while mass > M_str_low:
global List_mass_grid
List_mass_grid += [mass]
(mass) = (mass * (1-resolution_histogram_relative))
        # we find it useful to use the following form of mass grid sometimes.
        # One can apply this alternative form by commenting out the above line
        # (adding a # in front of it) and uncommenting the two lines below:
# (mass) = (mass * (0.967 + math.log(SFR, 10) / 400) / (math.log(mass + 1) ** 2 /
# (2 ** (math.log(SFR, 10) + 6.85) - 1) + 1))
return
# count the number of stars in each grid bin
ls = 0
def function_sort_out_star_mass(i):
while i < len(List_M_str_all_i):
global ls
ls = 0
subfunction_sort_out(i, 0)
(i) = (i+1)
return
def function_sort_out_star_mass2(i):
while i < len(List_M_str_all_i):
global ls
ls = 0
subfunction_sort_out2(i, 0)
(i) = (i+1)
return
def function_sort_out_star_mass3(i):
while i < len(List_M_str_all_i):
global ls
ls = 0
subfunction_sort_out3(i, 1)
(i) = (i+1)
return
def function_sort_out_star_mass4(i):
while i < len(List_M_str_all_i):
global ls
ls = 0
subfunction_sort_out4(i, 2)
(i) = (i+1)
return
def subfunction_sort_out(i, j):
while j < len(List_M_str_all_i[i]):
global ls, List_n_str_all_i
function_find_k(i, j, ls)
List_star_number_in_mass_grid[ls] += List_n_str_all_i[i][j] * list_n_ecl_i[i]
(j) = (j+1)
return
def subfunction_sort_out2(i, j):
if j < len(List_M_str_all_i[i]):
global ls
function_find_k(i, j, ls)
List_star_number_in_mass_grid2[ls] += list_n_ecl_i[i]
return
def subfunction_sort_out3(i, j):
if j < len(List_M_str_all_i[i]):
global ls
function_find_k(i, j, ls)
List_star_number_in_mass_grid3[ls] += list_n_ecl_i[i]
return
def subfunction_sort_out4(i, j):
if j < len(List_M_str_all_i[i]):
global ls
function_find_k(i, j, ls)
List_star_number_in_mass_grid4[ls] += list_n_ecl_i[i]
return
def function_find_k(i, j, k):
while List_mass_grid[k+1] > List_M_str_all_i[i][j]:
global ls
ls = k+1
(k) = (k+1)
return
# prepare the broken-line (step) arrays for plotting
def make_mass_grid_x_axis(i):
global List_mass_grid_x_axis, List_mass_grid
while i < len(List_mass_grid)-1:
List_mass_grid_x_axis += [List_mass_grid[i]]*2
(i) = (i+1)
return
def make_star_number_in_mass_grid_y_axis(i):
global List_star_number_in_mass_grid_y_axis, List_star_number_in_mass_grid, List_mass_grid
while i < len(List_star_number_in_mass_grid):
List_star_number_in_mass_grid_y_axis += [List_star_number_in_mass_grid[i]/(List_mass_grid[i] -
List_mass_grid[i+1])]*2
(i) = (i+1)
return
def make_star_number_in_mass_grid_y_axis2(i):
global List_star_number_in_mass_grid_y_axis2, List_star_number_in_mass_grid2, List_mass_grid2
while i < len(List_star_number_in_mass_grid2):
List_star_number_in_mass_grid_y_axis2 += [List_star_number_in_mass_grid2[i]/(List_mass_grid2[i] -
List_mass_grid2[i+1])]*2
(i) = (i+1)
return
def make_star_number_in_mass_grid_y_axis3(i):
global List_star_number_in_mass_grid_y_axis3, List_star_number_in_mass_grid3, List_mass_grid3
while i < len(List_star_number_in_mass_grid3):
List_star_number_in_mass_grid_y_axis3 += [List_star_number_in_mass_grid3[i]/(List_mass_grid3[i] -
List_mass_grid3[i+1])]*2
(i) = (i+1)
return
def make_star_number_in_mass_grid_y_axis4(i):
global List_star_number_in_mass_grid_y_axis4, List_star_number_in_mass_grid4, List_mass_grid4
while i < len(List_star_number_in_mass_grid4):
List_star_number_in_mass_grid_y_axis4 += [List_star_number_in_mass_grid4[i]/(List_mass_grid4[i] -
List_mass_grid4[i+1])]*2
(i) = (i+1)
return
def function_make_drop_line1(i):
while i < len(List_star_number_in_mass_grid_y_axis)-1:
if List_star_number_in_mass_grid_y_axis[i] == 0:
List_star_number_in_mass_grid_y_axis[i] = 0.0000001
(i) = (i+1)
def function_make_drop_line2(i):
while i < len(List_star_number_in_mass_grid_y_axis2)-1:
if List_star_number_in_mass_grid_y_axis2[i] == 0:
List_star_number_in_mass_grid_y_axis2[i] = 0.0000001
(i) = (i+1)
def function_make_drop_line3(i):
while i < len(List_star_number_in_mass_grid_y_axis3)-1:
if List_star_number_in_mass_grid_y_axis3[i] == 0:
List_star_number_in_mass_grid_y_axis3[i] = 0.0000001
(i) = (i+1)
def function_make_drop_line4(i):
while i < len(List_star_number_in_mass_grid_y_axis4)-1:
if List_star_number_in_mass_grid_y_axis4[i] == 0:
List_star_number_in_mass_grid_y_axis4[i] = 0.0000001
(i) = (i+1)
def function_make_drop_line():
function_make_drop_line1(0)
function_make_drop_line2(0)
function_make_drop_line3(0)
function_make_drop_line4(0)
return
######################## histogram ########################
mass_range_center = []
mass_range = []
mass_range_upper_limit = []
mass_range_lower_limit = []
star_number = []
def function_draw_histogram():
global mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number
mass_range_center = []
i = 0
while i < len(List_mass_grid) - 1:
mass_range_center += [
0.5 * (List_mass_grid[i] + List_mass_grid[i + 1])]
i = i + 1
mass_range = []
i = 0
while i < len(List_mass_grid) - 1:
mass_range += [List_mass_grid[i] - List_mass_grid[i + 1]]
i = i + 1
mass_range_upper_limit = []
i = 0
while i < len(List_mass_grid):
mass_range_upper_limit += [List_mass_grid[i]]
i = i + 1
mass_range_lower_limit = []
i = 0
while i < len(List_mass_grid) - 1:
mass_range_lower_limit += [List_mass_grid[i + 1]]
i = i + 1
star_number = List_star_number_in_mass_grid + []
return
############## IMF #################
# use equations in "supplementary-document-galimf.pdf"
# The star mass resolution is the lower of the two resolutions, "relative resolution" and "absolute resolution", where
# the relative resolution = star mass * resolution_star_relative
# the absolute resolution = resolution_star_absolute
resolution_star_relative = 0.01
resolution_star_absolute = 0.01
mass_grid_index = 1.01
list_m_str_i = []
list_n_str_i = []
list_M_str_i = []
def function_sample_from_imf(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
global list_m_str_i, list_n_str_i, list_M_str_i, M_max, M_max_function, k3, k2, k1, resolution_star_relative, resolution_star_absolute
M_max = 0
M_max_function = 0
function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
k3 = 0
k2 = 0
k1 = 0
function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
list_m_str_i = []
list_n_str_i = []
function_m_i_str(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_max, resolution_star_relative, resolution_star_absolute) # equation 18
list_M_str_i = []
length_n = len(list_n_str_i)
function_M_i(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U, length_n) # equation 20
del list_n_str_i[0]
return
# M_max is computed by simultaneously solving equations (3) and (4) from Yan et al. 2017
def function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
    global M_max_function, M_max
M_constant = M_ecl * M_U ** (1 - alpha_3) / I_str / (1 - alpha_3) - M_turn2 ** (alpha_2 - alpha_3) * M_turn ** (
alpha_1 - alpha_2) * (M_turn ** (2 - alpha_1) - M_L ** (2 - alpha_1)) / (2 - alpha_1) - M_turn2 ** (
alpha_2 - alpha_3) * (M_turn2 ** (2 - alpha_2) - M_turn ** (
2 - alpha_2)) / (2 - alpha_2) + M_turn2 ** (2 - alpha_3) / (2 - alpha_3) # equation 16
function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, M_U/2, 10, -1) # equation 16
M_max_function = 1
if M_max < M_turn2:
M_constant2 = M_ecl * M_turn2 ** (1 - alpha_2) / I_str / (1 - alpha_2) + M_ecl * M_turn2 ** (
alpha_3 - alpha_2) * (M_U ** (
1 - alpha_3) - M_turn2 ** (1 - alpha_3)) / I_str / (1 - alpha_3) - M_turn ** (alpha_1 - alpha_2) * (
M_turn ** (2 - alpha_1) - M_L ** (
2 - alpha_1)) / (2 - alpha_1) + M_turn ** (2 - alpha_2) / (2 - alpha_2) # equation 25
function_M_max_2(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, 0.75, 0.1, -1) # equation 25
M_max_function = 2
if M_max < M_turn:
M_constant3 = M_ecl * M_turn ** (1 - alpha_1) / I_str / (1 - alpha_1) + M_ecl * M_turn ** (
alpha_2 - alpha_1) * (M_turn2 ** (
1 - alpha_2) - M_turn ** (1 - alpha_2)) / I_str / (1 - alpha_2) + M_ecl * M_turn2 ** (
alpha_3 - alpha_2) * M_turn ** (
alpha_2 - alpha_1) * (M_U ** (1 - alpha_3) - M_turn2 ** (1 - alpha_3)) / I_str / (1 - alpha_3) + M_L ** (
2 - alpha_1) / (2 - alpha_1) # equation 29
function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, 100, 10, -1) # equation 29
M_max_function = 3
if M_max < M_L:
M_max_function = 0
print("M_max < M_L")
return
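# The function_M_max_1/2/3 solvers below search for the root of M_x(m_1) = M_constant with a
# step-halving walk. The same idea written as a plain bisection, assuming a monotonically
# increasing function f (illustrative sketch only; the solvers below keep the original scheme):
def bisection_sketch(f, target, lower, upper, rel_tol=10 ** (-7), max_iter=200):
    for _ in range(max_iter):
        mid = 0.5 * (lower + upper)
        if abs(f(mid) - target) <= abs(target) * rel_tol:
            return mid
        if f(mid) > target:
            upper = mid
        else:
            lower = mid
    return 0.5 * (lower + upper)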
def function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
global M_max_function, k3, k2, k1, M_max
if M_max_function == 1:
k3 = I_str*(1-alpha_3)/(M_U**(1-alpha_3)-M_max**(1-alpha_3))
# equation 14
elif M_max_function == 2:
k3 = I_str/(M_turn2**(alpha_2-alpha_3)*(M_turn2**(1-alpha_2)-M_max**(1-alpha_2))/(1-alpha_2) + (
M_U**(1-alpha_3)-M_turn2**(1-alpha_3))/(1-alpha_3))
# equation 23
elif M_max_function == 3:
k3 = I_str/(M_turn2**(alpha_2-alpha_3) * M_turn**(alpha_1-alpha_2) * (M_turn**(1-alpha_1)-M_max**(1-alpha_1)) / (
1-alpha_1) + M_turn2**(alpha_2-alpha_3)*(M_turn2**(1-alpha_2)-M_turn**(1-alpha_2))/(1-alpha_2) + (M_U**(
1-alpha_3)-M_turn2**(1-alpha_3))/(1-alpha_3))
# equation 27
else:
print("function_M_max went wrong")
return
k2 = k3*M_turn2**(alpha_2-alpha_3) # equation 2
k1 = k2*M_turn**(alpha_1-alpha_2) # equation 2
return
def function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1, step, pm): # equation 16
m_1 = round(m_1, 10) # round
M_x = m_1**(2-alpha_3)/(2-alpha_3) + M_ecl*m_1**(1-alpha_3)/I_str/(1-alpha_3)
while abs(M_x-M_constant) > abs(M_constant) * 10 ** (-50) and m_1 > 1 and step > 0.00000001:
if m_1 - step <= M_L or m_1 + step >= M_U:
step = step / 2
elif M_x > M_constant and pm == -1:
m_1 = m_1 - step
pm = -1
M_x = m_1 ** (2 - alpha_3) / (2 - alpha_3) + M_ecl * m_1 ** (1 - alpha_3) / I_str / (1 - alpha_3)
elif M_x > M_constant and pm == 1:
m_1 = m_1 - step / 2
step = step / 2
pm = -1
M_x = m_1 ** (2 - alpha_3) / (2 - alpha_3) + M_ecl * m_1 ** (1 - alpha_3) / I_str / (1 - alpha_3)
elif M_x < M_constant and pm == 1:
m_1 = m_1 + step
pm = 1
M_x = m_1 ** (2 - alpha_3) / (2 - alpha_3) + M_ecl * m_1 ** (1 - alpha_3) / I_str / (1 - alpha_3)
elif M_x < M_constant and pm == -1:
m_1 = m_1 + step / 2
step = step / 2
pm = 1
M_x = m_1 ** (2 - alpha_3) / (2 - alpha_3) + M_ecl * m_1 ** (1 - alpha_3) / I_str / (1 - alpha_3)
global M_max
M_max = m_1
return
def function_M_max_2(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1, step, pm): # equation 25
m_1 = round(m_1, 10) # round
M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
while abs(M_x-M_constant2) > abs(M_constant2) * 10 ** (-7) and m_1 > 0.5 and step > 0.002:
if m_1 - step <= M_L or m_1 + step >= M_U:
step = step / 2
elif M_x > M_constant2 and pm == -1:
m_1 = m_1 - step
pm = -1
M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
elif M_x > M_constant2 and pm == 1:
m_1 = m_1 - step / 2
step = step / 2
pm = -1
M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
elif M_x < M_constant2 and pm == 1:
m_1 = m_1 + step
pm = 1
M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
elif M_x < M_constant2 and pm == -1:
m_1 = m_1 + step / 2
step = step / 2
pm = 1
M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
global M_max
M_max = m_1
return
def function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1, step, pm): # equation 29
m_1 = round(m_1, 10) # round
M_x = m_1 ** (2 - alpha_1) / (2 - alpha_1) + M_ecl * m_1 ** (1 - alpha_1) / I_str / (1 - alpha_1)
if abs(M_x-M_constant3) < abs(M_constant3) * 10 ** (-7) or step < 0.001:
global M_max
M_max = m_1
elif m_1 - step <= M_L or m_1 + step >= M_U:
function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1, step / 2, pm)
elif M_x > M_constant3 and pm == -1:
function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 - step, step, -1)
elif M_x > M_constant3 and pm == 1:
function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 - step / 2, step / 2, -1)
elif M_x < M_constant3 and pm == 1:
function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 + step, step, 1)
elif M_x < M_constant3 and pm == -1:
function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 + step / 2, step / 2, 1)
return
def function_m_i_str(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_max, resolution_star_relative, resolution_star_absolute): # equation 18
global list_m_str_i
if M_max > 100:
loop_m_i_first_three(k3, M_turn2, alpha_3, M_max, 0, resolution_star_relative, resolution_star_absolute, 0)
(m_str_i, n_str_i) = cross_M_turn(k3, k2, M_turn2, alpha_3, alpha_2, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k2, M_turn, alpha_2, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
(m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
return
elif M_max > M_turn2:
loop_m_i(k3, M_turn2, alpha_3, M_max, 0, resolution_star_relative, resolution_star_absolute)
(m_str_i, n_str_i) = cross_M_turn(k3, k2, M_turn2, alpha_3, alpha_2, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k2, M_turn, alpha_2, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
(m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
return
elif M_max > M_turn:
loop_m_i(k2, M_turn, alpha_2, M_max, 0, resolution_star_relative, resolution_star_absolute)
(m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
return
else:
loop_m_i(k1, M_L, alpha_1, M_max, 0, resolution_star_relative, resolution_star_absolute)
cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
return
def function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute):
while m_i - m_i_plus_n < max(resolution_star_relative * m_i, resolution_star_absolute):
n_new = round(n_i * mass_grid_index + 1)
m_i_plus_n_new = (m_i ** (1 - alpha) - n_new * (1 - alpha) / k) ** (1 / (1 - alpha))
(m_i_plus_n, n_i) = (m_i_plus_n_new, n_new)
return m_i_plus_n, n_i
def loop_m_i_first_three(k, M_low, alpha, m_i, n_i, resolution_star_relative, resolution_star_absolute, count):
while m_i > M_low:
global list_m_str_i, list_n_str_i, n_turn
list_m_str_i += [m_i]
list_n_str_i += [n_i]
m_i_plus_n = (m_i ** (1 - alpha) - n_i * (1 - alpha) / k) ** (1 / (1 - alpha))
if count < 3:
m_i_plus_n = (m_i ** (1 - alpha) - (1 - alpha) / k) ** (1 / (1 - alpha))
n_turn = n_i
(m_i, n_i, count) = (m_i_plus_n, 1, (count+1))
elif m_i - m_i_plus_n > max(resolution_star_relative * m_i, resolution_star_absolute):
n_turn = n_i
(m_i, n_i) = (m_i_plus_n, n_i)
else:
(m_i_plus_n_new, n_turn) = function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute)
(m_i, n_i) = (m_i_plus_n_new, n_turn)
def loop_m_i(k, M_low, alpha, m_i, n_i, resolution_star_relative, resolution_star_absolute):
while m_i > M_low:
global list_m_str_i, list_n_str_i, n_turn
list_m_str_i += [m_i]
list_n_str_i += [n_i]
a = m_i ** (1 - alpha) - n_i * (1 - alpha) / k
if a > 0:
b = 1 / (1 - alpha)
m_i_plus_n = a ** b
if m_i - m_i_plus_n > max(resolution_star_relative * m_i, resolution_star_absolute):
(m_i, n_i) = (m_i_plus_n, n_i)
else:
(m_i_plus_n_new, n_turn) = function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute)
(m_i, n_i) = (m_i_plus_n_new, n_turn)
else:
return
def cross_M_turn(k_before, k_after, M_cross, alpha_before, alpha_after, m_i, resolution_star_relative, resolution_star_absolute):
global n_turn
n_before = int(k_before/(1-alpha_before)*(m_i**(1-alpha_before)-M_cross**(1-alpha_before)))
m_before_cross = (m_i ** (1 - alpha_before) - n_before * (1 - alpha_before) / k_before) ** (1 / (1 - alpha_before))
a = (M_cross**(1-alpha_after)+k_before/k_after*(1-alpha_after)/(1-alpha_before)*(m_before_cross**(
1-alpha_before)-M_cross**(1-alpha_before))-(1-alpha_after)/k_after)
if a > 0:
m_after_cross = a ** (1/(1-alpha_after))
n_after = int(0.9*(n_turn - n_before - 1))
m_after_cross_plus_n_after = (m_after_cross ** (1 - alpha_after) - n_after * (1 - alpha_after) / k_after) ** (1 / (1 - alpha_after))
if m_i - m_after_cross_plus_n_after > max(resolution_star_relative * m_i, resolution_star_absolute):
return (m_after_cross_plus_n_after, n_before + 1 + n_after)
else:
(m_after_cross_plus_n_new, n_after_new) = function_get_n_new_str_cross(
m_i, m_after_cross, k_after, alpha_after, m_after_cross_plus_n_after, n_after, resolution_star_relative, resolution_star_absolute)
return (m_after_cross_plus_n_new, n_before + 1 + n_after_new)
else:
return (0, 0)
def function_get_n_new_str_cross(m_i, m_after_cross, k, alpha, m_after_cross_plus_n, n_i, resolution_star_relative, resolution_star_absolute):
while m_i - m_after_cross_plus_n < max(resolution_star_relative * m_i, resolution_star_absolute):
n_after_new = round(n_i * mass_grid_index + 1)
m_after_cross_plus_n_new = (m_after_cross ** (1 - alpha) - n_after_new * (1 - alpha) / k) ** (1 / (1 - alpha))
(m_after_cross_plus_n, n_i) = (m_after_cross_plus_n_new, n_after_new)
return m_after_cross_plus_n, n_i
def cross_M_L(k_1, M_L, alpha_1, m_i): # equation 21
global list_m_str_i, list_n_str_i
n_i = int(k_1 / (1 - alpha_1) * (m_i ** (1 - alpha_1) - M_L ** (1 - alpha_1)))
list_m_str_i += [M_L]
list_n_str_i += [n_i]
return
def function_M_i(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U, length_n): # equation 20
global list_m_str_i, new_i, list_M_str_i, M_max, list_n_str_i
new_i = 0
if M_max > M_turn2:
loop_M_i(k3, M_turn2, alpha_3, new_i)
cross_M_turn2(k3, k2, M_turn2, alpha_3, alpha_2, new_i)
if new_i + 1 < len(list_m_str_i):
loop_M_i(k2, M_turn, alpha_2, new_i)
if list_n_str_i[new_i + 1] > 0:
cross_M_turn2(k2, k1, M_turn, alpha_2, alpha_1, new_i)
if new_i + 1 < len(list_m_str_i):
loop_M_i(k1, M_L, alpha_1, new_i)
if list_n_str_i[new_i+1] == 0:
return
else:
M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (2 - alpha_1)) / \
list_n_str_i[new_i + 1]
list_M_str_i += [M_i]
return
elif M_max > M_turn:
loop_M_i(k2, M_turn, alpha_2, new_i)
cross_M_turn2(k2, k1, M_turn, alpha_2, alpha_1, new_i)
loop_M_i(k1, M_L, alpha_1, new_i)
if list_n_str_i[new_i+1] == 0:
return
else:
M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (
2 - alpha_1)) / list_n_str_i[new_i + 1]
list_M_str_i += [M_i]
return
else:
loop_M_i(k1, M_L, alpha_1, new_i)
if list_n_str_i[new_i+1] == 0:
return
else:
M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (
2 - alpha_1)) / list_n_str_i[new_i + 1]
list_M_str_i += [M_i]
return
def loop_M_i(k, M_low, alpha, i):
global list_m_str_i, list_n_str_i, list_M_str_i, new_i
while list_m_str_i[i+1] > M_low:
M_i = k/(2-alpha)*(list_m_str_i[i]**(2-alpha)-list_m_str_i[i+1]**(2-alpha))/list_n_str_i[i+1]
list_M_str_i += [M_i]
new_i = i + 1
(i)=(new_i)
def cross_M_turn2(k_before, k_after, M_cross, alpha_before, alpha_after, i):
global list_m_str_i, list_n_str_i, list_M_str_i, new_i
M_i = k_before / (2 - alpha_before) * (list_m_str_i[i] ** (2 - alpha_before) - M_cross ** (2 - alpha_before)
) / list_n_str_i[i + 1] + k_after / (2 - alpha_after) * (M_cross ** (2 - alpha_after
) - list_m_str_i[i + 1] ** (2 - alpha_after)) / list_n_str_i[i + 1]
list_M_str_i += [M_i]
new_i = i + 1
return
################# draw IMF without sampling #################
# k_str is a normalization factor.
# The IMF is normalized to the total mass of the star cluster (M_ecl)
# The normalization is done by first calculating M_max (with function function_M_max),
# then k_str (function_k321), as described in Part I of supplementary-document-galimf.pdf
def k_str(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
global M_max, M_max_function, k3, k2, k1
M_max = 0
M_max_function = 0
function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
k3 = 0
k2 = 0
k1 = 0
function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
return
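# Illustrative (commented-out) usage with hypothetical canonical parameters:
# k_str(M_ecl=10**4, I_str=1, M_L=0.08, alpha_1=1.3, M_turn=0.5, alpha_2=2.3,
#       M_turn2=1, alpha_3=2.3, M_U=150)
# Afterwards the globals k1, k2, k3 and M_max hold the normalisation described above,
# i.e. the IMF integrates (in mass) to M_ecl between M_L and M_max.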
x_IMF = []
y_IMF = []
def function_draw_xi_str(M_str_L, M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
global x_IMF, y_IMF, k1, k2, k3, M_max
k_str(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
function_draw_xi_str_loop(M_str_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3)
return
def function_draw_xi_str_loop(M_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3):
global x_IMF, y_IMF, k1, k2, k3, M_max, mass_grid_index
while M_str < M_max:
x_IMF += [M_str]
if M_str > M_turn2:
xi = k3 * M_str ** (-alpha_3)
elif M_str > M_turn:
xi = k2 * M_str ** (-alpha_2)
else:
xi = k1 * M_str ** (-alpha_1)
y_IMF += [xi]
(M_str) = (mass_grid_index * M_str)
return
def function_maximum_number_of_mass_grid(M_str_min, M_str_max):
global mass_grid_index
maximum_number_of_mass_grid = 4
M_str = M_str_min
while M_str < M_max:
maximum_number_of_mass_grid += 1
(M_str) = (mass_grid_index * M_str)
return maximum_number_of_mass_grid
########### alpha ###########
def function_alpha_1_change(alpha_1, alpha1_model, M_over_H):
if (alpha1_model == 0):
return alpha_1
elif (alpha1_model == 1):
alpha_1_change = alpha_1 + 0.5 * M_over_H
return alpha_1_change
elif (alpha1_model == 'IGIMF2.5'):
alpha_1_change = alpha_1 + 0.12 * M_over_H
return alpha_1_change
elif (alpha1_model == 'Z'):
alpha_1_change = alpha_1 + 63 * (10**M_over_H - 1) * 0.0142
return alpha_1_change
else:
print("alpha1_model: %s, do not exist.\nCheck file 'alpha1.py'" % (alpha1_model))
return
def function_alpha_2_change(alpha_2, alpha2_model, M_over_H):
if (alpha2_model == 0):
return alpha_2
elif (alpha2_model == 1):
alpha_2_change = alpha_2 + 0.5 * M_over_H
return alpha_2_change
elif (alpha2_model == 'Z'):
alpha_2_change = alpha_2 + 63 * (10**M_over_H - 1) * 0.0142
if M_over_H>1:
print("Warning: Abnormally high gas metallicity leading to an unrealistic IMF shape according to the assumed variation law: alpha2_model == 'Z'. Please check your galaxy evolution settings or change to a different IMF variation assumption.")
return alpha_2_change
elif (alpha2_model == 'IGIMF2.5'):
alpha_2_change = alpha_2 + 0.12 * M_over_H
return alpha_2_change
elif (alpha2_model == 'R14'):
alpha_2_change = 2.3 + 0.0572 * M_over_H
return alpha_2_change
else:
print("alpha2_model: %s, do not exist.\nCheck file 'alpha2.py'" % (alpha2_model))
return
def function_alpha_3_change(alpha3_model, M_ecl, M_over_H):
if (alpha3_model == 0):
default_alpha3 = 2.3
# print("alpha_3 is set to be a constant: %s, as this is the default alpha_3 value for alpha3_model 0.\nFor more options regarding alpha_3 variation, please check file 'alpha3.py'" % (default_alpha3))
return default_alpha3
elif (alpha3_model == 1):
rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)
if rho < 9.5 * 10 ** 4:
alpha_3_change = 2.3
else:
alpha_3_change = 1.86 - 0.43 * math.log(rho / 10 ** 6, 10)
# print("Notification in file 'alpha3_model' uncompleted")
if alpha_3_change < 0.5:
print("IMF alpha_3 being", alpha_3_change, "out of the tested range from Marks et al. 2012.")
return alpha_3_change
elif (alpha3_model == 2):
rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)
x = -0.1405 * M_over_H + 0.99 * math.log(rho / 10 ** 6, 10)
if x < -0.87:
alpha_3_change = 2.3
else:
alpha_3_change = -0.41 * x + 1.94
# print("Notification in file 'alpha3_model' uncompleted")
return alpha_3_change
elif (alpha3_model == 'R14'):
alpha_3_change = 2.3 + 0.0572 * M_over_H
return alpha_3_change
else:
# print("alpha_3 is set to be a constant: %s, as this is the input value of parameter 'alpha3_model'.\nFor more options regarding alpha_3 variation, please check file 'alpha3.py'" % (alpha3_model))
return alpha3_model
########## ECMF #########
# This part gives the cluster masses according to file "supplementary-document-galimf.pdf".
# The code is only valid when SFR > 3 * 10^(-10) solar / year.
# Inputs:
# SFR,delta_t, I, M_U, M_L, \beta
# step 1
# use equation 13 or 17
# give first integration limit m_1 i.e. M_max_ecl
# step 2
# use equation 10 or 14
# give k
# step 3
# use equation 21
# give every integration limit m_i and the number of clusters in this region n_i
# step 4
# use equation 22 or 23
# give every cluster mass M_i
# Outputs:
# list of cluster masses "list_M_ecl_i"
# and the number of clusters with each mass "list_n_ecl_i"
################### sample cluster from ECMF #####################
resolution_cluster_relative = 0.01  # The mass resolution of an embedded cluster with mass M is: M * resolution_cluster_relative.
list_m_ecl_i = []
list_n_ecl_i = []
list_M_ecl_i = []
M_max_ecl = 0
def function_sample_from_ecmf(R14orNOT, SFR, delta_t, I_ecl, M_U, M_L, beta):
global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i, M_max_ecl, resolution_cluster_relative
M_tot = SFR * delta_t * 10**6 # units in Myr
if R14orNOT == True:
M_max_ecl = 10**(4.83+0.75*math.log(SFR, 10))
k = I_ecl / (1 / M_max_ecl - 1 / M_U) # equation 41
list_m_ecl_i = [M_max_ecl]
list_n_ecl_i = []
beta = 2
function_m_i_ecl(M_max_ecl, M_L, k, beta, 1) # equation 48
list_M_ecl_i = []
length_n = len(list_n_ecl_i)
function_M_i_2(k, 0, length_n) # equation 50
else:
if beta == 2:
M_max_ecl = 0
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, 10**8, 10**7, -1) # equation 44
k = I_ecl / (1 / M_max_ecl - 1 / M_U) # equation 41
list_m_ecl_i = [M_max_ecl]
list_n_ecl_i = []
function_m_i_ecl(M_max_ecl, M_L, k, beta, 1) # equation 48
list_M_ecl_i = []
length_n = len(list_n_ecl_i)
function_M_i_2(k, 0, length_n) # equation 50
else:
M_max_ecl = 0
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, 10**8, 10**7, -1) # equation 40
k = I_ecl * (1 - beta) / (M_U ** (1 - beta) - M_max_ecl ** (1 - beta)) # equation 37
list_m_ecl_i = [M_max_ecl]
list_n_ecl_i = []
function_m_i_ecl(M_max_ecl, M_L, k, beta, 1) # equation 48
list_M_ecl_i = []
length_n = len(list_n_ecl_i)
function_M_i_not_2(k, beta, 0, length_n) # equation 49
return
def function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1, step, pm): # equation 44
m_1 = round(m_1, 10) # round makes the code only valid when SFR > 3 * 10^(-10) solar / year
M_x = I_ecl * (math.log(m_1) - math.log(M_L)) / (1 / m_1 - 1 / M_U)
if M_tot * (1. + 10 ** (-5)) > M_x > M_tot * (1- 10 ** (-5)):
global M_max_ecl
M_max_ecl = m_1
elif m_1 - step < M_L or m_1 + step > M_U:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1, step/10, pm)
elif M_x > M_tot and pm == -1:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 - step, step, -1)
elif M_x > M_tot and pm == 1:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 - step/10, step/10, -1)
elif M_x < M_tot and pm == 1:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 + step, step, 1)
elif M_x < M_tot and pm == -1:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 + step/10, step/10, 1)
def function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1, step, pm): # equation 40
m_1 = round(m_1, 10) # round makes the code only valid when SFR > 3 * 10^(-10) solar / year
M_x = I_ecl * (1 - beta) / (2 - beta) * (m_1 ** (2 - beta) - M_L ** (2 - beta)) / (
M_U ** (1 - beta) - m_1 ** (1 - beta))
if M_tot * (1.+10**(-5)) > M_x > M_tot * (1-10**(-5)):
global M_max_ecl
M_max_ecl = m_1
elif m_1 - step <= M_L or m_1 + step >= M_U:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1, step/2, pm)
elif M_x > M_tot and pm == -1:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 - step, step, -1)
elif M_x > M_tot and pm == 1:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 - step/2, step/2, -1)
elif M_x < M_tot and pm == 1:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 + step, step, 1)
elif M_x < M_tot and pm == -1:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 + step/2, step/2, 1)
def function_m_i_ecl(m_i, M_L, k, beta, n_i): # equation 48
while m_i > M_L:
global list_m_ecl_i, list_n_ecl_i, resolution_cluster_relative
m_i_plus_n = (m_i**(1-beta) - n_i * (1-beta) / k)**(1/(1-beta))
if m_i_plus_n < M_L:
list_m_ecl_i += [M_L]
n_L = int((m_i**(1-beta) - M_L**(1-beta)) * k / (1-beta))
if n_L == 0:
return
else:
list_n_ecl_i += [n_L]
return
elif m_i - m_i_plus_n > resolution_cluster_relative * m_i:
list_m_ecl_i += [m_i_plus_n]
list_n_ecl_i += [n_i]
(m_i, n_i) = (m_i_plus_n, n_i)
else:
(m_i_plus_n_new, n_new) = function_get_n_new_ecl(m_i, k, beta, m_i_plus_n, n_i)
list_m_ecl_i += [m_i_plus_n_new]
list_n_ecl_i += [n_new]
(m_i, n_i) = (m_i_plus_n_new, n_new)
return
def function_get_n_new_ecl(m_i, k, beta, m_i_plus_n, n_i):
while m_i - m_i_plus_n < resolution_cluster_relative * m_i:
n_new = round(n_i * mass_grid_index + 1)
m_i_plus_n_new = (m_i ** (1 - beta) - n_new * (1 - beta) / k) ** (1 / (1 - beta))
(m_i_plus_n, n_i) = (m_i_plus_n_new, n_new)
return m_i_plus_n, n_i
def function_M_i_2(k, i, length_n): # equation 50
while i < length_n:
global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i
M_i = k * (math.log(list_m_ecl_i[i]) - math.log(list_m_ecl_i[i+1])) / list_n_ecl_i[i]
list_M_ecl_i += [M_i]
(i) = (i+1)
return
def function_M_i_not_2(k, beta, i, length_n): # equation 49
while i < length_n:
global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i
M_i = k / (2-beta) * (list_m_ecl_i[i]**(2-beta)-list_m_ecl_i[i+1]**(2-beta)) / list_n_ecl_i[i]
list_M_ecl_i += [M_i]
(i) = (i+1)
return
################### draw ECMF without sampling #####################
# k_ecl is a normalization factor.
# The ECMF is normalized to the total mass of the cluster population in a 10 Myr star formation epoch (M_tot)
# That is M_tot = SFR [Msun/yr] * 10^7 [yr]
# The normalization is done by first calculating M_max_ecl and then k_ecl, as described in Part II of supplementary-document-galimf.pdf
def k_ecl(R14orNOT, M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta):
global M_max_ecl
M_tot = SFR * delta_t * 10 ** 6 # units in Myr
if R14orNOT == True:
M_max_ecl = 10 ** (4.83 + 0.75 * math.log(SFR, 10))
if M_max_ecl < 5:
M_max_ecl = 5
k = I_ecl / (1 / M_max_ecl - 1 / M_U) # equation 45
else:
if beta == 2:
M_max_ecl = 0
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, 10**8, 10**7, -1) # equation 48
k = I_ecl / (1 / M_max_ecl - 1 / M_U) # equation 45
else:
M_max_ecl = 0
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, M_U/10, M_U/100, -1) # equation 44
k = I_ecl * (1 - beta) / (M_U ** (1 - beta) - M_max_ecl ** (1 - beta)) # equation 41
return k
x_ECMF = []
y_ECMF = []
def function_draw_xi_ecl(R14orNOT, M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta):
global x_ECMF, y_ECMF
k = k_ecl(R14orNOT, M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta)
function_draw_xi_ecl_loop(M_ecl, k, M_U, beta)
x_ECMF = [x_ECMF[0]] + x_ECMF
x_ECMF += [x_ECMF[-1]]
y_ECMF = [0.000000001] + y_ECMF
y_ECMF += [0.000000001]
return
def function_draw_xi_ecl_loop(M_ecl, k, M_U, beta):
global x_ECMF, y_ECMF, M_max_ecl
while M_ecl < M_max_ecl:
x_ECMF += [M_ecl]
xi = k * M_ecl ** (-beta)
y_ECMF += [xi]
(M_ecl) = (mass_grid_index * M_ecl)
return
########## beta ###########
def function_beta_change(beta_model, SFR, M_over_H):
if (beta_model == 0):
default_beta = 2.00000001
return default_beta
elif (beta_model == 1):
beta_change = -0.106 * math.log(SFR, 10) + 2.000001 #+ 0.5*M_over_H
if beta_change < 1.5:
beta_change = 1.5
elif beta_change > 2.5:
beta_change = 2.5
# print("ECMF-beta =", beta_change)
return beta_change
elif (beta_model == 2):
if SFR > 1:
beta_change = -0.106 * math.log(SFR, 10) + 2.00000001
else:
beta_change = 2.0000001
return beta_change
else:
return beta_model
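# Illustrative values for beta_model == 1, i.e. beta = -0.106 * log10(SFR) + 2 (clipped to [1.5, 2.5]):
# SFR = 10**-3 Msun/yr -> beta ~ 2.32
# SFR = 1 Msun/yr -> beta ~ 2.00
# SFR = 100 Msun/yr -> beta ~ 1.79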
| 53,974 | 40.841085 | 253 | py |
galIMF | galIMF-master/stellar_luminosity.py | # The function here gives the stellar bolometric luminosity relative to the sun [L_sun],
# assuming a simplified form using only the main-sequence luminosity as a function of mass [M_sun].
# See Yan et al. 2019 for details.
# The stellar luminosity should also be a function of Y_for_helium and Z_for_metal, which shall be added later.
def stellar_luminosity_function(mass):
if mass < 0.23 ** (1 / 1.7):
lum = 0.23 * mass ** 2.3
elif mass < 1.96:
lum = mass ** 4
elif mass < (32000 / 1.4) ** (1 / 2.5):
lum = 1.4 * mass ** 3.5
else:
lum = 32000 * mass
return lum
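# Example values following directly from the piecewise relation above:
# stellar_luminosity_function(0.5) -> 0.5**4 = 0.0625 L_sun
# stellar_luminosity_function(2.0) -> 1.4 * 2**3.5 ~ 15.8 L_sun
# stellar_luminosity_function(100) -> 32000 * 100 = 3.2 * 10**6 L_sun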
| 617 | 35.352941 | 111 | py |
galIMF | galIMF-master/plot_stellar_yield_table.py | import time
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import element_abundances_solar
reference_name = 'Anders1989'
H_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'H')
# He_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'He')
C_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'C')
# N_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'N')
O_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'O')
Mg_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Mg')
Fe_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Fe')
Si_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Si')
Ca_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Ca')
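# The bracket abundance ratios used throughout this file, e.g.
# [O/Fe] = log10(N_O/N_Fe) - log10(N_O/N_Fe)_solar, are computed from particle numbers and the
# solar abundances read above. Minimal sketch (hypothetical helper, not used by the code below):
def bracket_ratio_sketch(num_X, num_Y, solar_abundance_X, solar_abundance_Y):
    # [X/Y] for element number counts num_X, num_Y and solar logarithmic abundances
    return math.log(num_X / num_Y, 10) - solar_abundance_X + solar_abundance_Y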
def plot_lifetime_and_finalmass():
Z2_list = [0.0004, 0.004, 0.008, 0.012]
file = open('yield_tables/rearranged/setllar_final_mass_from_portinari98/portinari98_Z=0.004.txt', 'r')
data = file.readlines()
file.close()
list2 = str.split(data[3])
list_ini_mass = []
for j in list2:
list_ini_mass.append(math.log(float(j), 10))
list_fin_mass = []
i = len(Z2_list) - 1
while i > -1:
file = open('yield_tables/rearranged/setllar_final_mass_from_portinari98/portinari98_Z={}.txt'.format(Z2_list[i]), 'r')
data = file.readlines()
file.close()
list2 = str.split(data[5])
list3 = []
for j in list2:
list3.append(math.log(float(j), 10))
list = [float(data[1]), list3]
list_fin_mass.append(list)
(i) = (i - 1)
color_list_ = []
for i in range(len(list_fin_mass)):
ZZZ = list_fin_mass[i][0]
Z_box = math.log(ZZZ, 10) - math.log(0.01886, 10)
color_list_.append(round(((Z_box+7)**4.001 - (-6.001 + 7) ** 4.001) / ((1 + 7) ** 4.001 - (-6.001 + 7) ** 4.001) * 1000))
colors = plt.cm.hsv_r(np.linspace(0, 1, 1000))
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(21, figsize=(4, 3.5))
# plt.xlim(-1.5, 2.5)
# plt.ylim(-1.5, 1.5)
# i = len(Z2_list) - 1
# while i > -1:
# plt.plot(list_ini_mass, list_fin_mass[i][1], label='Z={}'.format(list_fin_mass[i][0]))
# (i) = (i - 1)
# plt.plot([-2, 3], [-2, 3], ls='dashed', c='k', lw=0.7)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}$($M_{\rm *, final}$ [$M_\odot$])')
# plt.tight_layout()
# plt.savefig('Interpolated_stellar_final_mass.pdf', dpi=250)
list_lifetime = []
i = len(Z2_list) - 1
while i > -1:
file = open(
'yield_tables/rearranged/setllar_lifetime_from_portinari98/portinari98_Z={}.txt'.format(Z2_list[i]), 'r')
data = file.readlines()
file.close()
list2 = str.split(data[5])
list3 = []
for j in list2:
list3.append(math.log(float(j), 10))
list = [float(data[1]), list3]
list_lifetime.append(list)
(i) = (i - 1)
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(22, figsize=(4, 3.5))
# plt.xlim(-1.5, 2.5)
# plt.ylim(6, 15)
# i = len(Z2_list) - 1
# while i > -1:
# plt.plot(list_ini_mass, list_lifetime[i][1], label='Z={}'.format(list_fin_mass[i][0]))
# (i) = (i - 1)
# # plt.plot([-2, 3], [-2, 3], ls='dashed', c='k', lw=0.7)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}$(life time [yr])')
# plt.tight_layout()
# plt.savefig('Interpolated_stellar_lifetime.pdf', dpi=250)
##########
Metallicity_origen = [0.008, 0.02]
Age_origen = [
[6.47E+10, 3.54E+10, 2.09E+10, 1.30E+10, 8.46E+09, 5.72E+09, 4.12E+09, 2.92E+09, 2.36E+09, 2.18E+09, 1.82E+09,
1.58E+09, 1.41E+09, 1.25E+09, 1.23E+09, 6.86E+08, 4.12E+08, 1.93E+08, 1.15E+08, 7.71E+07, 5.59E+07, 3.44E+07,
2.10E+07, 1.49E+07, 1.01E+07, 6.65E+06, 5.30E+06, 4.15E+06, 3.44E+06, 3.32E+06],
[7.92E+10, 4.45E+10, 2.61E+10, 1.59E+10, 1.03E+10, 6.89E+09, 4.73E+09, 3.59E+09, 2.87E+09, 2.64E+09, 2.18E+09,
1.84E+09, 1.59E+09, 1.38E+09, 1.21E+09, 7.64E+08, 4.56E+08, 2.03E+08, 1.15E+08, 7.45E+07, 5.31E+07, 3.17E+07,
1.89E+07, 1.33E+07, 9.15E+06, 6.13E+06, 5.12E+06, 4.12E+06, 3.39E+06, 3.23E+06]]
Age_012 = []
for i in range(len(Age_origen[0])):
Age_012.append((Age_origen[0][i]*2+Age_origen[1][i])/3)
Remnant_mass_origen = [
[1.35, 1.48, 1.84, 2.04, 6.9, 12.5, 5.69, 9.89],
[1.31, 1.44, 1.87, 2.11, 7.18, 2.06, 2.09, 2.11]
]
Remnant_mass_012 = []
for i in range(len(Remnant_mass_origen[0])):
Remnant_mass_012.append((Remnant_mass_origen[0][i]*2+Remnant_mass_origen[1][i])/3)
Mass = [0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5,
1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0,
7.0, 9.0, 12., 15., 20., 30., 40., 60., 100, 120]
Metallicity = [0.0004, 0.004, 0.008, 0.12]
Age = [
[4.28E+10, 2.37E+10, 1.41E+10, 8.97E+09, 6.03E+09, 4.23E+09, 3.08E+09, 2.34E+09, 1.92E+09, 1.66E+09, 1.39E+09,
1.18E+09, 1.11E+09, 9.66E+08, 8.33E+08, 4.64E+08, 3.03E+08, 1.61E+08, 1.01E+08, 7.15E+07, 5.33E+07, 3.42E+07,
2.13E+07, 1.54E+07, 1.06E+07, 6.90E+06, 5.45E+06, 4.20E+06, 3.32E+06, 3.11E+06],
[5.35E+10, 2.95E+10, 1.73E+10, 1.09E+10, 7.13E+09, 4.93E+09, 3.52E+09, 2.64E+09, 2.39E+09, 1.95E+09, 1.63E+09,
1.28E+09, 1.25E+09, 1.23E+09, 1.08E+09, 5.98E+08, 3.67E+08, 1.82E+08, 1.11E+08, 7.62E+07, 5.61E+07, 3.51E+07,
2.14E+07, 1.52E+07, 1.05E+07, 6.85E+06, 5.44E+06, 4.19E+06, 3.38E+06, 3.23E+06],
[6.47E+10, 3.54E+10, 2.09E+10, 1.30E+10, 8.46E+09, 5.72E+09, 4.12E+09, 2.92E+09, 2.36E+09, 2.18E+09, 1.82E+09,
1.58E+09, 1.41E+09, 1.25E+09, 1.23E+09, 6.86E+08, 4.12E+08, 1.93E+08, 1.15E+08, 7.71E+07, 5.59E+07, 3.44E+07,
2.10E+07, 1.49E+07, 1.01E+07, 6.65E+06, 5.30E+06, 4.15E+06, 3.44E+06, 3.32E+06],
Age_012]
len_mass = len(Mass)
log_Mass = []
for i in range(len_mass):
log_Mass.append(math.log(Mass[i], 10))
len_metal = len(Metallicity)
log_Metallicity = []
for i in range(len_metal):
log_Metallicity.append(math.log(Metallicity[i], 10))
log_Age = []
for i in range(len_metal):
log_Age.append([])
for j in range(len_mass):
log_Age[i].append(math.log(Age[i][j], 10))
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(4, 4))
i = 0
while i < len(Z2_list):
ZZZ = list_fin_mass[i][0]
Z_box = round(math.log(ZZZ, 10)-math.log(0.01886, 10), 2)
axs[0].plot(list_ini_mass, list_lifetime[i][1], lw=(6-i)/2, label='Z={}, [Z]={}'.format(ZZZ, Z_box), color=colors[color_list_[i]])
(i) = (i + 1)
i = len_metal-1
# while i > -1:
# axs[0].scatter(log_Mass, log_Age[i], s=3, marker='*', edgecolors='w', linewidth='0.1', zorder=10)
# (i) = (i - 1)
axs[0].plot([-1, 2], [7, 7])
axs[0].plot([math.log(17, 10), math.log(17, 10)], [6, 15])
# axs[0].set_yticks(np.arange(6, 16, 2))
axs[0].set_ylim(6, 15)
axs[0].set_ylabel(r'log$_{10}$(life time [yr])')
axs[0].legend(prop={'size': 6}, loc='best')
Mass = [
[9, 12, 15, 20, 30, 40, 60, 100, 120],
[9, 12, 15, 20, 30, 40, 100, 120],
[9, 12, 15, 20, 30, 40, 60, 120],
[9, 12, 15, 20, 30, 40, 60, 120]
]
Metallicity = [0.0004, 0.004, 0.008, 0.12]
Remnant_mass = [
[1.35, 1.5, 1.8, 2.07, 6.98, 14.91, 24.58, 32.06, 30.6],
[1.35, 1.5, 1.82, 2.04, 6.98, 12.6, 36.7, 35.2],
[1.35, 1.48, 1.84, 2.04, 6.9, 12.5, 5.69, 9.89],
Remnant_mass_012
]
#################################################################
# WW95_solar = 0.01886
# Metallicity_WW95 = [0, WW95_solar*10**-4, WW95_solar*0.01, WW95_solar*0.1, WW95_solar]
# Mass_WW95 = [12, 13, 15, 18, 20, 22, 25, 30, 35, 40]
# Remnant_mass_WW95_B = [
# [1.32, 1.46, 1.43, 1.76, 2.06, 2.02, 2.07, 1.94, 3.86, 5.45],
# [1.38, 1.31, 1.49, 1.69, 1.97, 2.12, 1.99, 2.01, 3.39, 4.45],
# [1.40, 1.44, 1.56, 1.58, 1.98, 2.04, 1.87, 2.21, 2.42, 4.42],
# [1.28, 1.44, 1.63, 1.61, 1.97, 2.01, 1.87, 2.08, 3.03, 4.09],
# [1.35, 1.28, 1.53, 3.40, 4.12, 1.49, 1.90, 1.54, 7.62, 12.2]
# ]
# Interpolation_remnant_mass_WW95_B = interpolate.interp2d(Mass_WW95, Metallicity_WW95, Remnant_mass_WW95_B)
# Remnant_mass_WW95_B_new = []
# for i in range(len(Metallicity)):
# Remnant_mass_WW95_B_new.append([])
# for j in range(len(Mass_WW95)):
# Remnant_mass_WW95_B_new[i].append(Interpolation_remnant_mass_WW95_B(Mass_WW95[j], Metallicity[i]))
#
# log_Remnant_mass_WW95_B = []
# for i in range(len_metal):
# log_Remnant_mass_WW95_B.append([])
# for j in range(len(Remnant_mass_WW95_B[i])):
# log_Remnant_mass_WW95_B[i].append(math.log(Remnant_mass_WW95_B[i][j], 10))
#
# log_mass_WW95 = []
# for i in range(len(Mass_WW95)):
# log_mass_WW95.append(math.log(Mass_WW95[i], 10))
#################################################################
len_metal = len(Metallicity)
log_Metallicity = []
for i in range(len_metal):
log_Metallicity.append(math.log(Metallicity[i], 10))
log_Remnant_mass = []
for i in range(len_metal):
log_Remnant_mass.append([])
for j in range(len(Remnant_mass[i])):
log_Remnant_mass[i].append(math.log(Remnant_mass[i][j], 10))
log_mass = []
for i in range(len_metal):
log_mass.append([])
for j in range(len(Mass[i])):
log_mass[i].append(math.log(Mass[i][j], 10))
# print(log_mass)
# print(len(log_mass[0]))
# print(len(log_mass))
# print(len(log_Remnant_mass[0]))
i = 0
while i < len(Z2_list):
axs[1].plot(list_ini_mass, list_fin_mass[i][1], lw=(6-i)/2, label='Z={}'.format(list_fin_mass[i][0]), color=colors[color_list_[i]])
(i) = (i + 1)
i = len_metal-1
# while i > -1:
# axs[1].scatter(log_mass[i], log_Remnant_mass[i], s=10, marker='*', edgecolors='w', linewidth='0.1', zorder=10)
# (i) = (i - 1)
# i = len_metal-1
# # while i > -1:
# # axs[1].scatter(log_mass_WW95, log_Remnant_mass_WW95_B[i], s=10, marker='^', edgecolors='w', linewidth='0.1', zorder=10)
# # (i) = (i - 1)
axs[1].set_yticks(np.arange(-2, 2, 1))
axs[1].set_ylim(-1.5, 1.5)
axs[1].set_ylabel(r'log$_{10}(M_{\rm *, final}$ [$M_\odot$])')
axs[1].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
plt.tight_layout()
# Remove horizontal space between axes
fig.subplots_adjust(hspace=0)
plt.savefig('Interpolated_stellar_lifetime_final_mass.pdf', dpi=250)
plt.show()
return
def function_read_file(yield_table_name):
####################
### read in file ###
####################
if yield_table_name == "portinari98":
file_yield = open(
'yield_tables/agb_and_massive_stars_portinari98_marigo01_gce_totalyields.txt', 'r')
# 'yield_tables/agb_and_massive_stars_portinari98_marigo01.txt', 'r')
# Use net yields of Portinari and Marigo
        # Net yields with masses up to 7 Msun are from Marigo; above that, those of Portinari are taken.
# Only isotopes are selected which are available in both yield sets and go up to Fe.
# Initial masses go from the lowest mass available up to 100Msun.
# Yield set ID M01P98 in Ritter et al. 2017.
# References: Marigo et al. 2001, http://ukads.nottingham.ac.uk/abs/2001A%26A...370..194M
# Portinari et al. 1998, http://ukads.nottingham.ac.uk/abs/1998A%26A...334..505P
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "Kobayashi06":
file_yield = open(
'yield_tables/agb_and_massive_stars_Kobayashi06_marigo01_gce_totalyields.txt', 'r')
        # Use the massive-star yields of Kobayashi et al. 2006 combined with the AGB yields of
        # Marigo et al. 2001 (see the file name above).
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "WW95":
file_yield = open(
'yield_tables/massive_stars_WW95_totalyields.txt', 'r')
# Use net yields of Woosley S. E., Weaver T. A., 1995, ApJS, 101, 181 (WW95)
# Use WW95 model B which has the highest [Mg/Fe].
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "marigo01":
file_yield = open(
'yield_tables/agb_marigo01_totalyields.txt', 'r')
data = file_yield.readlines()
file_yield.close()
###########################
### extract information ###
###########################
#
H_relative_line_number = function_get_element_line_number(data, 'H-1')
He_relative_line_number = function_get_element_line_number(data, 'He-4')
C_relative_line_number = function_get_element_line_number(data, 'C-12')
N_relative_line_number = function_get_element_line_number(data, 'N-14')
O_relative_line_number = function_get_element_line_number(data, 'O-16')
Ne_relative_line_number = function_get_element_line_number(data, 'Ne-20')
Mg_relative_line_number = function_get_element_line_number(data, 'Mg-24')
Si_relative_line_number = function_get_element_line_number(data, 'Si-28')
S_relative_line_number = function_get_element_line_number(data, 'S-32')
Ca_relative_line_number = function_get_element_line_number(data, 'Ca-40')
Fe_relative_line_number = function_get_element_line_number(data, 'Fe-56')
#
global M_list, Z_list, eject_mass_list, H_eject_mass_list, He_eject_mass_list, C_eject_mass_list, \
N_eject_mass_list, O_eject_mass_list, Ne_eject_mass_list, Mg_eject_mass_list, Si_eject_mass_list, \
S_eject_mass_list, Ca_eject_mass_list, Fe_eject_mass_list, Metal_eject_mass_list
global O_over_Mg_list, Mg_over_Fe_list, Ca_over_Fe_list, Si_over_Fe_list, C_over_H_list, Mg_over_H_list, \
Si_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_H_list, \
Z_over_X_list, Z_over_Z0_list, XXX_list, YYY_list, ZZZ_list, O_over_Fe_list
#
i = len(data)-1
while i > -1:
line_i = str.split(data[i])
if line_i[1] == 'Table:':
line_H = str.split(data[i + H_relative_line_number])
line_He = str.split(data[i + He_relative_line_number])
line_C = str.split(data[i + C_relative_line_number])
line_N = str.split(data[i + N_relative_line_number])
line_O = str.split(data[i + O_relative_line_number])
line_Ne = str.split(data[i + Ne_relative_line_number])
line_Mg = str.split(data[i + Mg_relative_line_number])
line_Si = str.split(data[i + Si_relative_line_number])
line_S = str.split(data[i + S_relative_line_number])
line_Ca = str.split(data[i + Ca_relative_line_number])
line_Fe = str.split(data[i + Fe_relative_line_number])
line_Mfinal = str.split(data[i + 2])
(Z, M) = function_get_Z_M(line_i[2]) # metallicity and mass of the star
ejecta_mass = round((M - function_get_Mfinal(line_Mfinal[2])), 5) ####################
H_mass = function_get_element_mass(line_H[1])
He_mass = function_get_element_mass(line_He[1])
C_mass = function_get_element_mass(line_C[1])
N_mass = function_get_element_mass(line_N[1])
O_mass = function_get_element_mass(line_O[1])
Ne_mass = function_get_element_mass(line_Ne[1])
Mg_mass = function_get_element_mass(line_Mg[1])
Si_mass = function_get_element_mass(line_Si[1])
S_mass = function_get_element_mass(line_S[1])
Ca_mass = function_get_element_mass(line_Ca[1])
Fe_mass = function_get_element_mass(line_Fe[1])
H_num = H_mass/1.0079
C_num = C_mass/12.011
N_num = N_mass/14.007
O_num = O_mass/15.9994
Ne_num = Ne_mass/20.18
Mg_num = Mg_mass/24.305
Si_num = Si_mass/28.085
S_num = S_mass/32.06
Ca_num = Ca_mass/40.078
Fe_num = Fe_mass/55.845
Metal_num = C_num+N_num+O_num+Ne_num+Mg_num+Si_num+S_num+Ca_num+Fe_num
O_over_Mg = math.log(O_num/Mg_num, 10) - O_abundances_solar + Mg_abundances_solar
Mg_over_H = math.log(Mg_num/H_num, 10) - Mg_abundances_solar + H_abundances_solar
Si_over_H = math.log(Si_num/H_num, 10) - Si_abundances_solar + H_abundances_solar
C_over_H = math.log(C_num/H_num, 10) - C_abundances_solar + H_abundances_solar
Fe_over_H = math.log(Fe_num/H_num, 10) - Fe_abundances_solar + H_abundances_solar
O_over_H = math.log(O_num/H_num, 10) - O_abundances_solar + H_abundances_solar
Mg_over_Fe = math.log(Mg_num/Fe_num, 10) - Mg_abundances_solar + Fe_abundances_solar
Ca_over_Fe = math.log(Ca_num/Fe_num, 10) - Ca_abundances_solar + Fe_abundances_solar
Si_over_Fe = math.log(Si_num/Fe_num, 10) - Si_abundances_solar + Fe_abundances_solar
O_over_Fe = math.log(O_num/Fe_num, 10) - O_abundances_solar + Fe_abundances_solar
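            # The bracket abundances above follow [A/B] = log10(N_A/N_B) - log10(N_A/N_B)_solar,
            # with the solar terms entering through the X_abundances_solar logarithmic number abundances.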
Metal_mass = round((ejecta_mass - H_mass - He_mass), 5) ####################
# Metal_mass = round((C_mass+N_mass+O_mass+Ne_mass+Mg_mass+Si_mass+S_mass+Ca_mass+Fe_mass), 5) ###### the same ######
if Metal_mass<0:
print("Warning: Metal_mass=", Metal_mass, "<0")
print("check stellar yield table with metallicity and mass being:", Z, "&", M)
Metal_mass = 0
Z_over_X = math.log(Metal_mass / H_mass, 10) - math.log(0.01886 / 0.7381, 10)
Z_over_Z0 = math.log(Metal_mass / ejecta_mass, 10) - math.log(0.01886, 10)
Z_over_H = math.log(Metal_num / H_num, 10) - math.log(0.01886 / 18 / 0.7381, 10) # where 18 is the estimated average atomic weight over the weight of hydrogen.
XXX = H_mass / ejecta_mass
YYY = He_mass / ejecta_mass
ZZZ = Metal_mass / ejecta_mass
if len(Z_list) == 0:
Z_list.append(Z)
Z_n = 0
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
Z_over_H_list.append([])
Z_over_X_list.append([])
Z_over_Z0_list.append([])
XXX_list.append([])
YYY_list.append([])
ZZZ_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Si_over_Fe_list.append([])
Ca_over_Fe_list.append([])
Mg_over_H_list.append([])
Si_over_H_list.append([])
C_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
O_over_Fe_list.append([])
if Z != Z_list[-1]:
Z_list.append(Z)
Z_n += 1
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Ca_over_Fe_list.append([])
Si_over_Fe_list.append([])
Mg_over_H_list.append([])
Si_over_H_list.append([])
C_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
Z_over_H_list.append([])
Z_over_X_list.append([])
Z_over_Z0_list.append([])
XXX_list.append([])
YYY_list.append([])
ZZZ_list.append([])
O_over_Fe_list.append([])
M_list[Z_n].append(M)
eject_mass_list[Z_n].append(ejecta_mass)
H_eject_mass_list[Z_n].append(H_mass)
He_eject_mass_list[Z_n].append(He_mass)
C_eject_mass_list[Z_n].append(C_mass)
N_eject_mass_list[Z_n].append(N_mass)
O_eject_mass_list[Z_n].append(O_mass)
Ne_eject_mass_list[Z_n].append(Ne_mass)
Mg_eject_mass_list[Z_n].append(Mg_mass)
Si_eject_mass_list[Z_n].append(Si_mass)
S_eject_mass_list[Z_n].append(S_mass)
Ca_eject_mass_list[Z_n].append(Ca_mass)
Fe_eject_mass_list[Z_n].append(Fe_mass)
Metal_eject_mass_list[Z_n].append(Metal_mass)
O_over_Mg_list[Z_n].append(O_over_Mg)
Mg_over_Fe_list[Z_n].append(Mg_over_Fe)
Ca_over_Fe_list[Z_n].append(Ca_over_Fe)
Si_over_Fe_list[Z_n].append(Si_over_Fe)
Mg_over_H_list[Z_n].append(Mg_over_H)
Si_over_H_list[Z_n].append(Si_over_H)
C_over_H_list[Z_n].append(C_over_H)
O_over_H_list[Z_n].append(O_over_H)
Z_over_H_list[Z_n].append(Z_over_H)
Z_over_X_list[Z_n].append(Z_over_X)
Z_over_Z0_list[Z_n].append(Z_over_Z0)
XXX_list[Z_n].append(XXX)
YYY_list[Z_n].append(YYY)
ZZZ_list[Z_n].append(ZZZ)
Fe_over_H_list[Z_n].append(Fe_over_H)
O_over_Fe_list[Z_n].append(O_over_Fe)
(i) = (i - 1)
return
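# Layout of the global lists filled above (inferred from the loop): each quantity is a list of lists,
# indexed first by metallicity (Z_list[Z_n]) and then by initial stellar mass (M_list[Z_n]),
# e.g. Mg_over_Fe_list[Z_n][k] is the [Mg/Fe] of the ejecta of a star with initial mass M_list[Z_n][k].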
def function_get_Mfinal(Mfinal_string):
i_end = len(Mfinal_string)
i = 0
mass_str = ''
while i < i_end:
mass_str += Mfinal_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
def function_get_element_mass(element_mass_string):
i_end = len(element_mass_string)
i = 1
mass_str = ''
while i < i_end:
mass_str += element_mass_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
def function_get_element_line_number(data, element):
i = 0
while i < len(data):
line_i = str.split(data[i])
if line_i[1] == 'Table:':
start = i
j = 0
while j < 100:
line_j = str.split(data[j])
if line_j[0] == '&'+element:
end = j
element_relative_line_number = j - i
break
(j) = (j+1)
break
(i) = (i + 1)
return element_relative_line_number
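# The helper above returns the line offset of the '&<element>' row relative to the first 'Table:' header;
# the same offset is assumed to hold for every table block in the yield file.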
def function_get_Z_M(M_Z_string):
i = 0
i_M_start = 0
i_M_end = 0
i_Z_start = 0
i_Z_end = 0
while i < len(M_Z_string):
if M_Z_string[i] == 'M':
i_M_start = i+2
if M_Z_string[i] == ',':
i_M_end = i
i_Z_start = i+3
if M_Z_string[i] == ')':
i_Z_end = i
(i) = (i+1)
i = i_Z_start
Z_str = ''
while i < i_Z_end:
Z_str += M_Z_string[i]
(i) = (i + 1)
Z = float(Z_str)
i = i_M_start
M_str = ''
while i < i_M_end:
M_str += M_Z_string[i]
(i) = (i + 1)
M = float(M_str)
return (Z, M)
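# Expected input format (inferred from the parser above), e.g.
# function_get_Z_M('(M=13.0,Z=0.004)') returns (0.004, 13.0).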
def funtion_plot_yields():
global O_over_Mg_list, Mg_over_Fe_list, C_over_H_list, Mg_over_H_list, Si_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_X_list, Z_over_Z0_list, \
Z_over_H_list, O_over_Fe_list, M_list, Z_list, XXX_list, YYY_list, ZZZ_list
color_list_ = []
for i in range(len(Z_list)):
ZZZ = Z_list[i]
if ZZZ > 0:
Z_box = math.log(ZZZ, 10) - math.log(0.01886, 10)
else:
Z_box = -6
color_list_.append(round(((Z_box+7)**4.001 - (-6.001 + 7) ** 4.001) / ((1 + 7) ** 4.001 - (-6.001 + 7) ** 4.001) * 1000))
colors = plt.cm.hsv_r(np.linspace(0, 1, 1000))
j = 0
while j < len(M_list):
i = 0
while i < len(M_list[j]):
M_list[j][i] = math.log(M_list[j][i], 10)
(i) = (i+1)
(j) = (j+1)
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(1, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# # plt.ylim(0, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_Mg_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# O_mass_eject_SNIa = 0.148 # TNH93 0.148 i99CDD1 0.09, i99CDD2 0.06, i99W7 0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13
# Mg_mass_eject_SNIa = 0.009 # TNH93 0.009 i99CDD1 0.0077, i99CDD2 0.0042, i99W7 0.0085, ivo12/13 0.015-0.029, t03 0.013, t86 0.016
# O_num = O_mass_eject_SNIa / 15.9994
# Mg_num = Mg_mass_eject_SNIa / 24.305
# O_over_Mg_SNIa = math.log(O_num / Mg_num, 10) - O_abundances_solar + Mg_abundances_solar
# plt.plot([-0.3, 0.9], [O_over_Mg_SNIa, O_over_Mg_SNIa], ls="--", lw=2, label="SNIa")
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/Mg]')
# plt.tight_layout()
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(2, figsize=(4, 3.5))
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Mg_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
Mg_mass_eject_SNIa = 0.0158 # TNH93 0.148 i99CDD1 0.09, i99CDD2 0.06, i99W7 0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13
Fe_mass_eject_SNIa = 0.68 #0.63 # Recchi2009 halfed to 0.372 # TNH93 0.744 i99CDD1 0.56, i99CDD2 0.76, i99W7 0.63, ivo12/13 0.62-0.67, t03 0.74, t86 0.63
Ca_mass_eject_SNIa = 0.0181
Si_mass_eject_SNIa = 0.142
Ca_num = Ca_mass_eject_SNIa / 40.078
Si_num = Si_mass_eject_SNIa / 28.085
Mg_num = Mg_mass_eject_SNIa / 24.305
Fe_num = Fe_mass_eject_SNIa / 55.845
Mg_over_Fe_SNIa = math.log(Mg_num / Fe_num, 10) - Mg_abundances_solar + Fe_abundances_solar
Si_over_Fe_SNIa = math.log(Si_num / Fe_num, 10) - Si_abundances_solar + Fe_abundances_solar
Ca_over_Fe_SNIa = math.log(Ca_num / Fe_num, 10) - Ca_abundances_solar + Fe_abundances_solar
# plt.plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
# plt.plot([-2, 3], [0, 0], lw=0.5, ls='dotted')
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 3.5)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'[Mg/Fe]')
# plt.tight_layout()
# plt.savefig('steller_yield_Mg_over_Fe.pdf', dpi=250)
#
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(3, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 7)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# O_over_Fe_SNIa = math.log(O_num / Fe_num, 10) - O_abundances_solar + Fe_abundances_solar
# plt.plot([-0.3, 0.9], [O_over_Fe_SNIa, O_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/Fe]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(4, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Mg_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Mg/H]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(42, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Si_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Si/H]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(41, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# # plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], C_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[C/H]')
# plt.tight_layout()
# plt.savefig('steller_yield_Mg.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(5, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/H]')
# plt.tight_layout()
# # plt.savefig('steller_yield_O.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(6, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list)-1
# while i > -1:
# plt.plot(M_list[i], Fe_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Fe/H]')
# plt.tight_layout()
# # plt.savefig('steller_yield_Fe.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(7, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Z_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Z/H]')
# plt.title("Number ratio")
# plt.tight_layout()
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(8, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Z_over_X_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Z/X]')
# plt.title("Mass ratio")
# plt.tight_layout()
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(11, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(0.23, 0.6)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], YYY_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# # plt.plot([-2, 3], [0.25, 0.25], lw=0.5)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel('Y')
# plt.tight_layout()
# # plt.savefig('steller_yield_Y.pdf', dpi=250)
##########
fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 4))
# i = len(M_list) - 1
# while i > -1:
# axs[0].plot(M_list[i], Z_over_Z0_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# (i) = (i - 1)
# axs[0].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# # axs[0].set_yticks(np.arange(-1, 2.1, 1))
# axs[0].set_ylim(-2, 1.6)
# axs[0].set_ylabel(r'[Z]')
#
# i = len(M_list) - 1
# while i > -1:
# # axs[1].plot(M_list[i], XXX_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# axs[1].plot(M_list[i], YYY_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# # axs[1].plot(M_list[i], ZZZ_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# (i) = (i - 1)
# axs[1].plot([-2, 3], [0.273, 0.273], lw=0.7, ls='dotted')
# # axs[1].set_yticks(np.arange(0.2, 0.61, 0.1))
# axs[1].set_ylim(0.24, 0.605)
# axs[1].set_xlim(-0.5, 2.2)
# axs[1].set_ylabel('Y')
# axs[0].plot([1.3073, 1.3073], [-0.1, 1.7], lw=0.2)
axs[0].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
ZZZ = Z_list[i]
if ZZZ > 0:
Z_box = round(math.log(ZZZ, 10) - math.log(0.01886, 10), 2)
else:
Z_box = -6
M_list[i].insert(0, math.log(150, 10))
O_over_Fe_list[i].insert(0, O_over_Fe_list[i][0])
axs[0].plot(M_list[i], O_over_Fe_list[i], lw=2**(i**0.5), label=r'$Z={}$'.format(ZZZ), color='k', ls=['-', 'dashed', 'dotted', '-.'][i])
(i) = (i - 1)
# axs[0].plot([-0.3, 0.9], [O_over_Fe_SNIa, O_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[0].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[0].set_yticks(np.arange(-2, 2.1, 2))
axs[0].set_xlim(0.7, 1.7)
# axs[0].set_ylim(-0.5, 1.7)
axs[0].set_ylabel(r'[O/Fe]')
axs[0].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
axs[0].legend(prop={'size': 6}, loc='best')
axs[1].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
Mg_over_Fe_list[i].insert(0, Mg_over_Fe_list[i][0])
axs[1].plot(M_list[i], Mg_over_Fe_list[i], lw=2**(i**0.5), label=r'$Z={}$'.format(ZZZ),
color='k', ls=['-', 'dashed', 'dotted', '-.'][i])
(i) = (i - 1)
# axs[1].plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[1].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[1].set_yticks(np.arange(-2, 2.1, 2))
# axs[1].set_ylim(-0.1, 1.7)
axs[1].set_ylabel(r'[Mg/Fe]')
axs[1].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
# axs[1].legend(prop={'size': 6}, loc='best')
axs[2].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
Si_over_Fe_list[i].insert(0, Si_over_Fe_list[i][0])
axs[2].plot(M_list[i], Si_over_Fe_list[i], lw=2**(i**0.5), label=r'$Z={}$'.format(ZZZ),
color='k', ls=['-', 'dashed', 'dotted', '-.'][i])
(i) = (i - 1)
# axs[2].plot([-0.3, 0.9], [Si_over_Fe_SNIa, Si_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[2].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[2].set_yticks(np.arange(-2, 2.1, 2))
# axs[2].set_ylim(-0.1, 1.7)
axs[2].set_ylabel(r'[Si/Fe]')
axs[2].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
# axs[2].legend(prop={'size': 6}, loc='best')
plt.tight_layout()
# Remove horizontal space between axes
fig.subplots_adjust(hspace=0)
# plt.savefig('stellar_yields.pdf', dpi=250)
plt.show()
return
if __name__ == '__main__':
start_time = time.time()
Z_list = []
M_list = []
eject_mass_list = []
H_eject_mass_list = []
He_eject_mass_list = []
C_eject_mass_list = []
N_eject_mass_list = []
O_eject_mass_list = []
Ne_eject_mass_list = []
Mg_eject_mass_list = []
Si_eject_mass_list = []
S_eject_mass_list = []
Ca_eject_mass_list = []
Fe_eject_mass_list = []
Metal_eject_mass_list = []
O_over_Mg_list = []
Mg_over_H_list = []
Si_over_H_list = []
C_over_H_list = []
Fe_over_H_list = []
O_over_H_list = []
Z_over_H_list = []
Z_over_X_list = []
Z_over_Z0_list = []
XXX_list = []
YYY_list = []
ZZZ_list = []
Mg_over_Fe_list = []
Si_over_Fe_list = []
Ca_over_Fe_list = []
O_over_Fe_list = []
    yield_table_name = "Kobayashi06"  # options: "Kobayashi06", "WW95", "portinari98", or "marigo01"
function_read_file(yield_table_name)
funtion_plot_yields()
plot_lifetime_and_finalmass()
print(" - Run time: %s -" % round((time.time() - start_time), 2))
| 38,525 | 41.243421 | 172 | py |
galIMF | galIMF-master/element_weight_table.py | # this function returns the element weight
def function_element_weight(element_name):
# element weight: https://www.lenntech.com/periodic/mass/atomic-mass.htm
if element_name == "H":
element_weight = 1.0079
elif element_name == "He":
element_weight = 4.0026
elif element_name == "C":
element_weight = 12.0107
elif element_name == "N":
element_weight = 14.0067
elif element_name == "O":
element_weight = 15.9994
elif element_name == "Ne":
element_weight = 20.1797
elif element_name == "Mg":
element_weight = 24.305
elif element_name == "Si":
element_weight = 28.0855
elif element_name == "S":
element_weight = 32.065
elif element_name == "Ca":
element_weight = 40.078
elif element_name == "Fe":
element_weight = 55.845
else:
print("Wrong element name for function_element_weight")
element_weight = None
return element_weight
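# Minimal usage sketch (illustrative value only, not taken from any yield table):
# divide an element mass in solar masses by its atomic weight to obtain a relative atom number,
# as done throughout galevo.py.
if __name__ == '__main__':
    example_Fe_mass = 0.68  # hypothetical Fe ejecta mass [Msun]
    example_Fe_number = example_Fe_mass / function_element_weight("Fe")
    print("Relative number of Fe atoms:", example_Fe_number)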
| 986 | 29.84375 | 76 | py |
galIMF | galIMF-master/example_galaxy_wide_IMF.py | # Python3 code
# An example file that demonstrates how to construct the galaxy-wide IMF,
# as well as how to obtain each stellar mass in the galaxy, applying the IGIMF theory with the galIMF model.
# Made by: Yan Zhiqiang & Tereza Jerabkova
# The outputs of this example are:
# - the comparison plot of galaxy-wide IMF, canonical IMF, and the histogram of stellar masses;
# - the txt file containing the galaxy-wide IMF;
# - the txt file containing the number of stars in each mass bin.
# --------------------------------------------------------------------------------------------------------------------------------
# Import modules and libraries
# --------------------------------------------------------------------------------------------------------------------------------
import galimf # galIMF containing IGIMF function and OSGIMF function and additional computational modules
import matplotlib.pyplot as plt # matplotlib for plotting
import numpy as np
import math
import time
from scipy.integrate import quad
# -----------------------------------------------------------------------
print("\n ===============================\n"
" === example_galaxy_wide_IMF ===\n"
" ===============================\n")
print(" This test code serves as an example, "
"demonstrating how to construct and visualize IGIMF and OSGIMF with GalIMF open source code "
"for a given galaxy-wide SFR and metallicity.\n")
# --------------------------------------------------------------------------------------------------------------------------------
# Set the final out put plot size:
# --------------------------------------------------------------------------------------------------------------------------------
fig0 = plt.figure(figsize=(3.4, 2.5))
# --------------------------------------------------------------------------------------------------------------------------------
# Define code parameters necessary for the computations:
# --------------------------------------------------------------------------------------------------------------------------------
# the most crucial ones, which you most likely might want to change
SFR = float(input(" Please input the galaxy-wide SFR in solar mass per year and end the input with the return "
"key.\n A typical input SFR is from 0.0001 to 10000 (You can input 1e-4 as 0.0001).\n "
"We recommend a value smaller than 1 for the first run as high SFR calculations take more time.\n\n"
" SFR [Msolar/yr] = "))
# Star Formation Rate [solar mass / yr]
M_over_H = float(input("\n Please input the metallicity, [M/H]"
"\n A typical input should be smaller than 0, i.e., metal poor."
"\n\n [M/H] = "))
bindw = galimf.resolution_histogram_relative = 10 ** (max((0 - math.log(SFR, 10)), 0) ** 0.2 - 1.9)
# sets the relative histogram resolution for optimal sampling; it is automatically adjusted with the SFR value.
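# For illustration: any SFR >= 1 gives 10 ** (0 ** 0.2 - 1.9) ~ 0.013, while SFR = 1e-4 gives 10 ** (4 ** 0.2 - 1.9) ~ 0.26.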
alpha3_model = 2 # IMF high-mass-end power-index model, see file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'Z' # see file 'galimf.py'
alpha1_model = 'Z' # see file 'galimf.py'
beta_model = 1
M_str_L = 0.08 # star mass lower limit [solar mass]
M_str_U = 150 # star mass upper limit [solar mass]
M_turn = 0.5 # IMF power-index breaking mass [solar mass]
M_turn2 = 1. # IMF power-index breaking mass [solar mass]
M_ecl_U = 10**9 # embedded cluster mass upper limit [solar mass]
M_ecl_L = 5. # embedded cluster mass lower limit [solar mass]
# ----------------------------------------------------------------
# Parameters below are internal parameters of the theory.
# Read Yan, Jerabkova, Kroupa (2017, A&A...607A.126Y) carefully before change them!
delta_t = 10. # star formation epoch [Myr]
I_ecl = 1. # normalization factor in the Optimal Sampling condition equation
I_str = 1. # normalization factor in the Optimal Sampling condition equation
# --------------------------------------------------------------------------------------------------------------------------------
# Construct IGIMF:
# --------------------------------------------------------------------------------------------------------------------------------
print("\n Calculating galaxy-wide IMF......")
start_time = time.time()
galimf.function_galimf(
"I", # IorS ### "I" for IGIMF; "OS" for OSGIMF
'IGIMF', # 'R14'
SFR, # Star Formation Rate [solar mass / yr]
alpha3_model, # IMF high-mass-end power-index model, see file 'galimf.py'
delta_t, # star formation epoch [Myr]
M_over_H,
I_ecl, # normalization factor in the Optimal Sampling condition equation
M_ecl_U, # embedded cluster mass upper limit [solar mass]
M_ecl_L, # embedded cluster mass lower limit [solar mass]
beta_model, # ECMF power-index model, see file 'galimf.py'
I_str, # normalization factor in the Optimal Sampling condition equation
M_str_L, # star mass lower limit [solar mass]
alpha_1, # IMF low-mass-end power-index
alpha1_model, # see file 'galimf.py'
M_turn, # IMF power-index change point [solar mass]
alpha_2, # IMF middle-mass power-index
alpha2_model, # see file 'galimf.py'
M_turn2, # IMF power-index change point [solar mass]
M_str_U, # star mass upper limit [solar mass]
printout=True # save the generated IMF
)
print(" - Galaxy-wide IMF calculation complete -")
masses = np.array(galimf.List_M_str_for_xi_str)
igimf = np.array(galimf.List_xi)
# igimf is normalized by default to a total mass formed in 10 Myr given the SFR
# to change the normalization, follow the commented code below
# Norm = simps(igimf*masses,masses) #- normalization to a total mass
# Norm = simps(igimf,masses) #- normalization to number of stars
# Mtot1Myr = SFR*10*1.e6 #total mass formed in 10 Myr
# igimf = np.array(igimf)*Mtot1Myr/Norm
plt.plot(np.log10(masses+1.e-50), np.log10(igimf+1.e-50), color='blue', lw=2.5, label='Galaxy-wide IMF')
ylim_min = np.min(igimf+1.e-50)
ylim_max = np.max(igimf+1.e-50)
plt.ylim(np.log10(ylim_min), np.log10(ylim_max))
# --------------------------------------------------------------------------------------------------------------------------------
# Construct OSGIMF if required by interactive input:
# --------------------------------------------------------------------------------------------------------------------------------
OSrequest = input("\n Do you want to calculate OSGIMF? "
"\n OSGIMF gives the stellar masses generated in a 10 Myr epoch "
"with constant inputted SFR. This may take time for high SFR input"
"\n Input 1 as yes: ")
if OSrequest == "y" or OSrequest == "Y" or OSrequest == "yes" or OSrequest == "Yes" or OSrequest == "1":
galimf.resolution_histogram_relative = bindw / float(
input("\n Please input the result resolution (Input 1 for the first run): \n\n"
"Resolution = "))
print("\n Calculating stellar masses of the galaxy......")
start_time = time.time()
galimf.function_galimf(
"OS", # IorS ### "I" for IGIMF; "OS" for OSGIMF
'IGIMF', # 'R14'
SFR, # Star Formation Rate [solar mass / yr]
alpha3_model, # IMF high-mass-end power-index model, see file 'galimf.py'
delta_t, # star formation epoch [Myr]
M_over_H,
I_ecl, # normalization factor in the Optimal Sampling condition equation
M_ecl_U, # embedded cluster mass upper limit [solar mass]
M_ecl_L, # embedded cluster mass lower limit [solar mass]
beta_model, # ECMF power-index model, see file 'galimf.py'
I_str, # normalization factor in the Optimal Sampling condition equation
M_str_L, # star mass lower limit [solar mass]
alpha_1, # IMF low-mass-end power-index
alpha1_model, # see file 'galimf.py'
M_turn, # IMF power-index change point [solar mass]
alpha_2, # IMF middle-mass power-index
alpha2_model, # see file 'galimf.py'
M_turn2, # IMF power-index change point [solar mass]
M_str_U, # star mass upper limit [solar mass]
printout=True # save the generated OSGIMF
)
print(" - Stellar masses calculation complete - Run time: %ss -" % round((time.time() - start_time), 2))
# One can easily import data considering number of stars in each mass bin assuming optimal sampling
mass_range_center = galimf.mass_range_center
mass_range = galimf.mass_range
mass_range_upper_limit = galimf.mass_range_upper_limit
mass_range_lower_limit = galimf.mass_range_lower_limit
star_number = galimf.star_number
mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number = zip(
*sorted(zip(mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number)))
masses = np.array(galimf.List_mass_grid_x_axis) + 1.e-50
osgimf = np.array(galimf.List_star_number_in_mass_grid_y_axis) + 1.e-50
plt.plot(np.log10(masses), np.log10(osgimf), color='green', lw=2.5, label='Stellar mass histogram')
# --------------------------------------------------------------------------------------------------------------------------------
# Make a grid with power-law index -2.3 to compare with the resulting IMFs.
# --------------------------------------------------------------------------------------------------------------------------------
for k in range(20):
sal_IMF = masses ** (-2.3)
plt.plot(np.log10(masses), np.log10((1.e5*np.max(igimf)/np.max(sal_IMF))*sal_IMF)-k, c='grey', lw=0.5)
N = 100
can_imf = np.zeros(N)
masses = np.logspace(np.log10(0.08), np.log10(120), N, base=10)
for i, m in enumerate(masses):
if m <= 0.5:
can_imf[i] = m ** (-1.3)
else:
can_imf[i] = 0.5*m ** (-2.3)
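# The factor 0.5 above keeps the canonical IMF continuous at m = 0.5 Msun, since 0.5 * 0.5**(-2.3) = 0.5**(-1.3).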
def imf(mass, k_star, alpha):
return k_star*mass*mass**(-alpha)
Norm = quad(imf, 0.08, 0.5, args=(1, 1.3))[0] + quad(imf, 0.5, 120, args=(0.5, 2.3))[0]
Mtot1Myr = SFR*10*1.e6 # total mass formed in 10 Myr
can_imf = np.array(can_imf)*Mtot1Myr/Norm
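# Norm is the total stellar mass of one canonical population (per unit IMF normalisation),
# so the rescaling by Mtot1Myr/Norm gives the canonical IMF the same total mass formed in 10 Myr as the IGIMF above,
# making the two curves directly comparable in the plot.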
plt.plot(np.log10(masses), np.log10(can_imf), color='r', lw=2, label='canonical IMF')
if ylim_max < np.max(can_imf):
ylim_max = np.max(can_imf)
# --------------------------------------------------------------------------------------------------------------------------------
# Plot settings
# --------------------------------------------------------------------------------------------------------------------------------
plt.xlabel(r'$\log{(m\,[M_{\odot}])}$')
plt.ylabel(r'$\log{(\xi_{\mathrm{gal}}\,[M_{\odot}^{-1}])}$')
plt.ylim(np.log10(ylim_min), np.log10(ylim_max))
plt.xlim(math.log(0.06, 10), math.log(160, 10))
plt.legend(loc='best', ncol=1, fancybox=True, prop={'size': 7})
plt.tight_layout()
fig0.savefig('galaxy_wide_IMF_plot.pdf', dpi=250)
print("\n Please check the prompted window for the result."
"\n IMFs in the plot are normalized by the same galaxy stellar mass."
"\n The generated plot is saved in the file galaxy_wide_IMF_plot.pdf."
"\n The program will finish when you close it.")
plt.show()
print("\n Example complete.\n"
" ======================\n")
| 11,352 | 47.725322 | 130 | py |
galIMF | galIMF-master/galevo.py | # A python3 code
# This is a single-zone closed-box galaxy chemical evolution module.
# It is coupled with a variable galaxy-wide IMF that depends on the galactic property at the time of star formation.
# The stellar population forms at every 10 Myr (the shortest time step) over 10 Gyr;
# with each stellar population a different galaxy-wide IMF calculated using the IGIMF theory (the galimf.py model).
# The parameters assumed for the simulation are specified at the end of this file or imported from other files,
# i.e., element_weight_table.py, element_abundances_solar.py, element_abundances_primordial.py.
import numpy as np
import time
import math
import importlib
from scipy.integrate import quad
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import gc
import sys
import warnings
import os
import errno
warnings.filterwarnings("ignore")
sys.path.insert(0, 'Generated_IGIMFs')
sys.path.insert(0, 'IMFs')
sys.path.insert(0, 'yield_tables')
import element_weight_table, element_abundances_solar, element_abundances_primordial, stellar_luminosity
from IMFs import Kroupa_IMF, diet_Salpeter_IMF
from yield_tables import SNIa_yield
def galaxy_evol(imf='igimf', STF=0.5, SFEN=1, Z_0=0.000000134, solar_mass_component='Anders1989_mass',
str_yield_table='portinari98',
IMF_name='Kroupa', steller_mass_upper_bound=150,
time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
SFH_model='provided', SFE=0.05,
SNIa_ON=True, SNIa_yield_table='Thielemann1993', solar_abu_table='Anders1989',
high_time_resolution=None, plot_show=None, plot_save=None, outflow=None, check_igimf=False):
start_time = time.time()
######################
    # If imf='igimf', the model will use the variable IMF; imf='Kroupa' will use the Kroupa IMF.
    # A 1 in SFH.txt stands for SFR = 1 [solar mass/year] in a 10 Myr epoch.
    # STF is the total stellar mass / total gas mass at 13 Gyr, which determines the initial gas mass. See Yan et al. 2019.
# Z_0 is the initial metallicity
######################
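    # For example (illustrative only), an SFH.txt whose first lines read
    #   1
    #   0.5
    #   0
    # describes SFR = 1 Msun/yr for the first 10 Myr, 0.5 Msun/yr for the next 10 Myr, and no star formation afterwards.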
global igimf_mass_function, mass_grid_table, mass_grid_table2, Mfinal_table, Mmetal_table, M_element_table
global time_axis, gas_Z_over_X_list, total_star_formed, \
O_over_H_list, Mg_over_H_list, C_over_H_list, N_over_H_list, Ca_over_H_list, Fe_over_H_list, \
S_over_H_list, Si_over_H_list, Ne_over_H_list, X_list, Y_list, Z_list, \
ejected_O_mass_till_this_time_tot_list, ejected_O_mass_till_this_time_SNII_list, ejected_O_mass_till_this_time_SNIa_list, \
ejected_Mg_mass_till_this_time_tot_list, ejected_Mg_mass_till_this_time_SNII_list, ejected_Mg_mass_till_this_time_SNIa_list, \
ejected_Fe_mass_till_this_time_tot_list, ejected_Fe_mass_till_this_time_SNII_list, ejected_Fe_mass_till_this_time_SNIa_list, \
ejected_S_mass_till_this_time_tot_list, ejected_S_mass_till_this_time_SNII_list, ejected_S_mass_till_this_time_SNIa_list, \
ejected_Si_mass_till_this_time_tot_list, ejected_Si_mass_till_this_time_SNII_list, ejected_Si_mass_till_this_time_SNIa_list, \
ejected_Ne_mass_till_this_time_tot_list, ejected_Ne_mass_till_this_time_SNII_list, ejected_Ne_mass_till_this_time_SNIa_list, \
ejected_Ca_mass_till_this_time_tot_list, ejected_Ca_mass_till_this_time_SNII_list, ejected_Ca_mass_till_this_time_SNIa_list, \
Mg_over_Fe_list, C_over_Fe_list, N_over_O_list, Ca_over_Fe_list, O_over_Fe_list, S_over_Fe_list, Si_over_Fe_list, Ne_over_Fe_list, \
stellar_O_over_H_list, stellar_Mg_over_H_list, stellar_C_over_H_list, stellar_N_over_H_list, \
stellar_Ca_over_H_list, stellar_Fe_over_H_list, stellar_Si_over_H_list, stellar_S_over_H_list, stellar_Ne_over_H_list, \
stellar_X_list, stellar_Y_list, stellar_Z_list, \
stellar_Mg_over_Fe_list, stellar_C_over_Fe_list, stellar_N_over_O_list, stellar_Ca_over_Fe_list, \
stellar_S_over_Fe_list, stellar_Si_over_Fe_list, stellar_Ne_over_Fe_list, \
stellar_O_over_Fe_list, stellar_Z_over_X_list, stellar_Z_over_H_list, \
stellar_O_over_H_list_luminosity_weighted, stellar_Mg_over_H_list_luminosity_weighted, \
stellar_C_over_H_list_luminosity_weighted, stellar_N_over_H_list_luminosity_weighted, \
stellar_Ca_over_H_list_luminosity_weighted, stellar_Ne_over_H_list_luminosity_weighted, stellar_Si_over_H_list_luminosity_weighted, stellar_S_over_H_list_luminosity_weighted, \
stellar_X_list_luminosity_weighted, stellar_Y_list_luminosity_weighted, stellar_Z_list_luminosity_weighted, \
stellar_Fe_over_H_list_luminosity_weighted, stellar_Mg_over_Fe_list_luminosity_weighted, \
stellar_C_over_Fe_list_luminosity_weighted, stellar_N_over_O_list_luminosity_weighted, \
stellar_Ca_over_Fe_list_luminosity_weighted, stellar_O_over_Fe_list_luminosity_weighted, \
stellar_S_over_Fe_list_luminosity_weighted, stellar_Si_over_Fe_list_luminosity_weighted, stellar_Ne_over_Fe_list_luminosity_weighted, \
stellar_Z_over_X_list_luminosity_weighted, stellar_Z_over_H_list_luminosity_weighted, \
remnant_mass_list, total_gas_mass_list, stellar_mass_list, \
ejected_gas_mass_list, ejected_metal_mass_list, \
ejected_gas_Mg_over_Fe_list, instant_ejected_gas_Mg_over_Fe_list, expansion_factor_list, \
expansion_factor_instantaneous_list, expansion_factor_adiabat_list
global SNIa_energy_release_list, SNIa_number_list, SNIa_number_per_century, \
SNII_energy_release_list, SNII_number_list, SNII_number_per_century, \
total_energy_release_list, SN_number_per_century, total_gas_kinetic_energy_list, original_gas_mass # , binding_energy_list
global BH_mass_list, NS_mass_list, WD_mass_list, all_sf_imf, all_sfr
global times_calculate_igimf, instantaneous_recycling, primary_He_mass_fraction
global X_solar, Y_solar, Z_solar, log_binding_energy_initial
instantaneous_recycling = False
times_calculate_igimf = 0
###################
### preparation ###
###################
# Warning flags:
Warning_ejected_gas_mass_of_this_epoch = False
Warning_WD_mass_till_this_time = False
Warning_galaxy_mass_ejected_gas_mass = False
    # get all available metallicities from the stellar evolution table
(Z_table_list, Z_table_list_2, Z_table_list_3) = function_get_avaliable_Z(str_yield_table)
# read in SFH
SFH_input = np.loadtxt('SFH.txt')
length_list_SFH_input = len(SFH_input)
i = 0
total_SF = 0
while i < length_list_SFH_input:
total_SF += SFH_input[i]
(i) = (i + 1)
    # Star Transformation Fraction (STF)
total_star_formed = 10 ** 7 * total_SF
original_gas_mass = total_star_formed / STF # in solar mass unit
print("original_gas_mass =", math.log(original_gas_mass, 10))
ratio_gas_over_DM_radii = 0.3
log_binding_energy_initial = round(
math.log(6.674 * 1.989 ** 2 / 3.086 / 242, 10) + 40 +
math.log(0.5 * original_gas_mass ** 2 + ratio_gas_over_DM_radii * (
1 + 1.37 * ratio_gas_over_DM_radii) / 2 / 3.1415926 * 3 * 10 ** 6 * original_gas_mass, 10), 3)
print("log_binding_energy_initial =", log_binding_energy_initial)
# Create the time steps (x axis) for final output
time_axis = [10**6]
time_resolution = time_resolution_in_Myr * 10 ** 5 * 10
for i in range(10 ** 9, 15 * 10 ** 9, time_resolution * 1000):
time_axis += [i]
if high_time_resolution==True:
for i in range(10 ** 7, 10 ** 8, time_resolution * 10):
time_axis += [i]
for i in range(10 ** 8, 10 ** 9, time_resolution * 100):
time_axis += [i]
else:
plot_at_age = [1 * 10 ** 7, 2 * 10 ** 7, 3 * 10 ** 7, 4 * 10 ** 7, 5 * 10 ** 7,
6 * 10 ** 7, 7 * 10 ** 7, 8 * 10 ** 7, 9 * 10 ** 7,
1 * 10 ** 8, 2 * 10 ** 8, 3 * 10 ** 8, 4 * 10 ** 8, 5 * 10 ** 8,
6 * 10 ** 8, 7 * 10 ** 8, 8 * 10 ** 8, 9 * 10 ** 8,
1 * 10 ** 9, 101 * 10 ** 7, 102 * 10 ** 7, 103 * 10 ** 7,
104 * 10 ** 7, 105 * 10 ** 7, 106 * 10 ** 7, 107 * 10 ** 7, 108 * 10 ** 7, 11 * 10 ** 8,
12 * 10 ** 8, 14 * 10 ** 8, 16 * 10 ** 8, 18 * 10 ** 8,
2 * 10 ** 9, 23 * 10 ** 8, 26 * 10 ** 8, 29 * 10 ** 8,
32 * 10 ** 8, 35 * 10 ** 8, 38 * 10 ** 8, 41 * 10 ** 8, 45 * 10 ** 8,
5 * 10 ** 9, 6 * 10 ** 9,
7 * 10 ** 9, 8 * 10 ** 9, 9 * 10 ** 9, 10 * 10 ** 9, 11 * 10 ** 9]
# plot_at_age = [1 * 10 ** 8, 1 * 10 ** 9, 10.8 * 10 ** 9]
time_axis += plot_at_age
for i in range(10 ** 9, 15 * 10 ** 9, time_resolution * 1000):
time_axis += [i]
    # also consider the times at which star formation events happened,
    # where the time resolution should be temporarily increased.
time_axis_for_SFH_input = []
time_axis_for_SFH_input_D = []
i = 0
while i < length_list_SFH_input:
if SFH_input[i] > 0:
if high_time_resolution == True:
add_time = 1
while add_time < 70:
add_time_step = round(10**(add_time/20)) * 10 ** 7
add_time_step += i * 10 ** 7 + add_time_step
if add_time_step < 14 * 10**9:
time_axis_for_SFH_input += [add_time_step]
(add_time) = (add_time+1)
# time_axis_for_SFH_input += [i * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 1 * 10 ** 4]
# time_axis_for_SFH_input += [i * 10 ** 7 + 5 * 10 ** 4]
# time_axis_for_SFH_input += [i * 10 ** 7 + 7 * 10 ** 4]
# time_axis_for_SFH_input += [i * 10 ** 7 + 8 * 10 ** 4]
# time_axis_for_SFH_input += [i * 10 ** 7 + 9 * 10 ** 4]
# time_axis_for_SFH_input += [i * 10 ** 7 + 1 * 10 ** 5]
# time_axis_for_SFH_input += [i * 10 ** 7 + 2 * 10 ** 5]
# time_axis_for_SFH_input += [i * 10 ** 7 + 5 * 10 ** 5]
# time_axis_for_SFH_input += [i * 10 ** 7 + 1 * 10 ** 6]
# time_axis_for_SFH_input += [i * 10 ** 7 + 2 * 10 ** 6]
# time_axis_for_SFH_input += [i * 10 ** 7 + 5 * 10 ** 6]
# time_axis_for_SFH_input += [i * 10 ** 7 + 1 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 2 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 3 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 4 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 5 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 6 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 7 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 8 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 9 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 10 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 11 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 12 * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 2 * 10 ** 8]
# time_axis_for_SFH_input += [i * 10 ** 7 + 5 * 10 ** 8]
# time_axis_for_SFH_input += [i * 10 ** 7 + 1 * 10 ** 9]
# time_axis_for_SFH_input += [i * 10 ** 7 + 2 * 10 ** 9]
# time_axis_for_SFH_input += [i * 10 ** 7 + 5 * 10 ** 9]
else:
time_axis_for_SFH_input += [i * 10 ** 7]
# time_axis_for_SFH_input += [i * 10 ** 7 + 9 * 10 ** 6]
(i) = (i + 1)
# the final time axis is the sorted combination of the two
time_axis = sorted(list(set(time_axis + time_axis_for_SFH_input)))
# print("\nSimulation results will be give at galactic age [yr] =\n", time_axis)
length_list_time_step = len(time_axis)
print('time_axis:', length_list_time_step, time_axis)
###################
### main loop ###
###################
S_F_R_of_this_epoch_list = []
S_F_F = 1
# define an array save SF event informations that will be used in every latter time steps
all_sf_imf = []
all_sfr = []
epoch_info = [] # This array saves the properties,
# i.e., [S_F_R_of_this_epoch, M_tot_of_this_epoch, igimf_mass_function, igimf_normalization],
    # of all the stellar populations formed at the different time steps (the so-called star formation epochs),
    # so that at any later time the effects (yields) of these aging populations can be computed.
BH_mass_list = []
NS_mass_list = []
WD_mass_list = []
gas_Z_over_X_list = []
O_over_H_list = []
ejected_O_mass_till_this_time_tot_list = []
ejected_O_mass_till_this_time_SNII_list = []
ejected_O_mass_till_this_time_SNIa_list = []
ejected_Mg_mass_till_this_time_tot_list = []
ejected_Mg_mass_till_this_time_SNII_list = []
ejected_Mg_mass_till_this_time_SNIa_list = []
ejected_Fe_mass_till_this_time_tot_list = []
ejected_Fe_mass_till_this_time_SNII_list = []
ejected_Fe_mass_till_this_time_SNIa_list = []
ejected_Ca_mass_till_this_time_tot_list = []
ejected_Ca_mass_till_this_time_SNII_list = []
ejected_Ca_mass_till_this_time_SNIa_list = []
ejected_S_mass_till_this_time_tot_list = []
ejected_S_mass_till_this_time_SNII_list = []
ejected_S_mass_till_this_time_SNIa_list = []
ejected_Si_mass_till_this_time_tot_list = []
ejected_Si_mass_till_this_time_SNII_list = []
ejected_Si_mass_till_this_time_SNIa_list = []
ejected_Ne_mass_till_this_time_tot_list = []
ejected_Ne_mass_till_this_time_SNII_list = []
ejected_Ne_mass_till_this_time_SNIa_list = []
X_list = []
Y_list = []
Z_list = []
Mg_over_H_list = []
C_over_H_list = []
N_over_H_list = []
Ca_over_H_list = []
Ne_over_H_list = []
Si_over_H_list = []
S_over_H_list = []
Fe_over_H_list = []
Mg_over_Fe_list = []
C_over_Fe_list = []
N_over_O_list = []
Ca_over_Fe_list = []
Ne_over_Fe_list = []
Si_over_Fe_list = []
S_over_Fe_list = []
O_over_Fe_list = []
stellar_O_over_H_list = []
stellar_Mg_over_H_list = []
stellar_C_over_H_list = []
stellar_N_over_H_list = []
stellar_Ca_over_H_list = []
stellar_Ne_over_H_list = []
stellar_Si_over_H_list = []
stellar_S_over_H_list = []
stellar_Fe_over_H_list = []
stellar_X_list = []
stellar_Y_list = []
stellar_Z_list = []
stellar_Mg_over_Fe_list = []
stellar_C_over_Fe_list = []
stellar_N_over_O_list = []
stellar_Ca_over_Fe_list = []
stellar_Ne_over_Fe_list = []
stellar_Si_over_Fe_list = []
stellar_S_over_Fe_list = []
stellar_O_over_Fe_list = []
stellar_Z_over_X_list = []
stellar_Z_over_H_list = []
stellar_O_over_H_list_luminosity_weighted = []
stellar_Mg_over_H_list_luminosity_weighted = []
stellar_C_over_H_list_luminosity_weighted = []
stellar_N_over_H_list_luminosity_weighted = []
stellar_Ca_over_H_list_luminosity_weighted = []
stellar_Ne_over_H_list_luminosity_weighted = []
stellar_Si_over_H_list_luminosity_weighted = []
stellar_S_over_H_list_luminosity_weighted = []
stellar_X_list_luminosity_weighted = []
stellar_Y_list_luminosity_weighted = []
stellar_Z_list_luminosity_weighted = []
stellar_Fe_over_H_list_luminosity_weighted = []
stellar_Mg_over_Fe_list_luminosity_weighted = []
stellar_C_over_Fe_list_luminosity_weighted = []
stellar_N_over_O_list_luminosity_weighted = []
stellar_Ca_over_Fe_list_luminosity_weighted = []
stellar_Ne_over_Fe_list_luminosity_weighted = []
stellar_Si_over_Fe_list_luminosity_weighted = []
stellar_S_over_Fe_list_luminosity_weighted = []
stellar_O_over_Fe_list_luminosity_weighted = []
stellar_Z_over_X_list_luminosity_weighted = []
stellar_Z_over_H_list_luminosity_weighted = []
remnant_mass_list = []
total_gas_mass_list = []
ejected_gas_mass_list = []
ejected_gas_Mg_over_Fe_list = []
instant_ejected_gas_Mg_over_Fe_list = []
ejected_metal_mass_list = []
expansion_factor_instantaneous_list = []
expansion_factor_adiabat_list = []
expansion_factor_list = []
stellar_mass_list = []
total_energy_release_list = []
SN_number_per_century = []
# binding_energy_list = []
total_gas_kinetic_energy_list = []
SNIa_energy_release_list = []
SNIa_number_list = []
SNIa_number_per_century = []
SNII_energy_release_list = []
SNII_number_list = []
SNII_number_per_century = []
Z_solar = element_abundances_solar.function_solar_element_abundances(solar_mass_component, 'Metal')
Y_solar = element_abundances_solar.function_solar_element_abundances(solar_mass_component, 'He')
X_solar = element_abundances_solar.function_solar_element_abundances(solar_mass_component, 'H')
primary_H_mass_fraction = element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "H", Z_0, Z_solar)
primary_He_mass_fraction = element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "He", Z_0, Z_solar)
Z_over_X = math.log(Z_0 / primary_H_mass_fraction, 10) - math.log(Z_solar / X_solar, 10)
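    # Z_over_X is the bracket abundance [Z/X] of the gas, i.e. log10(Z/X)_gas - log10(Z/X)_solar;
    # it is the metallicity value passed to the IGIMF calculation for each star formation epoch.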
# do calculation for each time start from time 0
time_step = 0
gc_collect_check = 1
# do calculation for each time to the end time
while time_step < length_list_time_step:
# get time
this_time = time_axis[time_step]
        # calculate the array index (line number in SFH.txt) that this_time has reached
epoch_index_limit = (this_time + 1) / 10 ** 7
if epoch_index_limit > length_list_SFH_input:
epoch_index_limit = length_list_SFH_input
last_time_age = 0
age_of_this_epoch = 0
number_in_SNIa_boundary = 0
# get masses and metallicity at this time (values are calculated by the end of last time step)
# initialize values
total_energy_release = 0
SNIa_energy_release = 0
SNIa_number_from_all_epoch = 0
SNII_energy_release = 0
SNII_number = 0
if time_step == 0:
eject_H_mass = 0
eject_C_mass = 0
eject_N_mass = 0
eject_O_mass = 0
eject_Mg_mass = 0
eject_Ca_mass = 0
eject_Ne_mass = 0
eject_Si_mass = 0
eject_S_mass = 0
eject_Fe_mass = 0
eject_metal_mass = 0
total_gas_mass_at_this_time = original_gas_mass
ejected_gas_mass_at_this_time = 0
ejected_metal_mass_at_last_time = 0
M_tot_up_to_last_time = 0
M_tot_up_to_this_time = 0
stellar_mass_at_last_time = 0
stellar_mass_at_this_time = 0
stellar_luminosity_at_this_time = 0
ejected_gas_mass_till_last_time = 0
ejected_metal_mass_till_last_time = 0
ejected_H_mass_till_last_time = 0
ejected_He_mass_till_last_time = 0
ejected_C_mass_till_last_time = 0
ejected_N_mass_till_last_time = 0
ejected_O_mass_till_last_time = 0
ejected_Mg_mass_till_last_time = 0
ejected_Ca_mass_till_last_time = 0
ejected_Ne_mass_till_last_time = 0
ejected_Si_mass_till_last_time = 0
ejected_S_mass_till_last_time = 0
ejected_Fe_mass_till_last_time = 0
ejected_gas_mass_till_this_time = 0
ejected_metal_mass_till_this_time = 0
ejected_H_mass_till_this_time = 0
ejected_He_mass_till_this_time = 0
ejected_C_mass_till_this_time = 0
ejected_N_mass_till_this_time = 0
ejected_O_mass_till_this_time = 0
ejected_Mg_mass_till_this_time = 0
ejected_Ca_mass_till_this_time = 0
ejected_Ne_mass_till_this_time = 0
ejected_Si_mass_till_this_time = 0
ejected_S_mass_till_this_time = 0
ejected_Fe_mass_till_this_time = 0
BH_mass_till_this_time = 0
NS_mass_till_this_time = 0
WD_mass_till_this_time = 0
remnant_mass_at_this_time = 0
# Fe_H_mass_ratio_at_last_time = 0 #################################
Z_gas_this_time_step = Z_0
total_metal_mass_at_this_time = total_gas_mass_at_this_time * Z_gas_this_time_step
total_H_mass_at_this_time = 0
total_He_mass_at_this_time = 0
total_C_mass_at_this_time = 0
total_N_mass_at_this_time = 0
total_O_mass_at_this_time = 0
total_Mg_mass_at_this_time = 0
total_Ca_mass_at_this_time = 0
total_Ne_mass_at_this_time = 0
total_Si_mass_at_this_time = 0
total_S_mass_at_this_time = 0
total_Fe_mass_at_this_time = 0
total_H_mass_at_last_time = original_gas_mass * primary_H_mass_fraction
H_weight = element_weight_table.function_element_weight("H")
total_He_mass_at_last_time = original_gas_mass * primary_He_mass_fraction
total_C_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "C", Z_0, Z_solar)
total_N_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "N", Z_0, Z_solar)
total_O_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "O", Z_0, Z_solar)
total_Mg_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "Mg", Z_0, Z_solar)
total_Ca_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "Ca", Z_0, Z_solar)
total_Ne_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "Ne", Z_0, Z_solar)
total_Si_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "Si", Z_0, Z_solar)
total_S_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "S", Z_0, Z_solar)
total_Fe_mass_at_last_time = original_gas_mass * element_abundances_primordial.function_element_mass_primary_fraction(
solar_abu_table, "Fe", Z_0, Z_solar)
total_metal_mass_in_gas_at_last_time = original_gas_mass * Z_0
total_gas_mass_at_last_time = original_gas_mass
stellar_metal_mass_at_this_time = 0
stellar_H_mass_at_this_time = 0
stellar_He_mass_at_this_time = 0
stellar_C_mass_at_this_time = 0
stellar_N_mass_at_this_time = 0
stellar_O_mass_at_this_time = 0
stellar_Mg_mass_at_this_time = 0
stellar_Ca_mass_at_this_time = 0
stellar_Fe_mass_at_this_time = 0
stellar_Ne_mass_at_this_time = 0
stellar_Si_mass_at_this_time = 0
stellar_S_mass_at_this_time = 0
stellar_metal_luminosity_at_this_time = 0
stellar_H_luminosity_at_this_time = 0
stellar_He_luminosity_at_this_time = 0
stellar_C_luminosity_at_this_time = 0
stellar_N_luminosity_at_this_time = 0
stellar_O_luminosity_at_this_time = 0
stellar_Mg_luminosity_at_this_time = 0
stellar_Ca_luminosity_at_this_time = 0
stellar_Fe_luminosity_at_this_time = 0
stellar_Ne_luminosity_at_this_time = 0
stellar_Si_luminosity_at_this_time = 0
stellar_S_luminosity_at_this_time = 0
metal_mass_fraction_in_gas = [Z_gas_this_time_step, primary_H_mass_fraction, primary_He_mass_fraction,
total_C_mass_at_last_time / original_gas_mass,
total_N_mass_at_last_time / original_gas_mass,
total_O_mass_at_last_time / original_gas_mass,
total_Mg_mass_at_last_time / original_gas_mass,
total_Ca_mass_at_last_time / original_gas_mass,
total_Fe_mass_at_last_time / original_gas_mass,
total_Ne_mass_at_last_time / original_gas_mass,
total_Si_mass_at_last_time / original_gas_mass,
total_S_mass_at_last_time / original_gas_mass]
# H He C N O Mg Ca Fe Ne Si S
else:
total_gas_mass_at_last_time = total_gas_mass_at_this_time
# total_gas_mass_at_this_time is set in below
ejected_gas_mass_at_this_time = 0
total_metal_mass_in_gas_at_last_time = total_metal_mass_at_this_time
total_metal_mass_at_this_time = 0
total_H_mass_at_last_time = total_H_mass_at_this_time
total_H_mass_at_this_time = 0
total_He_mass_at_last_time = total_He_mass_at_this_time
total_He_mass_at_this_time = 0
total_C_mass_at_last_time = total_C_mass_at_this_time
total_C_mass_at_this_time = 0
total_N_mass_at_last_time = total_N_mass_at_this_time
total_N_mass_at_this_time = 0
total_O_mass_at_last_time = total_O_mass_at_this_time
total_O_mass_at_this_time = 0
total_Mg_mass_at_last_time = total_Mg_mass_at_this_time
total_Mg_mass_at_this_time = 0
total_Ca_mass_at_last_time = total_Ca_mass_at_this_time
total_Ca_mass_at_this_time = 0
total_Si_mass_at_last_time = total_Si_mass_at_this_time
total_Si_mass_at_this_time = 0
total_S_mass_at_last_time = total_S_mass_at_this_time
total_S_mass_at_this_time = 0
total_Ne_mass_at_last_time = total_Ne_mass_at_this_time
total_Ne_mass_at_this_time = 0
total_Fe_mass_at_last_time = total_Fe_mass_at_this_time
total_Fe_mass_at_this_time = 0
M_tot_up_to_last_time = M_tot_up_to_this_time
M_tot_up_to_this_time = 0
stellar_mass_at_last_time = stellar_mass_at_this_time
stellar_mass_at_this_time = 0
stellar_luminosity_at_this_time = 0
BH_mass_till_this_time = 0
NS_mass_till_this_time = 0
WD_mass_till_this_time = 0
remnant_mass_at_this_time = 0
ejected_gas_mass_till_last_time = ejected_gas_mass_till_this_time
ejected_metal_mass_till_last_time = ejected_metal_mass_till_this_time
ejected_H_mass_till_last_time = ejected_H_mass_till_this_time
ejected_He_mass_till_last_time = ejected_He_mass_till_this_time
ejected_C_mass_till_last_time = ejected_C_mass_till_this_time
ejected_N_mass_till_last_time = ejected_N_mass_till_this_time
ejected_O_mass_till_last_time = ejected_O_mass_till_this_time
ejected_Mg_mass_till_last_time = ejected_Mg_mass_till_this_time
ejected_Ca_mass_till_last_time = ejected_Ca_mass_till_this_time
ejected_Ne_mass_till_last_time = ejected_Ne_mass_till_this_time
ejected_Si_mass_till_last_time = ejected_Si_mass_till_this_time
ejected_S_mass_till_last_time = ejected_S_mass_till_this_time
ejected_Fe_mass_till_last_time = ejected_Fe_mass_till_this_time
ejected_gas_mass_till_this_time = 0
ejected_metal_mass_till_this_time = 0
ejected_H_mass_till_this_time = 0
ejected_He_mass_till_this_time = 0
ejected_C_mass_till_this_time = 0
ejected_N_mass_till_this_time = 0
ejected_O_mass_till_this_time = 0
ejected_Mg_mass_till_this_time = 0
ejected_Ca_mass_till_this_time = 0
ejected_Ne_mass_till_this_time = 0
ejected_Si_mass_till_this_time = 0
ejected_S_mass_till_this_time = 0
ejected_Fe_mass_till_this_time = 0
ejected_metal_mass_at_last_time = ejected_metal_mass_at_this_time
# Fe_H_mass_ratio_at_last_time = Fe_H_mass_ratio_at_this_time
Z_gas_this_time_step = total_metal_mass_in_gas_at_last_time / total_gas_mass_at_last_time
metal_mass_fraction_in_gas = [Z_gas_this_time_step,
total_H_mass_at_last_time / total_gas_mass_at_last_time,
total_He_mass_at_last_time / total_gas_mass_at_last_time,
total_C_mass_at_last_time / total_gas_mass_at_last_time,
total_N_mass_at_last_time / total_gas_mass_at_last_time,
total_O_mass_at_last_time / total_gas_mass_at_last_time,
total_Mg_mass_at_last_time / total_gas_mass_at_last_time,
total_Ca_mass_at_last_time / total_gas_mass_at_last_time,
total_Fe_mass_at_last_time / total_gas_mass_at_last_time,
total_Ne_mass_at_last_time / total_gas_mass_at_last_time,
total_Si_mass_at_last_time / total_gas_mass_at_last_time,
total_S_mass_at_last_time / total_gas_mass_at_last_time]
stellar_metal_mass_at_this_time = 0
stellar_H_mass_at_this_time = 0
stellar_He_mass_at_this_time = 0
stellar_C_mass_at_this_time = 0
stellar_N_mass_at_this_time = 0
stellar_O_mass_at_this_time = 0
stellar_Mg_mass_at_this_time = 0
stellar_Ca_mass_at_this_time = 0
stellar_Ne_mass_at_this_time = 0
stellar_Si_mass_at_this_time = 0
stellar_S_mass_at_this_time = 0
stellar_Fe_mass_at_this_time = 0
stellar_metal_luminosity_at_this_time = 0
stellar_H_luminosity_at_this_time = 0
stellar_He_luminosity_at_this_time = 0
stellar_C_luminosity_at_this_time = 0
stellar_N_luminosity_at_this_time = 0
stellar_O_luminosity_at_this_time = 0
stellar_Mg_luminosity_at_this_time = 0
stellar_Ca_luminosity_at_this_time = 0
stellar_Fe_luminosity_at_this_time = 0
stellar_Ne_luminosity_at_this_time = 0
stellar_Si_luminosity_at_this_time = 0
stellar_S_luminosity_at_this_time = 0
        # add up the metals contributed by the SSPs formed in each SF epoch;
        # consider only the SF events (epochs) that have already happened
Fe_production_SNII = 0
Mg_production_SNII = 0
Ca_production_SNII = 0
Ne_production_SNII = 0
Si_production_SNII = 0
S_production_SNII = 0
O_production_SNII = 0
epoch_index = 0
while epoch_index < epoch_index_limit:
# get age
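            # each star-formation epoch lasts 10^7 yr (10 Myr); the age of the SSP formed in this
            # epoch is the current time minus the start time of the epoch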
age_of_this_epoch = this_time - epoch_index * 10 ** 7
# get SFR, M_tot, igimf, integrated igimf, stellar lifetime and stellar remnant mass for this metallicity
# check if the info of this epoch has been recorded in previous time steps...
if epoch_index == len(epoch_info): # if not:
# SFR
if SFH_model == 'provided':
# This model apply the SFH specified by the SFH.txt
S_F_R_of_this_epoch = SFH_input[epoch_index]
elif SFH_model == 'gas_mass_dependent':
# In this model, the SFR is determined by the current gas mass
# if the current time is shorter than SFEN * 10^7 yr.
S_F_R_of_this_epoch = total_gas_mass_at_this_time * SFE / 10 ** 7
if SFH_input[epoch_index] == 0 or S_F_R_of_this_epoch < 3.5*1e-6 or epoch_index>99:
S_F_R_of_this_epoch = 0
# print(epoch_index, '*10 Myr SFR:', S_F_R_of_this_epoch)
print(S_F_R_of_this_epoch)
else:
print("Wrong input parameter for 'SFH_model'.")
# M_tot
# if total_gas_mass_at_last_time > 10**12:
# M_tot_of_this_epoch = max((min(((total_gas_mass_at_last_time - 10 * stellar_mass_at_last_time) / 5), 10**12)), 0)
# else:
# M_tot_of_this_epoch = 0
M_tot_of_this_epoch = S_F_R_of_this_epoch * 10 ** 7
# if S_F_F == 1:
# S_F_R_of_this_epoch = total_gas_mass_at_last_time**(0.99) * 3.97 * 10**(-10) # Pflamm-Altenburg & Kroupa 2009
# S_F_R_of_this_epoch_list += [S_F_R_of_this_epoch]
# if S_F_R_of_this_epoch < S_F_R_of_this_epoch_list[0] * 0.8:
# S_F_F = 0
# else:
# S_F_R_of_this_epoch = 0
#
#
# print(S_F_R_of_this_epoch)
# M_tot_of_this_epoch = S_F_R_of_this_epoch * 10 ** 7
# if S_F_R_of_this_epoch > 0:
# if high_time_resolution == True:
# time_axis_for_SFH_input_D += [epoch_index * 10 ** 7]
# # time_axis_for_SFH_input_D += [epoch_index * 10 ** 7 + 5 * 10 ** 5]
# # time_axis_for_SFH_input_D += [epoch_index * 10 ** 7 + 1 * 10 ** 6]
# # time_axis_for_SFH_input_D += [epoch_index * 10 ** 7 + 2 * 10 ** 6]
# # time_axis_for_SFH_input_D += [epoch_index * 10 ** 7 + 4 * 10 ** 6]
# # time_axis_for_SFH_input_D += [epoch_index * 10 ** 7 + 6 * 10 ** 6]
# # time_axis_for_SFH_input_D += [epoch_index * 10 ** 7 + 8 * 10 ** 6]
# time_axis_for_SFH_input_D += [epoch_index * 10 ** 7 + 10 * 10 ** 6]
# else:
# time_axis_for_SFH_input_D += [epoch_index * 10 ** 7]
# time_axis = sorted(list(set(time_axis + time_axis_for_SFH_input_D)))
# length_list_time_step = len(time_axis)
if S_F_R_of_this_epoch > 0:
# Total mass normalized IGIMF and unnormalized other IMFs
if imf == 'igimf':
igimf_of_this_epoch = function_get_igimf_for_this_epoch(S_F_R_of_this_epoch, Z_over_X,
this_time, epoch_index,
check_igimf) # Fe_over_H_number_ratio)
elif imf == 'Kroupa':
igimf_of_this_epoch = Kroupa_IMF
elif imf == 'Salpeter':
from IMFs import Salpeter_IMF
igimf_of_this_epoch = Salpeter_IMF
elif imf == 'diet_Salpeter':
igimf_of_this_epoch = diet_Salpeter_IMF
elif imf == 'given':
from IMFs import given_IMF
igimf_of_this_epoch = given_IMF
igimf = igimf_of_this_epoch
#
def igimf_xi_function(mass):
return igimf_of_this_epoch.custom_imf(mass, this_time)
def igimf_mass_function(mass):
return igimf_of_this_epoch.custom_imf(mass, this_time) * mass
def igimf_luminous_function(mass):
return igimf_of_this_epoch.custom_imf(mass, this_time) * \
stellar_luminosity.stellar_luminosity_function(mass)
# integrated igimf_mass_function from 0.08 to steller_mass_upper_bound
if imf == 'diet_Salpeter':
integrate_igimf_mass = quad(igimf_mass_function, 0.1, 100, limit=50)[0]
else:
integrate_igimf_mass = quad(igimf_mass_function, 0.08, steller_mass_upper_bound, limit=50)[0]
                    # as the integration of the IGIMF always carries a small computational error (at least for low SFRs),
                    # it needs to be corrected by multiplying by a calibration factor that is close to 1:
mass_calibration_factor = M_tot_of_this_epoch / integrate_igimf_mass
# print("mass_calibration_factor:", mass_calibration_factor) # the calibration factor is about 1%
# integrate_igimf_mass_l = quad(igimf_mass_function, 0.08, 3, limit=40)[0]
# integrate_igimf_mass_h = quad(igimf_mass_function, 8, steller_mass_upper_bound, limit=50)[0]
# integrate_igimf_mass_m = quad(igimf_mass_function, 3, 8, limit=40)[0]
# print("high mass star mass ratio:", integrate_igimf_mass_h/integrate_igimf_mass)
# print("middle mass star mass ratio:", integrate_igimf_mass_m/integrate_igimf_mass)
# print("Low mass star mass ratio:", integrate_igimf_mass_l/integrate_igimf_mass)
# integrate_igimf_number = quad(igimf_xi_function, 0.08, steller_mass_upper_bound, limit=50)[0]
# integrate_igimf_number_l = quad(igimf_xi_function, 0.08, 3, limit=40)[0]
# integrate_igimf_number_h = quad(igimf_xi_function, 8, steller_mass_upper_bound, limit=50)[0]
# integrate_igimf_number_m = quad(igimf_xi_function, 3, 8, limit=40)[0]
# print("high mass star number ratio:", integrate_igimf_number_h/integrate_igimf_number)
# print("middle mass star number ratio:", integrate_igimf_number_m/integrate_igimf_number)
# print("Low mass star number ratio:", integrate_igimf_number_l/integrate_igimf_number)
# Choose the closest metallicity
Z_select_in_table = function_select_metal(Z_gas_this_time_step, Z_table_list)
# Z_select_in_table = ('in/out', Z_select__low, Z_gas_this_time_step, Z_select__high)
Z_select_in_table_2 = function_select_metal(Z_gas_this_time_step, Z_table_list_2)
if str_yield_table != "portinari98":
Z_select_in_table_3 = function_select_metal(Z_gas_this_time_step, Z_table_list_3)
else:
Z_select_in_table_3 = None
# read in interpolated stellar lifetime table
(mass_1, mass, lifetime_table) = function_read_lifetime(str_yield_table, Z_select_in_table)
# read in interpolated stellar final mass
(mass_12, Mfinal_table) = function_read_Mfinal(str_yield_table, Z_select_in_table)
# read in interpolated stellar ejected metal mass
(mass_2, mass2, Mmetal_table) = function_read_Mmetal(str_yield_table, Z_select_in_table_2,
Z_select_in_table_3)
# read in interpolated stellar ejected elements mass
MH_table = function_read_M_element("H", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MHe_table = function_read_M_element("He", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MC_table = function_read_M_element("C", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MN_table = function_read_M_element("N", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MO_table = function_read_M_element("O", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MMg_table = function_read_M_element("Mg", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MNe_table = function_read_M_element("Ne", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MSi_table = function_read_M_element("Si", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MS_table = function_read_M_element("S", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MCa_table = function_read_M_element("Ca", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
MFe_table = function_read_M_element("Fe", str_yield_table, Z_select_in_table_2, Z_select_in_table_3)
M_element_table = [MH_table, MHe_table, MC_table, MN_table, MO_table, MMg_table, MNe_table,
MSi_table, MS_table, MCa_table, MFe_table]
                    # check whether the input lifetime and final-mass tables use the same mass grid
# if mass_1 != mass_12:
# print('Error! Stellar lifetime and final mass input data do not match.\n'
# 'Check the table file: yield_tables/rearranged___/setllar_final_mass_from_portinari98/portinari98_Z={}.txt\n'
# 'and table file: yield_tables/rearranged___/setllar_lifetime_from_portinari98/portinari98_Z={}.txt'.format(
# Z_select_in_table,
# Z_select_in_table))
# else:
# mass_grid_table = mass
# mass_grid_table2 = mass2
mass_grid_table = mass
mass_grid_table2 = mass2
last_time_age = age_of_this_epoch
number_in_SNIa_boundary = mass_calibration_factor * quad(igimf_xi_function, 3, 8, limit=50)[
0] # see function_number_SNIa below
if imf == 'diet_Salpeter' or imf == 'Salpeter':
number_all = mass_calibration_factor * quad(igimf_xi_function, 0.1, 100, limit=50)[0] # see function_number_SNIa below
else:
number_all = mass_calibration_factor * quad(igimf_xi_function, 0.08, steller_mass_upper_bound, limit=50)[0] # see function_number_SNIa below
# number_low = quad(igimf_xi_function, 0.08, 2, limit=40)[0] # see function_number_SNIa below
# number_up = quad(igimf_xi_function, 8, steller_mass_upper_bound, limit=50)[0] # see function_number_SNIa below
# print("up", number_up/number_all)
SNIa_number_prob = number_in_SNIa_boundary ** 2 / number_all / M_tot_of_this_epoch
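                    # SNIa normalization: the number of SNIa is assumed to scale with the square of the number of
                    # stars in the 3--8 Msun progenitor range divided by the total star number, per unit stellar
                    # mass formed in this epoch (used by function_number_SNIa_power_law below)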
# SNIa_number_prob = number_in_SNIa_boundary**2 / number_all * 10**2 * 0.61
# number_in_SNIa_boundary = SNIa_number_prob
# SNIa_number_prob = number_in_SNIa_boundary / integrate_igimf_mass
# print("SNIa SNIa_number_prob:", SNIa_number_prob)
# print("total star number", number_all)
# print("low", number_low/number_all)
age_of_this_epoch_at_end = (length_list_SFH_input - epoch_index - 1) * 10 ** 7
mass_boundary_at_end = fucntion_mass_boundary(age_of_this_epoch_at_end, mass_grid_table,
lifetime_table)
all_sf_imf.append([igimf, mass_boundary_at_end, this_time])
time_of_the_epoch_in_Gyr = epoch_index / 100
all_sfr.append([S_F_R_of_this_epoch, time_of_the_epoch_in_Gyr])
epoch_info.append(
[S_F_R_of_this_epoch, M_tot_of_this_epoch, igimf_of_this_epoch, integrate_igimf_mass,
mass_grid_table, lifetime_table, Mfinal_table, mass_grid_table2, Mmetal_table, M_element_table,
last_time_age, SNIa_number_prob, metal_mass_fraction_in_gas, mass_calibration_factor])
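                    # epoch_info entry layout (read back below when epoch_index < len(epoch_info)):
                    # [0] SFR, [1] M_tot, [2] igimf, [3] integrated IGIMF mass, [4] mass grid (lifetimes),
                    # [5] lifetime table, [6] final-mass table, [7] mass grid (yields), [8] metal yield table,
                    # [9] element yield tables, [10] age at last evaluation, [11] SNIa number probability,
                    # [12] gas composition at SF time, [13] mass calibration factor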
metal_in_gas = metal_mass_fraction_in_gas
else: # if SFR == 0
time_of_the_epoch_in_Gyr = epoch_index / 100
all_sfr.append([10 ** -22, time_of_the_epoch_in_Gyr])
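                    # placeholder entry, so that epoch_info keeps exactly one record per epoch
                    # (at index epoch_index) even when no stars formed in this epoch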
epoch_info.append(
[0, 0, 0, 0, 0, 0, 0, 0, 0, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0, 0, [0, 0, 0, 0, 0], 0])
else: # if epoch_index =! len(epoch_info)
S_F_R_of_this_epoch = epoch_info[epoch_index][0]
M_tot_of_this_epoch = epoch_info[epoch_index][1]
igimf_of_this_epoch = epoch_info[epoch_index][2]
integrate_igimf_mass = epoch_info[epoch_index][3]
mass_grid_table = epoch_info[epoch_index][4]
lifetime_table = epoch_info[epoch_index][5]
Mfinal_table = epoch_info[epoch_index][6]
mass_grid_table2 = epoch_info[epoch_index][7]
Mmetal_table = epoch_info[epoch_index][8]
M_element_table = epoch_info[epoch_index][9]
last_time_age = epoch_info[epoch_index][10]
epoch_info[epoch_index][10] = age_of_this_epoch
SNIa_number_prob = epoch_info[epoch_index][11]
metal_in_gas = epoch_info[epoch_index][12]
mass_calibration_factor = epoch_info[epoch_index][13]
def igimf_xi_function(mass):
return igimf_of_this_epoch.custom_imf(mass, this_time)
def igimf_mass_function(mass):
return igimf_of_this_epoch.custom_imf(mass, this_time) * mass
def igimf_luminous_function(mass):
return igimf_of_this_epoch.custom_imf(mass, this_time) * \
stellar_luminosity.stellar_luminosity_function(mass)
if S_F_R_of_this_epoch > 0:
# get M_tot (total initial mass of all star ever formed)
M_tot_up_to_this_time += M_tot_of_this_epoch
# calculate stellar initial mass that is still alive (dead star mass boundary)
mass_boundary = fucntion_mass_boundary(age_of_this_epoch, mass_grid_table, lifetime_table)
# output of this epoch
# Mtarget_table_number:
# 1: Mfinal_table
# 2: Mmetal_table
# 3: MH_table
# 4: M_element_table
# ...
if integrate_igimf_mass != 0:
# m1 = quad(igimf_mass_function, 0.08, 10, limit=40)[0]
# m2 = quad(igimf_mass_function, 10, 150, limit=40)[0]
# print(m1)
# print(m2)
# print(m1 / m2)
inte_limit = max(round((math.log(mass_boundary, 10)+1) / (math.log(steller_mass_upper_bound, 10)+1) * 50), 20)
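                    # adaptive integration accuracy: the number of quad subdivisions grows with the
                    # upper integration limit (mass_boundary), up to ~50, with a floor of 20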
if imf == 'diet_Salpeter':
integrate_star_mass = quad(igimf_mass_function, 0.1, 100, limit=inte_limit)[0] # normalized mass
stellar_luminosity_of_a_epoch_at_a_time_step = \
quad(igimf_luminous_function, 0.1, 100, limit=inte_limit)[0]
else:
integrate_star_mass = quad(igimf_mass_function, 0.08, mass_boundary, limit=inte_limit)[0] # normalized mass
stellar_luminosity_of_a_epoch_at_a_time_step = \
quad(igimf_luminous_function, 0.08, mass_boundary, limit=inte_limit)[0]
stellar_mass_of_a_epoch_at_a_time_step = mass_calibration_factor * integrate_star_mass # real mass
                    # apparent metal mass (neglect stellar evolution; only account for the initial metal mass at star formation):
                    # the stellar metal abundance is the gas abundance at the time of star formation (metal_in_gas).
stellar_metal_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[0]
stellar_H_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[1]
stellar_He_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[2]
stellar_C_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[3]
stellar_N_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[4]
stellar_O_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[5]
stellar_Mg_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[6]
stellar_Ca_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[7]
stellar_Fe_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[8]
stellar_Ne_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[9]
stellar_Si_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[10]
stellar_S_mass_of_this_epoch = stellar_mass_of_a_epoch_at_a_time_step * metal_in_gas[11]
                    # The luminosity-weighted metallicity is exact. However, a luminosity-weighted element abundance,
                    # e.g. the luminosity-weighted [Fe/H], is approximated by the number ratio of the
                    # luminosity-weighted Fe mass fraction to the luminosity-weighted H mass fraction.
                    # Below is the first step: calculate the luminosity-weighted mass fraction of each element.
stellar_metal_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[0]
stellar_H_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[1]
stellar_He_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[2]
stellar_C_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[3]
stellar_N_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[4]
stellar_O_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[5]
stellar_Mg_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[6]
stellar_Ca_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[7]
stellar_Fe_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[8]
stellar_Ne_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[9]
stellar_Si_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[10]
stellar_S_luminosity_of_this_epoch = stellar_luminosity_of_a_epoch_at_a_time_step * metal_in_gas[11]
#
BH_mass_of_this_epoch = get_BH_mass(mass_boundary, 1, 1, mass_calibration_factor,
steller_mass_upper_bound)
NS_mass_of_this_epoch = get_NS_mass(mass_boundary, 1, 1, mass_calibration_factor)
WD_mass_of_this_epoch = get_WD_mass(mass_boundary, 1, 1, mass_calibration_factor)
remnant_mass_of_this_epoch = WD_mass_of_this_epoch + NS_mass_of_this_epoch + BH_mass_of_this_epoch
                    # Note: M_tot_of_this_epoch != ejected_gas_mass_of_this_epoch +
                    #       stellar_mass_of_a_epoch_at_a_time_step + remnant_mass_of_this_epoch
                    #       because the remnant mass is a spline-fitted value,
                    #       while the metal mass ejection is calculated with M_metal = M_ini - M_final - M_H - M_He,
                    #       where M_final is the remnant mass given by the stellar yield table.
#
# # consider direct black hole as in Heger et al. (2003) (maybe not self-consistant with the stellar evolution table)
# if mass_boundary > 100:
# SNII_number_of_this_epoch_1 = quad(igimf_xi_function, mass_boundary, steller_mass_upper_bound, limit=50)[0]
# SNII_number_of_this_epoch_2 = 0
# elif mass_boundary > 40:
# SNII_number_of_this_epoch_1 = quad(igimf_xi_function, 100, steller_mass_upper_bound, limit=50)[0]
# SNII_number_of_this_epoch_2 = 0
# elif mass_boundary > 8:
# SNII_number_of_this_epoch_1 = quad(igimf_xi_function, 100, steller_mass_upper_bound, limit=50)[0]
# SNII_number_of_this_epoch_2 = quad(igimf_xi_function, mass_boundary, 40, limit=40)[0]
# else:
# SNII_number_of_this_epoch_1 = quad(igimf_xi_function, 100, steller_mass_upper_bound, limit=50)[0]
# SNII_number_of_this_epoch_2 = quad(igimf_xi_function, 8, 40, limit=40)[0]
# SNII_number_of_this_epoch = (SNII_number_of_this_epoch_1 + SNII_number_of_this_epoch_2) * mass_calibration_factor
if mass_boundary > 8:
SNII_number_of_this_epoch = \
quad(igimf_xi_function, mass_boundary, steller_mass_upper_bound, limit=50)[0]
SNII_ejected_mass_of_this_epoch = \
quad(igimf_xi_function, mass_boundary, steller_mass_upper_bound, limit=50)[0]
else:
SNII_number_of_this_epoch = quad(igimf_xi_function, 8, steller_mass_upper_bound, limit=50)[0]
SNII_number_of_this_epoch = SNII_number_of_this_epoch * mass_calibration_factor
SNII_energy_release_per_event = 0.03* 10 ** 51 # Bradamante 1998
SNII_number += SNII_number_of_this_epoch
SNII_energy_release += SNII_energy_release_per_event * SNII_number_of_this_epoch
# ejected_ :
metal_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary,
steller_mass_upper_bound, 2, 2,
mass_calibration_factor)
H_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound, 2,
"H", mass_calibration_factor)
He_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "He", mass_calibration_factor)
ejected_gas_mass_of_this_epoch = H_mass_of_this_epoch + He_mass_of_this_epoch + \
metal_mass_of_this_epoch
C_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "C", mass_calibration_factor)
N_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "N", mass_calibration_factor)
O_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "O", mass_calibration_factor)
Mg_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "Mg", mass_calibration_factor)
Ca_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "Ca", mass_calibration_factor)
S_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "S", mass_calibration_factor)
Si_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "Si", mass_calibration_factor)
Ne_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "Ne", mass_calibration_factor)
Fe_mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound,
2, "Fe", mass_calibration_factor)
Fe_production_SNII += Fe_mass_of_this_epoch
Ca_production_SNII += Ca_mass_of_this_epoch
Ne_production_SNII += Ne_mass_of_this_epoch
Si_production_SNII += Si_mass_of_this_epoch
S_production_SNII += S_mass_of_this_epoch
Mg_production_SNII += Mg_mass_of_this_epoch
O_production_SNII += O_mass_of_this_epoch
# if age_of_this_epoch == 1 * 10 ** 9:
# print("Fe_production_SNII", Fe_production_SNII)
# print("O_production_SNII", O_production_SNII)
# print("Mg_production_SNII", Mg_production_SNII)
# _mass_of_this_epoch = function_get_target_mass_in_range(mass_boundary, steller_mass_upper_bound, 2, "",
# mass_calibration_factor)
else:
print("Error: integrate_igimf_mass == 0 while S_F_R_of_this_epoch != 0.")
stellar_mass_of_a_epoch_at_a_time_step = 0
BH_mass_of_this_epoch = 0
NS_mass_of_this_epoch = 0
WD_mass_of_this_epoch = 0
remnant_mass_of_this_epoch = 0
ejected_gas_mass_of_this_epoch = 0
metal_mass_of_this_epoch = 0
H_mass_of_this_epoch = 0
He_mass_of_this_epoch = 0
C_mass_of_this_epoch = 0
N_mass_of_this_epoch = 0
O_mass_of_this_epoch = 0
Mg_mass_of_this_epoch = 0
Ca_mass_of_this_epoch = 0
Si_mass_of_this_epoch = 0
S_mass_of_this_epoch = 0
Ne_mass_of_this_epoch = 0
Fe_mass_of_this_epoch = 0
                # if SNIa are considered
                if SNIa_ON in (True, 'power-law', 'SD'):
                    # read in the SNIa yield table
                    # (here only the most abundant element yields are accounted for,
                    # but all elements should be included whenever the SNIa yield is comparable to the SNII yield)
Fe_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'Fe')
Si_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'Si')
O_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'O')
S_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'S')
Mg_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'Mg')
Ne_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'Ne')
if SNIa_yield_table=='Seitenzahl2013' or SNIa_yield_table=='Iwamoto1999':
Ca_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'Ca')
Ne_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'Ne')
S_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'S')
Si_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'Si')
C_mass_eject = SNIa_yield.function_mass_ejected(SNIa_yield_table, 'C')
total_mass_eject_per_SNIa = Fe_mass_eject + Si_mass_eject + O_mass_eject + S_mass_eject + Mg_mass_eject + Ne_mass_eject
Chandrasekhar_mass = 1.44
pre_SNIa_NS_mass = 1
                    SNIa_energy_release_per_event = 0.8*10**51  # [erg]
# # Yin J., Matteucci F., Vladilo G., 2011, A&A, 531, A136
# integrate SNIa number from last_delay_time to this_delay_time contributed by this SF epoch
if SNIa_ON == 'SD':
if age_of_this_epoch == 0:
mass_boundary_SNIa_Padovani93 = 100
# mass_boundary_SNIa_Greggio83 = 8
else:
mass_boundary_SNIa_Padovani93 = 10 ** (7.764 - (1.79 - (1.338 - math.log(age_of_this_epoch, 10) * 0.1116) ** 2) / 0.2232)
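                            # this fit presumably inverts a stellar mass--lifetime relation (Padovani 1993, per the
                            # variable name) to give the turnoff mass at this age; note that the active call below
                            # passes mass_boundary rather than this value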
# if age_of_this_epoch < 3 * 1e7:
# mass_boundary_SNIa_Greggio83 = 8
# else:
# mass_boundary_SNIa_Greggio83 = fucntion_mass_boundary_SNIa_Greggio83(age_of_this_epoch, 8)
# SNIa_number_from_this_epoch_till_this_time = function_number_SNIa_SD(mass_boundary_SNIa_Greggio83, igimf_xi_function, mass_calibration_factor)
# SNIa_number_from_this_epoch_till_this_time = function_number_SNIa_SD(mass_boundary_SNIa_Padovani93, igimf_xi_function, mass_calibration_factor)
SNIa_number_from_this_epoch_till_this_time = function_number_SNIa_SD(mass_boundary, igimf_xi_function, mass_calibration_factor)
# print('...', time_axis[time_step]/1e6, mass_boundary)
# print(SNIa_number_from_this_epoch_till_this_time)
                    elif SNIa_ON in (True, 'power-law'):
SNIa_number_from_this_epoch_till_this_time = function_number_SNIa_power_law(0, age_of_this_epoch,
SNIa_number_prob,
S_F_R_of_this_epoch)
                        # the following should result in 0.0022 +- 50% for a SSP,
                        # but is now calibrated to a different value to fit the galaxy [Fe/H] observations
if age_of_this_epoch == 10 * 10 ** 9 - 1 * 10 ** 7:
# print(function_number_SNIa_power_law(0, 10 * 10 ** 9, 1, 0))
# print("SN number per star in range:", SNIa_number_from_this_epoch_till_this_time/number_in_SNIa_boundary)
print("\nType Ia supernova activated. "
"Total SNIa number per solar mass of star formed at t = 10Gyr:",
SNIa_number_from_this_epoch_till_this_time / M_tot_of_this_epoch)
# print("Total SNIa number between t = 9 and 10 Gyr:",
# function_number_SNIa_power_law(9 * 10 ** 9 - 1 * 10 ** 7, age_of_this_epoch,
# number_in_SNIa_boundary,
# S_F_R_of_this_epoch))
# update the element masses
ejected_gas_mass_of_this_epoch += total_mass_eject_per_SNIa * SNIa_number_from_this_epoch_till_this_time
metal_mass_of_this_epoch += (Chandrasekhar_mass - (Chandrasekhar_mass - pre_SNIa_NS_mass) *
Z_gas_this_time_step) * SNIa_number_from_this_epoch_till_this_time
O_mass_of_SNIa = O_mass_eject * SNIa_number_from_this_epoch_till_this_time
Mg_mass_of_SNIa = Mg_mass_eject * SNIa_number_from_this_epoch_till_this_time
Fe_mass_of_SNIa = (Fe_mass_eject
# - (Chandrasekhar_mass - pre_SNIa_NS_mass) * Fe_H_mass_ratio_at_last_time * 0.7057 # this term is small and can be neglected
) * SNIa_number_from_this_epoch_till_this_time
# Si_mass_of_SNIa = Si_mass_eject * SNIa_number_from_this_epoch_till_this_time
# S_mass_of_SNIa = S_mass_eject * SNIa_number_from_this_epoch_till_this_time
# Ne_mass_of_SNIa = Ne_mass_eject * SNIa_number_from_this_epoch_till_this_time
if SNIa_yield_table == 'Seitenzahl2013' or SNIa_yield_table=='Iwamoto1999':
Ca_mass_of_SNIa = Ca_mass_eject * SNIa_number_from_this_epoch_till_this_time
Si_mass_of_SNIa = Si_mass_eject * SNIa_number_from_this_epoch_till_this_time
S_mass_of_SNIa = S_mass_eject * SNIa_number_from_this_epoch_till_this_time
Ne_mass_of_SNIa = Ne_mass_eject * SNIa_number_from_this_epoch_till_this_time
C_mass_of_SNIa = C_mass_eject * SNIa_number_from_this_epoch_till_this_time
O_mass_of_this_epoch += O_mass_of_SNIa
Mg_mass_of_this_epoch += Mg_mass_of_SNIa
Fe_mass_of_this_epoch += Fe_mass_of_SNIa
# Si_mass_of_this_epoch += Si_mass_of_SNIa
# S_mass_of_this_epoch += S_mass_of_SNIa
# Ne_mass_of_this_epoch += Ne_mass_of_SNIa
if SNIa_yield_table == 'Seitenzahl2013' or SNIa_yield_table=='Iwamoto1999':
Ca_mass_of_this_epoch += Ca_mass_of_SNIa
Ne_mass_of_this_epoch += Ne_mass_of_SNIa
S_mass_of_this_epoch += S_mass_of_SNIa
Si_mass_of_this_epoch += Si_mass_of_SNIa
C_mass_of_this_epoch += C_mass_of_SNIa
remnant_mass_of_this_epoch -= pre_SNIa_NS_mass * SNIa_number_from_this_epoch_till_this_time
WD_mass_of_this_epoch -= pre_SNIa_NS_mass * SNIa_number_from_this_epoch_till_this_time
SNIa_number_from_all_epoch += SNIa_number_from_this_epoch_till_this_time
SNIa_energy_release += SNIa_energy_release_per_event * SNIa_number_from_this_epoch_till_this_time
#
stellar_mass_at_this_time += stellar_mass_of_a_epoch_at_a_time_step
stellar_metal_mass_at_this_time += stellar_metal_mass_of_this_epoch
stellar_H_mass_at_this_time += stellar_H_mass_of_this_epoch
stellar_He_mass_at_this_time += stellar_He_mass_of_this_epoch
stellar_O_mass_at_this_time += stellar_O_mass_of_this_epoch
stellar_C_mass_at_this_time += stellar_C_mass_of_this_epoch
stellar_N_mass_at_this_time += stellar_N_mass_of_this_epoch
stellar_Ca_mass_at_this_time += stellar_Ca_mass_of_this_epoch
stellar_Si_mass_at_this_time += stellar_Si_mass_of_this_epoch
stellar_S_mass_at_this_time += stellar_S_mass_of_this_epoch
stellar_Ne_mass_at_this_time += stellar_Ne_mass_of_this_epoch
stellar_Mg_mass_at_this_time += stellar_Mg_mass_of_this_epoch
stellar_Fe_mass_at_this_time += stellar_Fe_mass_of_this_epoch
#
# The luminosity-weighted element mass fraction is,
# e.g., stellar_Fe_luminosity_at_this_time / stellar_luminosity_at_this_time
stellar_luminosity_at_this_time += stellar_luminosity_of_a_epoch_at_a_time_step
stellar_metal_luminosity_at_this_time += stellar_metal_luminosity_of_this_epoch
stellar_H_luminosity_at_this_time += stellar_H_luminosity_of_this_epoch
stellar_He_luminosity_at_this_time += stellar_He_luminosity_of_this_epoch
stellar_O_luminosity_at_this_time += stellar_O_luminosity_of_this_epoch
stellar_C_luminosity_at_this_time += stellar_C_luminosity_of_this_epoch
stellar_N_luminosity_at_this_time += stellar_N_luminosity_of_this_epoch
stellar_Ca_luminosity_at_this_time += stellar_Ca_luminosity_of_this_epoch
stellar_Ne_luminosity_at_this_time += stellar_Ne_luminosity_of_this_epoch
stellar_S_luminosity_at_this_time += stellar_S_luminosity_of_this_epoch
stellar_Si_luminosity_at_this_time += stellar_Si_luminosity_of_this_epoch
stellar_Mg_luminosity_at_this_time += stellar_Mg_luminosity_of_this_epoch
stellar_Fe_luminosity_at_this_time += stellar_Fe_luminosity_of_this_epoch
BH_mass_till_this_time += BH_mass_of_this_epoch
NS_mass_till_this_time += NS_mass_of_this_epoch
WD_mass_till_this_time += WD_mass_of_this_epoch
remnant_mass_at_this_time += remnant_mass_of_this_epoch
ejected_gas_mass_till_this_time += ejected_gas_mass_of_this_epoch
ejected_metal_mass_till_this_time += metal_mass_of_this_epoch
ejected_H_mass_till_this_time += H_mass_of_this_epoch
ejected_He_mass_till_this_time += He_mass_of_this_epoch
ejected_O_mass_till_this_time += O_mass_of_this_epoch
ejected_C_mass_till_this_time += C_mass_of_this_epoch
ejected_N_mass_till_this_time += N_mass_of_this_epoch
ejected_Ca_mass_till_this_time += Ca_mass_of_this_epoch
ejected_Si_mass_till_this_time += Si_mass_of_this_epoch
ejected_S_mass_till_this_time += S_mass_of_this_epoch
ejected_Ne_mass_till_this_time += Ne_mass_of_this_epoch
ejected_Mg_mass_till_this_time += Mg_mass_of_this_epoch
ejected_Fe_mass_till_this_time += Fe_mass_of_this_epoch
                # Go to the next SF epoch until all SF events before this time step are accounted for:
(epoch_index) = (epoch_index + 1)
# output of this time step
total_energy_release = SNIa_energy_release + SNII_energy_release
    ### yields at this time step from all SF epochs:
ejected_gas_mass_at_this_time = ejected_gas_mass_till_this_time - ejected_gas_mass_till_last_time
ejected_metal_mass_at_this_time = ejected_metal_mass_till_this_time - ejected_metal_mass_till_last_time
ejected_H_mass_at_this_time = ejected_H_mass_till_this_time - ejected_H_mass_till_last_time
ejected_He_mass_at_this_time = ejected_He_mass_till_this_time - ejected_He_mass_till_last_time
ejected_C_mass_at_this_time = ejected_C_mass_till_this_time - ejected_C_mass_till_last_time
ejected_N_mass_at_this_time = ejected_N_mass_till_this_time - ejected_N_mass_till_last_time
ejected_O_mass_at_this_time = ejected_O_mass_till_this_time - ejected_O_mass_till_last_time
ejected_Mg_mass_at_this_time = ejected_Mg_mass_till_this_time - ejected_Mg_mass_till_last_time
ejected_Ca_mass_at_this_time = ejected_Ca_mass_till_this_time - ejected_Ca_mass_till_last_time
ejected_Ne_mass_at_this_time = ejected_Ne_mass_till_this_time - ejected_Ne_mass_till_last_time
ejected_S_mass_at_this_time = ejected_S_mass_till_this_time - ejected_S_mass_till_last_time
ejected_Si_mass_at_this_time = ejected_Si_mass_till_this_time - ejected_Si_mass_till_last_time
ejected_Fe_mass_at_this_time = ejected_Fe_mass_till_this_time - ejected_Fe_mass_till_last_time
ejected_gas_Mg_over_Fe_till_this_time = function_element_abundunce(solar_abu_table, "Mg", "Fe",
ejected_Mg_mass_till_this_time,
ejected_Fe_mass_till_this_time, False)
ejected_gas_Mg_over_Fe_at_this_time = function_element_abundunce(solar_abu_table, "Mg", "Fe",
ejected_Mg_mass_at_this_time,
ejected_Fe_mass_at_this_time, True)
M_tot_of_this_time = M_tot_up_to_this_time - M_tot_up_to_last_time # new SF mass added at this time step
#
galaxy_mass_without_gas_at_this_time = stellar_mass_at_this_time + remnant_mass_at_this_time
if galaxy_mass_without_gas_at_this_time == 0 or ejected_gas_mass_at_this_time == 0:
expansion_factor_instantaneous = 1
expansion_factor_adiabat = 1
elif galaxy_mass_without_gas_at_this_time < ejected_gas_mass_at_this_time:
Warning_galaxy_mass_ejected_gas_mass = True
# Warning: galaxy_mass < ejected_gas_mass.
# This is due to too large a timestep.
        # It is easy to avoid this issue by applying "high_time_resolution=True",
        # but the simulation will then take much longer.
expansion_factor_instantaneous = 10
expansion_factor_adiabat = (
galaxy_mass_without_gas_at_this_time + ejected_gas_mass_at_this_time) / galaxy_mass_without_gas_at_this_time
else:
expansion_factor_instantaneous = galaxy_mass_without_gas_at_this_time / (
galaxy_mass_without_gas_at_this_time - ejected_gas_mass_at_this_time)
expansion_factor_adiabat = (
galaxy_mass_without_gas_at_this_time + ejected_gas_mass_at_this_time) / galaxy_mass_without_gas_at_this_time
expansion_factor = 10 ** (
(math.log(expansion_factor_instantaneous, 10) + math.log(expansion_factor_adiabat, 10)) / 2)
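    # the adopted expansion factor is the logarithmic (geometric) mean of the instantaneous
    # and adiabatic expansion factors computed above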
    # calculate the gravitational binding energy:
# gravitational_constant = 6.674
# # galaxy_mass_without_gas_at_this_time, original_gas_mass, total_gas_mass_at_this_time, ejected_gas_mass_at_this_time
# # gas_mass = max(ejected_gas_mass_at_this_time, 1)
# # galaxy mass--radii relation adopted from Dabringhausen 2008 eq.4
# Dabringhausen_2008_a = 2.95
# Dabringhausen_2008_b = 0.596
# initial_expansion_factor = 10000 # need change for every simulation. use the expansion_factor at final time
# # initial_expansion_factor = expansion_factor_list[-1]
# if expansion_factor_list == []:
# current_expansion_factor = initial_expansion_factor
# else:
# current_expansion_factor = initial_expansion_factor - expansion_factor_list[-1]
# # log_binding_energy = round(
# # math.log(3 / 5 * gravitational_constant * 1.989**2 / 3.086, 10) + 40 + (2 - Dabringhausen_2008_b) *
# # math.log(original_gas_mass, 10) - math.log(Dabringhausen_2008_a, 10) +
# # 6 * Dabringhausen_2008_b + math.log(current_expansion_factor, 10), 3)
# # # 40 = 30 (solar mass) * 2 - 11 (Gravitational constant) - 16 (pc to meter) + 7 (J to erg)
# # # binding_energy = 10 ** log_binding_energy # [erg]
### Element abundances in the gas phase (in solar unit):
# log_binding_energy = 53.7-(7-math.log(original_gas_mass, 10))*2 # 52.8 # 52.74 # #
# print('log_binding_energy', log_binding_energy)
# if total_energy_release > 0:
# print(epoch_index, "log total_energy_release =", math.log(total_energy_release, 10))
if outflow is not None and total_energy_release > 0 and math.log(total_energy_release, 10) > log_binding_energy_initial:
        lockup_and_outflow_mass = M_tot_of_this_epoch * outflow  # the gas mass locked up in brown dwarfs is about 4% and thus neglected, while the uniform outflow mass is often assumed to equal the newly formed stellar mass.
# print("lockup_and_outflow_mass", lockup_and_outflow_mass)
else:
lockup_and_outflow_mass = M_tot_of_this_epoch
total_gas_mass_at_this_time = total_gas_mass_at_last_time - lockup_and_outflow_mass + ejected_gas_mass_at_this_time
if total_gas_mass_at_this_time < 0.0001:
total_gas_mass_at_this_time = 0.0001
total_metal_mass_at_this_time = total_metal_mass_in_gas_at_last_time - lockup_and_outflow_mass * \
Z_gas_this_time_step + ejected_metal_mass_at_this_time
if total_metal_mass_at_this_time < 0.0001:
total_metal_mass_at_this_time = 0.0001
total_H_mass_at_this_time = total_H_mass_at_last_time - lockup_and_outflow_mass * (
total_H_mass_at_last_time / total_gas_mass_at_last_time) + ejected_H_mass_at_this_time
if total_H_mass_at_this_time < 0.0001:
total_H_mass_at_this_time = 0.0001
total_He_mass_at_this_time = total_He_mass_at_last_time - lockup_and_outflow_mass * (
total_He_mass_at_last_time / total_gas_mass_at_last_time) + ejected_He_mass_at_this_time
if total_He_mass_at_this_time < 0.0001:
total_He_mass_at_this_time = 0.0001
total_C_mass_at_this_time = total_C_mass_at_last_time - lockup_and_outflow_mass * (
total_C_mass_at_last_time / total_gas_mass_at_last_time) + ejected_C_mass_at_this_time
if total_C_mass_at_this_time < 0.0001:
total_C_mass_at_this_time = 0.0001
total_N_mass_at_this_time = total_N_mass_at_last_time - lockup_and_outflow_mass * (
total_N_mass_at_last_time / total_gas_mass_at_last_time) + ejected_N_mass_at_this_time
if total_N_mass_at_this_time < 0.0001:
total_N_mass_at_this_time = 0.0001
total_O_mass_at_this_time = total_O_mass_at_last_time - lockup_and_outflow_mass * (
total_O_mass_at_last_time / total_gas_mass_at_last_time) + ejected_O_mass_at_this_time
if total_O_mass_at_this_time < 0.0001:
total_O_mass_at_this_time = 0.0001
total_Mg_mass_at_this_time = total_Mg_mass_at_last_time - lockup_and_outflow_mass * (
total_Mg_mass_at_last_time / total_gas_mass_at_last_time) + ejected_Mg_mass_at_this_time
if total_Mg_mass_at_this_time < 0.0001:
total_Mg_mass_at_this_time = 0.0001
total_Ca_mass_at_this_time = total_Ca_mass_at_last_time - lockup_and_outflow_mass * (
total_Ca_mass_at_last_time / total_gas_mass_at_last_time) + ejected_Ca_mass_at_this_time
if total_Ca_mass_at_this_time < 0.0001:
total_Ca_mass_at_this_time = 0.0001
total_Ne_mass_at_this_time = total_Ne_mass_at_last_time - lockup_and_outflow_mass * (
total_Ne_mass_at_last_time / total_gas_mass_at_last_time) + ejected_Ne_mass_at_this_time
if total_Ne_mass_at_this_time < 0.0001:
total_Ne_mass_at_this_time = 0.0001
total_Si_mass_at_this_time = total_Si_mass_at_last_time - lockup_and_outflow_mass * (
total_Si_mass_at_last_time / total_gas_mass_at_last_time) + ejected_Si_mass_at_this_time
if total_Si_mass_at_this_time < 0.0001:
total_Si_mass_at_this_time = 0.0001
total_S_mass_at_this_time = total_S_mass_at_last_time - lockup_and_outflow_mass * (
total_S_mass_at_last_time / total_gas_mass_at_last_time) + ejected_S_mass_at_this_time
if total_S_mass_at_this_time < 0.0001:
total_S_mass_at_this_time = 0.0001
total_Fe_mass_at_this_time = total_Fe_mass_at_last_time - lockup_and_outflow_mass * (
total_Fe_mass_at_last_time / total_gas_mass_at_last_time) + ejected_Fe_mass_at_this_time
if total_Fe_mass_at_this_time < 0.0001:
total_Fe_mass_at_this_time = 0.0001
    # calculate the thermal energy of the gas assuming a uniform temperature corresponding to 2 keV per particle:
X_for_H = total_H_mass_at_this_time / total_gas_mass_at_this_time
Y_for_He = total_He_mass_at_this_time / total_gas_mass_at_this_time
Z_for_metal = total_metal_mass_at_this_time / total_gas_mass_at_this_time
mean_molecular_weight = 1 / (2 * X_for_H + 3 / 4 * Y_for_He + Z_for_metal / 2) * \
element_weight_table.function_element_weight("H") / 6.022140857 / 1.9891
# / 10**23 / 10**33 (i.e., 10**56) mean_molecular_weight in solar mass unit.
log_mean_molecular_weight = math.log(mean_molecular_weight, 10) - 56 # log [M_sun]
log_total_number_of_molecule = math.log(total_gas_mass_at_this_time,
10) - log_mean_molecular_weight # log [Number]
# 1 [keV] = 1.60217662 * 10**(-9) [erg]
log_energy_per_molecule = math.log(2 * 1.60217662, 10) - 9 # [erg]
log_total_gas_kinetic_energy = log_total_number_of_molecule + log_energy_per_molecule # log [erg]
total_gas_kinetic_energy = 10 ** log_total_gas_kinetic_energy
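    # Summary of the estimate above: the mean molecular weight follows the fully ionized approximation
    # mu = 1 / (2X + 3Y/4 + Z/2) in units of the hydrogen atomic weight; dividing by Avogadro's number and
    # the solar mass in grams (the factor 10**56 absorbed into the log) presumably expresses it in solar
    # masses per particle, so M_gas / mu is the particle number and each particle is assigned
    # 2 keV = 2 * 1.60217662e-9 erg, giving the total thermal energy of the gas in erg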
# if outflow is None:
# if total_energy_release == 0:
# outflow = None
# elif math.log(total_energy_release, 10) + 51 > log_binding_energy:
# outflow = True
# elif outflow == True:
# if total_energy_release == 0:
# outflow = None
# elif math.log(total_energy_release, 10) + 51 < log_binding_energy:
# outflow = None
#
# if gas_infall == True:
# function_update_element_gas_infall()
# gas metallicity_at_this_time = total_metal_mass_at_this_time (in gas) / total_gas_mass_at_this_time
Z_over_X = math.log(total_metal_mass_at_this_time / total_H_mass_at_this_time, 10) - math.log(Z_solar / X_solar,
10)
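    # [Z/X] of the gas: log10 of the metal-to-hydrogen mass ratio, relative to the solar ratio Z_solar/X_solar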
# Fe_H_mass_ratio_at_this_time = total_Fe_mass_at_this_time / total_H_mass_at_this_time
gas_X_at_this_time = X_for_H
gas_Y_at_this_time = Y_for_He
gas_Z_at_this_time = Z_for_metal
O_over_H_number_ratio = function_element_abundunce(solar_abu_table, "O", "H",
total_O_mass_at_this_time, total_H_mass_at_this_time, False)
Mg_over_H_number_ratio = function_element_abundunce(solar_abu_table, "Mg", "H",
total_Mg_mass_at_this_time, total_H_mass_at_this_time, False)
C_over_H_number_ratio = function_element_abundunce(solar_abu_table, "C", "H",
total_C_mass_at_this_time, total_H_mass_at_this_time, False)
N_over_H_number_ratio = function_element_abundunce(solar_abu_table, "N", "H",
total_N_mass_at_this_time, total_H_mass_at_this_time, False)
Ca_over_H_number_ratio = function_element_abundunce(solar_abu_table, "Ca", "H",
total_Ca_mass_at_this_time, total_H_mass_at_this_time, False)
S_over_H_number_ratio = function_element_abundunce(solar_abu_table, "S", "H",
total_S_mass_at_this_time, total_H_mass_at_this_time, False)
Si_over_H_number_ratio = function_element_abundunce(solar_abu_table, "Si", "H",
total_Si_mass_at_this_time, total_H_mass_at_this_time, False)
Ne_over_H_number_ratio = function_element_abundunce(solar_abu_table, "Ne", "H",
total_Ne_mass_at_this_time, total_H_mass_at_this_time, False)
Fe_over_H_number_ratio = function_element_abundunce(solar_abu_table, "Fe", "H",
total_Fe_mass_at_this_time, total_H_mass_at_this_time, False)
C_over_Fe_number_ratio = function_element_abundunce(solar_abu_table, "C", "Fe",
total_C_mass_at_this_time, total_Fe_mass_at_this_time, False)
N_over_O_number_ratio = function_element_abundunce(solar_abu_table, "N", "O",
                                                       total_N_mass_at_this_time, total_O_mass_at_this_time, False)
O_over_Fe_number_ratio = function_element_abundunce(solar_abu_table, "O", "Fe",
total_O_mass_at_this_time, total_Fe_mass_at_this_time, False)
Mg_over_Fe_number_ratio = function_element_abundunce(solar_abu_table, "Mg", "Fe",
total_Mg_mass_at_this_time, total_Fe_mass_at_this_time, False)
Ca_over_Fe_number_ratio = function_element_abundunce(solar_abu_table, "Ca", "Fe",
total_Ca_mass_at_this_time, total_Fe_mass_at_this_time, False)
Ne_over_Fe_number_ratio = function_element_abundunce(solar_abu_table, "Ne", "Fe",
total_Ne_mass_at_this_time, total_Fe_mass_at_this_time, False)
S_over_Fe_number_ratio = function_element_abundunce(solar_abu_table, "S", "Fe",
total_S_mass_at_this_time, total_Fe_mass_at_this_time, False)
Si_over_Fe_number_ratio = function_element_abundunce(solar_abu_table, "Si", "Fe",
total_Si_mass_at_this_time, total_Fe_mass_at_this_time, False)
    ### Element abundances of stars (considering only the metals at the stellar surface, i.e., neglecting stellar evolution.
    # This introduces errors for very low-mass stars, which are fully convective, but these are probably not important observationally):
##### mass weighted abundances
# (total metal in stars / total H in stars):
if stellar_mass_at_this_time > 0:
mass_weighted_stellar_X = stellar_H_mass_at_this_time / stellar_mass_at_this_time
else:
mass_weighted_stellar_X = primary_H_mass_fraction
if stellar_mass_at_this_time > 0:
mass_weighted_stellar_Y = stellar_He_mass_at_this_time / stellar_mass_at_this_time
else:
mass_weighted_stellar_Y = primary_He_mass_fraction
if stellar_mass_at_this_time > 0:
mass_weighted_stellar_Z = stellar_metal_mass_at_this_time / stellar_mass_at_this_time
else:
mass_weighted_stellar_Z = 1-primary_H_mass_fraction-primary_He_mass_fraction
mass_weighted_stellar_O_over_H = function_element_abundunce(solar_abu_table, "O", "H",
stellar_O_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_Mg_over_H = function_element_abundunce(solar_abu_table, "Mg", "H",
stellar_Mg_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_C_over_H = function_element_abundunce(solar_abu_table, "C", "H",
stellar_C_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_N_over_H = function_element_abundunce(solar_abu_table, "N", "H",
stellar_N_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_Ca_over_H = function_element_abundunce(solar_abu_table, "Ca", "H",
stellar_Ca_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_Si_over_H = function_element_abundunce(solar_abu_table, "Si", "H",
stellar_Si_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_S_over_H = function_element_abundunce(solar_abu_table, "S", "H",
stellar_S_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_Ne_over_H = function_element_abundunce(solar_abu_table, "Ne", "H",
stellar_Ne_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_Fe_over_H = function_element_abundunce(solar_abu_table, "Fe", "H",
stellar_Fe_mass_at_this_time,
stellar_H_mass_at_this_time, False)
mass_weighted_stellar_C_over_Fe = function_element_abundunce(solar_abu_table, "C", "Fe",
stellar_C_mass_at_this_time,
stellar_Fe_mass_at_this_time, False)
mass_weighted_stellar_N_over_O = function_element_abundunce(solar_abu_table, "N", "O",
stellar_N_mass_at_this_time,
                                                                stellar_O_mass_at_this_time, False)
mass_weighted_stellar_O_over_Fe = function_element_abundunce(solar_abu_table, "O", "Fe",
stellar_O_mass_at_this_time,
stellar_Fe_mass_at_this_time, False)
mass_weighted_stellar_Mg_over_Fe = function_element_abundunce(solar_abu_table, "Mg", "Fe",
stellar_Mg_mass_at_this_time,
stellar_Fe_mass_at_this_time, False)
mass_weighted_stellar_Ca_over_Fe = function_element_abundunce(solar_abu_table, "Ca", "Fe",
stellar_Ca_mass_at_this_time,
stellar_Fe_mass_at_this_time, False)
mass_weighted_stellar_Ne_over_Fe = function_element_abundunce(solar_abu_table, "Ne", "Fe",
stellar_Ne_mass_at_this_time,
stellar_Fe_mass_at_this_time, False)
mass_weighted_stellar_S_over_Fe = function_element_abundunce(solar_abu_table, "S", "Fe",
stellar_S_mass_at_this_time,
stellar_Fe_mass_at_this_time, False)
mass_weighted_stellar_Si_over_Fe = function_element_abundunce(solar_abu_table, "Si", "Fe",
stellar_Si_mass_at_this_time,
stellar_Fe_mass_at_this_time, False)
##### luminosity weighted abundances
# (total metal in stars / total H in stars):
if stellar_luminosity_at_this_time > 0:
luminosity_weighted_stellar_X = stellar_H_luminosity_at_this_time / stellar_luminosity_at_this_time
else:
luminosity_weighted_stellar_X = primary_H_mass_fraction
if stellar_luminosity_at_this_time > 0:
luminosity_weighted_stellar_Y = stellar_He_luminosity_at_this_time / stellar_luminosity_at_this_time
else:
luminosity_weighted_stellar_Y = primary_He_mass_fraction
if stellar_luminosity_at_this_time > 0:
luminosity_weighted_stellar_Z = stellar_metal_luminosity_at_this_time / stellar_luminosity_at_this_time
else:
luminosity_weighted_stellar_Z = 1-primary_H_mass_fraction-primary_He_mass_fraction
    # Below, the inputs should be the luminosity-weighted element masses,
    # e.g., stellar_O_luminosity_at_this_time / stellar_luminosity_at_this_time * total-stellar-mass-at-this-time,
    # but since stellar_luminosity_at_this_time and total-stellar-mass-at-this-time are the same for both elements,
    # the constants cancel in function_element_abundunce.
luminosity_weighted_stellar_O_over_H = function_element_abundunce(solar_abu_table, "O", "H",
stellar_O_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_Mg_over_H = function_element_abundunce(solar_abu_table, "Mg", "H",
stellar_Mg_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_C_over_H = function_element_abundunce(solar_abu_table, "C", "H",
stellar_C_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_N_over_H = function_element_abundunce(solar_abu_table, "N", "H",
stellar_N_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_Ca_over_H = function_element_abundunce(solar_abu_table, "Ca", "H",
stellar_Ca_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_Si_over_H = function_element_abundunce(solar_abu_table, "Si", "H",
stellar_Si_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_S_over_H = function_element_abundunce(solar_abu_table, "S", "H",
stellar_S_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_Ne_over_H = function_element_abundunce(solar_abu_table, "Ne", "H",
stellar_Ne_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_Fe_over_H = function_element_abundunce(solar_abu_table, "Fe", "H",
stellar_Fe_luminosity_at_this_time,
stellar_H_luminosity_at_this_time, False)
luminosity_weighted_stellar_C_over_Fe = function_element_abundunce(solar_abu_table, "C", "Fe",
stellar_C_luminosity_at_this_time,
stellar_Fe_luminosity_at_this_time, False)
luminosity_weighted_stellar_N_over_O = function_element_abundunce(solar_abu_table, "N", "O",
stellar_N_luminosity_at_this_time,
                                                                      stellar_O_luminosity_at_this_time, False)
luminosity_weighted_stellar_O_over_Fe = function_element_abundunce(solar_abu_table, "O", "Fe",
stellar_O_luminosity_at_this_time,
stellar_Fe_luminosity_at_this_time, False)
luminosity_weighted_stellar_Mg_over_Fe = function_element_abundunce(solar_abu_table, "Mg", "Fe",
stellar_Mg_luminosity_at_this_time,
stellar_Fe_luminosity_at_this_time, False)
luminosity_weighted_stellar_Ca_over_Fe = function_element_abundunce(solar_abu_table, "Ca", "Fe",
stellar_Ca_luminosity_at_this_time,
stellar_Fe_luminosity_at_this_time, False)
luminosity_weighted_stellar_Ne_over_Fe = function_element_abundunce(solar_abu_table, "Ne", "Fe",
stellar_Ne_luminosity_at_this_time,
stellar_Fe_luminosity_at_this_time, False)
luminosity_weighted_stellar_S_over_Fe = function_element_abundunce(solar_abu_table, "S", "Fe",
stellar_S_luminosity_at_this_time,
stellar_Fe_luminosity_at_this_time, False)
luminosity_weighted_stellar_Si_over_Fe = function_element_abundunce(solar_abu_table, "Si", "Fe",
stellar_Si_luminosity_at_this_time,
stellar_Fe_luminosity_at_this_time, False)
if stellar_H_mass_at_this_time == 0:
mass_weighted_stellar_Z_over_X = math.log(Z_0 / Z_solar, 10) # approximated with [Z]
luminosity_weighted_stellar_Z_over_X = mass_weighted_stellar_Z_over_X
else:
mass_weighted_stellar_Z_over_X = math.log(stellar_metal_mass_at_this_time / stellar_H_mass_at_this_time, 10) \
- math.log(Z_solar / X_solar, 10)
luminosity_weighted_stellar_Z_over_X = math.log(
stellar_metal_luminosity_at_this_time / stellar_H_luminosity_at_this_time, 10) \
- math.log(Z_solar / X_solar, 10)
    # the so-called [Z/H] as determined observationally via the equation [Z/H] = [Fe/H] + A*[Mg/Fe], where A = 0.94 (Thomas 2003)
if mass_weighted_stellar_Fe_over_H is None or mass_weighted_stellar_Mg_over_Fe is None:
mass_weighted_stellar_Z_over_H = None
else:
mass_weighted_stellar_Z_over_H = mass_weighted_stellar_Fe_over_H + 0.94 * mass_weighted_stellar_Mg_over_Fe
luminosity_weighted_stellar_Z_over_H = luminosity_weighted_stellar_Fe_over_H + 0.94 * luminosity_weighted_stellar_Mg_over_Fe
    ####################################################
    ##### record the outputs of this time step ##########
    ####################################################
if BH_mass_till_this_time == 0:
BH_mass_list += [10 ** (-10)]
else:
BH_mass_list += [BH_mass_till_this_time]
if NS_mass_till_this_time == 0:
NS_mass_list += [10 ** (-10)]
else:
NS_mass_list += [NS_mass_till_this_time]
if WD_mass_till_this_time == 0:
WD_mass_list += [10 ** (-10)]
elif WD_mass_till_this_time < 0:
Warning_WD_mass_till_this_time = True
        # Warning: more SNIa formed than WDs available. Please modify the SNIa rate assumption.
WD_mass_list += [10 ** (-10)]
else:
WD_mass_list += [WD_mass_till_this_time]
if Z_over_X == 0:
print("Warning: Z_over_X == 0")
gas_Z_over_X_list += [math.log(Z_0 / Z_solar, 10)] # approximated with [Z]
else:
gas_Z_over_X_list += [Z_over_X]
ejected_O_mass_till_this_time_tot_list += [ejected_O_mass_till_this_time]
ejected_O_mass_till_this_time_SNII_list += [O_production_SNII]
ejected_O_mass_till_this_time_SNIa_list += [ejected_O_mass_till_this_time - O_production_SNII]
ejected_Mg_mass_till_this_time_tot_list += [ejected_Mg_mass_till_this_time]
ejected_Mg_mass_till_this_time_SNII_list += [Mg_production_SNII]
ejected_Mg_mass_till_this_time_SNIa_list += [ejected_Mg_mass_till_this_time - Mg_production_SNII]
ejected_Fe_mass_till_this_time_tot_list += [ejected_Fe_mass_till_this_time]
ejected_Fe_mass_till_this_time_SNII_list += [Fe_production_SNII]
ejected_Fe_mass_till_this_time_SNIa_list += [ejected_Fe_mass_till_this_time - Fe_production_SNII]
ejected_Ca_mass_till_this_time_tot_list += [ejected_Ca_mass_till_this_time]
ejected_Ca_mass_till_this_time_SNII_list += [Ca_production_SNII]
ejected_Ca_mass_till_this_time_SNIa_list += [ejected_Ca_mass_till_this_time - Ca_production_SNII]
ejected_S_mass_till_this_time_tot_list += [ejected_S_mass_till_this_time]
ejected_S_mass_till_this_time_SNII_list += [S_production_SNII]
ejected_S_mass_till_this_time_SNIa_list += [ejected_S_mass_till_this_time - S_production_SNII]
ejected_Si_mass_till_this_time_tot_list += [ejected_Si_mass_till_this_time]
ejected_Si_mass_till_this_time_SNII_list += [Si_production_SNII]
ejected_Si_mass_till_this_time_SNIa_list += [ejected_Si_mass_till_this_time - Si_production_SNII]
ejected_Ne_mass_till_this_time_tot_list += [ejected_Ne_mass_till_this_time]
ejected_Ne_mass_till_this_time_SNII_list += [Ne_production_SNII]
ejected_Ne_mass_till_this_time_SNIa_list += [ejected_Ne_mass_till_this_time - Ne_production_SNII]
X_list += [gas_X_at_this_time]
Y_list += [gas_Y_at_this_time]
Z_list += [gas_Z_at_this_time]
O_over_H_list += [O_over_H_number_ratio]
Mg_over_H_list += [Mg_over_H_number_ratio]
C_over_H_list += [C_over_H_number_ratio]
N_over_H_list += [N_over_H_number_ratio]
Ca_over_H_list += [Ca_over_H_number_ratio]
Si_over_H_list += [Si_over_H_number_ratio]
S_over_H_list += [S_over_H_number_ratio]
Ne_over_H_list += [Ne_over_H_number_ratio]
Fe_over_H_list += [Fe_over_H_number_ratio]
C_over_Fe_list += [C_over_Fe_number_ratio]
N_over_O_list += [N_over_O_number_ratio]
O_over_Fe_list += [O_over_Fe_number_ratio]
Mg_over_Fe_list += [Mg_over_Fe_number_ratio]
Ca_over_Fe_list += [Ca_over_Fe_number_ratio]
Ne_over_Fe_list += [Ne_over_Fe_number_ratio]
S_over_Fe_list += [S_over_Fe_number_ratio]
Si_over_Fe_list += [Si_over_Fe_number_ratio]
stellar_O_over_H_list += [mass_weighted_stellar_O_over_H]
stellar_Mg_over_H_list += [mass_weighted_stellar_Mg_over_H]
stellar_C_over_H_list += [mass_weighted_stellar_C_over_H]
stellar_N_over_H_list += [mass_weighted_stellar_N_over_H]
stellar_Ca_over_H_list += [mass_weighted_stellar_Ca_over_H]
stellar_Si_over_H_list += [mass_weighted_stellar_Si_over_H]
stellar_S_over_H_list += [mass_weighted_stellar_S_over_H]
stellar_Ne_over_H_list += [mass_weighted_stellar_Ne_over_H]
stellar_X_list += [mass_weighted_stellar_X]
stellar_Y_list += [mass_weighted_stellar_Y]
stellar_Z_list += [mass_weighted_stellar_Z]
stellar_Fe_over_H_list += [mass_weighted_stellar_Fe_over_H]
stellar_C_over_Fe_list += [mass_weighted_stellar_C_over_Fe]
stellar_N_over_O_list += [mass_weighted_stellar_N_over_O]
stellar_O_over_Fe_list += [mass_weighted_stellar_O_over_Fe]
stellar_Mg_over_Fe_list += [mass_weighted_stellar_Mg_over_Fe]
stellar_Ca_over_Fe_list += [mass_weighted_stellar_Ca_over_Fe]
stellar_Ne_over_Fe_list += [mass_weighted_stellar_Ne_over_Fe]
stellar_S_over_Fe_list += [mass_weighted_stellar_S_over_Fe]
stellar_Si_over_Fe_list += [mass_weighted_stellar_Si_over_Fe]
stellar_Z_over_X_list += [mass_weighted_stellar_Z_over_X]
stellar_Z_over_H_list += [mass_weighted_stellar_Z_over_H]
stellar_O_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_O_over_H]
stellar_Mg_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_Mg_over_H]
stellar_C_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_C_over_H]
stellar_N_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_N_over_H]
stellar_Ca_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_Ca_over_H]
stellar_Si_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_Si_over_H]
stellar_S_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_S_over_H]
stellar_Ne_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_Ne_over_H]
stellar_X_list_luminosity_weighted += [luminosity_weighted_stellar_X]
stellar_Y_list_luminosity_weighted += [luminosity_weighted_stellar_Y]
stellar_Z_list_luminosity_weighted += [luminosity_weighted_stellar_Z]
stellar_Fe_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_Fe_over_H]
stellar_C_over_Fe_list_luminosity_weighted += [luminosity_weighted_stellar_C_over_Fe]
stellar_N_over_O_list_luminosity_weighted += [luminosity_weighted_stellar_N_over_O]
stellar_O_over_Fe_list_luminosity_weighted += [luminosity_weighted_stellar_O_over_Fe]
stellar_Mg_over_Fe_list_luminosity_weighted += [luminosity_weighted_stellar_Mg_over_Fe]
stellar_Ca_over_Fe_list_luminosity_weighted += [luminosity_weighted_stellar_Ca_over_Fe]
stellar_Ne_over_Fe_list_luminosity_weighted += [luminosity_weighted_stellar_Ne_over_Fe]
stellar_S_over_Fe_list_luminosity_weighted += [luminosity_weighted_stellar_S_over_Fe]
stellar_Si_over_Fe_list_luminosity_weighted += [luminosity_weighted_stellar_Si_over_Fe]
stellar_Z_over_X_list_luminosity_weighted += [luminosity_weighted_stellar_Z_over_X]
stellar_Z_over_H_list_luminosity_weighted += [luminosity_weighted_stellar_Z_over_H]
if remnant_mass_at_this_time == 0:
remnant_mass_list += [10 ** (-10)]
else:
remnant_mass_list += [remnant_mass_at_this_time]
if total_gas_mass_at_this_time == 0:
total_gas_mass_list += [10 ** (-10)]
else:
total_gas_mass_list += [total_gas_mass_at_this_time]
if ejected_gas_mass_till_this_time == 0 or ejected_gas_mass_till_this_time < 0:
ejected_gas_mass_list += [10 ** (-10)]
else:
ejected_gas_mass_list += [ejected_gas_mass_till_this_time]
ejected_gas_Mg_over_Fe_list += [ejected_gas_Mg_over_Fe_till_this_time]
instant_ejected_gas_Mg_over_Fe_list += [ejected_gas_Mg_over_Fe_at_this_time]
if M_tot_up_to_this_time > 0:
ejected_metal_mass_list += [ejected_metal_mass_till_this_time / M_tot_up_to_this_time]
else:
ejected_metal_mass_list += [0]
if expansion_factor_instantaneous_list == []:
expansion_factor_instantaneous_list += [1]
else:
expansion_factor_instantaneous_list += [
expansion_factor_instantaneous * expansion_factor_instantaneous_list[-1]]
if expansion_factor_adiabat_list == []:
expansion_factor_adiabat_list += [1]
else:
expansion_factor_adiabat_list += [expansion_factor_adiabat * expansion_factor_adiabat_list[-1]]
if expansion_factor_list == []:
expansion_factor_list += [1]
else:
expansion_factor_list += [expansion_factor * expansion_factor_list[-1]]
if stellar_mass_at_this_time == 0:
stellar_mass_list += [10 ** (-10)]
else:
stellar_mass_list += [stellar_mass_at_this_time]
SNIa_energy_release_list += [SNIa_energy_release]
SNIa_number_list += [SNIa_number_from_all_epoch]
if len(SNIa_number_per_century) == 0:
SNIa_number_per_century += [SNIa_number_list[0]]
else:
SNIa_number_per_century += [
(SNIa_number_list[-1] - SNIa_number_list[-2]) / (time_axis[time_step] - time_axis[time_step - 1]) * 100]
SNII_energy_release_list += [SNII_energy_release]
SNII_number_list += [SNII_number]
if len(SNII_number_per_century) == 0:
SNII_number_per_century += [SNII_number_list[0]]
else:
SNII_number_per_century += [
(SNII_number_list[-1] - SNII_number_list[-2]) / (time_axis[time_step] - time_axis[time_step - 1]) * 100]
if total_energy_release == 0:
total_energy_release_list += [0]
else:
total_energy_release_list += [total_energy_release] # [math.log((total_energy_release), 10)]
# if binding_energy == 0:
# binding_energy_list += [0]
# else:
# binding_energy_list += [binding_energy]#[math.log((binding_energy), 10)]
        if total_gas_kinetic_energy == 0:
total_gas_kinetic_energy_list += [0]
else:
total_gas_kinetic_energy_list += [total_gas_kinetic_energy] # [math.log((total_gas_kinetic_energy), 10)]
SN_number_per_century += [SNIa_number_per_century[-1] + SNII_number_per_century[-1]]
# go to next time step
if time_step / 50 > gc_collect_check:
gc_collect_check += 1
print("gc_collect:", gc_collect_check)
gc.collect()
(time_step) = (time_step + 1)
######################
### Show Warnings ###
######################
if Warning_ejected_gas_mass_of_this_epoch == True:
print('Warning: ejected_gas_mass_of_this_epoch < 0. See comments in galevo.py')
if Warning_WD_mass_till_this_time == True:
print("Warning: WD_mass_till_this_time < 0. See comments in galevo.py")
if Warning_galaxy_mass_ejected_gas_mass == True:
print('Warning: galaxy_mass < ejected_gas_mass. See comments in galevo.py.')
        # Warning: galaxy_mass < ejected_gas_mass.
        # This is caused by too large a timestep.
        # It is easy to avoid this issue by applying "high_time_resolution=True",
        # but the simulation will then take much longer.
computation_time_seconds = round((time.time() - start_time), 2)
minutes, seconds = divmod(computation_time_seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
days = int(days)
hours = int(hours)
minutes = int(minutes)
seconds = round(seconds, 4)
print("- Simulation complete. Computation time: {} d {} h {} m {} s -".format(days, hours, minutes, seconds))
###################
### output data ###
###################
# Remnant_Star_ratio = [0]*len(stellar_mass_list)
# for i in range(len(remnant_mass_list)):
# Remnant_Star_ratio[i] = remnant_mass_list[i]/stellar_mass_list[i]
# import csv
# with open('GalEvo_time.txt', 'w') as f:
# writer = csv.writer(f, delimiter=' ')
# f.write("# galevo.py output file.\n# time\n")
# writer.writerows(
# zip(time_axis))
# with open('GalEvo_ratio.txt', 'w') as f:
# writer = csv.writer(f, delimiter=' ')
# f.write("# galevo.py output file.\n# Remnant_Star_ratio\n")
# writer.writerows(
# zip(Remnant_Star_ratio))
###################
### output ###
###################
log_Z_0 = round(math.log(Z_0 / Z_solar, 10), 2)
text_output(imf, STF, round(math.log(max(SFH_input), 10), 1), SFEN, original_gas_mass, log_Z_0)
# if output plot applies
plot_output(plot_show, plot_save, imf, igimf, round(math.log(max(SFH_input), 10), 1), SFEN, log_Z_0, STF)
###################
### end ###
###################
return
# def function_update_element_gas_infall():
# return
# # # calculate the diet_Salpeter_mass_to_number_ratio:
# # Bell & de Jong (2001). Salpeter IMF x = 1.35 with a flat x = 0 slope below 0.35
# def function_xi_diet_Salpeter_IMF(mass):
# # integrate this function's output xi result in the number of stars in mass limits.
# xi = diet_Salpeter_IMF.custom_imf(mass, 0)
# return xi
# def function_mass_diet_Salpeter_IMF(mass):
# # integrate this function's output m result in the total stellar mass for stars in mass limits.
# m = mass * diet_Salpeter_IMF.custom_imf(mass, 0)
# return m
# integrate_all_for_function_mass_SNIa = quad(function_mass_diet_Salpeter_IMF, 0.1, 100, limit=50)[0]
# integrate_28_for_function_number_SNIa = quad(function_xi_diet_Salpeter_IMF, 3, 8, limit=50)[0]
# diet_Salpeter_mass_to_number_ratio = integrate_all_for_function_mass_SNIa / integrate_28_for_function_number_SNIa
def function_xi_Kroupa_IMF(mass): # there is no time dependence for Kroupa IMF
if mass < 0.08:
return 0
elif mass < 0.5:
return 2*mass**(-1.3)
elif mass < 150:
return mass**(-2.3)
else:
return 0
def function_mass_Kroupa_IMF(mass):
# integrate this function's output m result in the total stellar mass for stars in mass limits.
m = mass * function_xi_Kroupa_IMF(mass)
return m
integrate_all_for_function_mass_SNIa = quad(function_mass_Kroupa_IMF, 0.08, 150, limit=50)[0]
integrate_total_number_SNIa = quad(function_xi_Kroupa_IMF, 0.08, 150, limit=50)[0]
integrate_28_for_function_number_SNIa = quad(function_xi_Kroupa_IMF, 3, 8, limit=50)[0]
SNIa_number_prob_Kroupa = integrate_28_for_function_number_SNIa ** 2 / integrate_total_number_SNIa /integrate_all_for_function_mass_SNIa
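# Note (illustrative, not used by the simulation): the factor 2 in the 0.08-0.5 Msun segment of
# function_xi_Kroupa_IMF makes the two power laws continuous at the break mass, since
# 2 * 0.5**(-1.3) == 0.5**(-2.3) (~4.92). The IMF is un-normalized, but SNIa_number_prob_Kroupa
# above only combines ratios of its integrals (N(3-8)/N_total and N(3-8)/M_total),
# so the overall normalization cancels.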
def function_number_SNIa_power_law(last_delay_time, this_delay_time, SNIa_number_prob__, S_F_R_of_this_epoch):
    # This function calculates the number of SNIa between last_delay_time and this_delay_time.
# It is commonly assumed that the maximum stellar mass able to produce a degenerate C–O white dwarf is 8 M⊙,
# The minimum possible binary mass is assumed to be 3 M⊙ in order to ensure that the
# smallest possible white dwarf can accrete enough mass from the secondary star to reach the Chandrasekhar mass.
# see Greggio, L., & Renzini, A. 1983, A & A, 118, 217
# Thus we should normalize the DTD according to the number (but currently, mass) of stars between 1.5 and 8 solar mass
# normalized with a SNIa assuming fixed diet-Salpeter IMF (Bell et al. 149:289–312, 2003)
# See Dan Maoz and Filippo Mannucci 2012 review
global diet_Salpeter_mass_to_number_ratio
SNIa_normalization_parameter = funtion_SNIa_DTD_normalization_parameter(S_F_R_of_this_epoch)
# SNIa_normalization_parameter considers the possible variation of binary encounter rate in different system density
# integrate SNIa number from last_delay_time to this_delay_time using observationally determined DTD assuming diet-Salpeter IMF
SNIa_number_per_solar_mass = quad(function_SNIa_DTD, last_delay_time, this_delay_time, limit=40)[0]
# calculate actual SNIa event number
# SNIa_number = stellar_number_in_SNIa_boundary * SNIa_normalization_parameter * SNIa_number_per_solar_mass * diet_Salpeter_mass_to_number_ratio
SNIa_number = S_F_R_of_this_epoch * 10**7 * SNIa_number_per_solar_mass / SNIa_number_prob_Kroupa * SNIa_number_prob__ * SNIa_normalization_parameter
return SNIa_number
def function_number_SNIa_SD(mass_boundary, igimf_xi_function, mass_calibration_factor):
    # This function calculates the total number of SNIa events originating from a single 10 Myr star formation epoch
    # (i.e. a burst) since the birth of this stellar population until the current time step.
    # The age of the population corresponds to a stellar mass (mass_boundary) with a lifetime equal to this age.
if mass_boundary > 8:
SNIa_number = 0
else:
M2min = max(mass_boundary, 0.8)
M2max = 8
A_SNIa_Matteucci01 = 0.006 * 14 / 0.0024077124353644908 * 0.002
SNIa_number = mass_calibration_factor * A_SNIa_Matteucci01 * quad(function_SNIa_SD_xi_2, M2min, M2max, args=(igimf_xi_function))[0]
# print(SNIa_number)
return SNIa_number
def function_SNIa_SD_xi_2(M_2, igimf_xi_function):
# M_B_min = 3.9
# M_1_min = 3
# M_B_inf = max((2*M_2), M_B_min, M_1_min+M_2)
# M_up = min((8 + M_2), 9.5)
M_B_min = 3
M_B_inf = max((2*M_2), M_B_min)
M_up = 8 + M_2
xi_2 = quad(function_SD_f_times_xi, M_B_inf, M_up, args=(M_2, igimf_xi_function))[0]
return xi_2
def function_SD_f_times_xi(M_B, M_2, igimf_xi_function):
gamma_f = 2
mu = M_2 / M_B
if mu > 0.5 or mu == 0 or mu < 0:
f_times_xi = 0 # since f = 0
else:
# f_times_xi = (mu**gamma_f) * igimf_xi_function(M_B)
f_times_xi = (2**(1+gamma_f) * (1+gamma_f) * mu**gamma_f) * igimf_xi_function(M_B) / M_B
return f_times_xi
def funtion_SNIa_DTD_normalization_parameter(SFR):
    # This modification of the SNIa rate honors the fact that the number of SNIa should
    # depend not only on the number of potential progenitors but also on the density of the stellar system,
    # as expected from the dynamical encounter rate.
# x = 0.95
# xplusSFR = SFR / 16 + x
# gamma_DTD = 0.8 / (0.5 + math.log(xplusSFR, 10))
# output = xplusSFR ** gamma_DTD
output = 1
    # This is a toy-model relation that roughly keeps the SNIa rate per stellar mass formed unchanged for different SFRs (IMFs).
    # When the SFR is high, a top-heavy IMF reduces the number of possible SNIa progenitors within the mass range 1.5-8 solar mass,
    # while the dense stellar region pumps up the SNIa rate since stars have a larger chance to meet another star.
    # A lower SFR does not further reduce the SNIa number, as the low-SFR phase happens after the starburst epoch,
    # so the newly formed stars can still meet stars formed at earlier epochs regardless of the current SFR.
return output
####### the following code is for test, showing the renormalization function of SNIa# #######
# xxx = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
# y0 = funtion_SNIa_DTD_normalization_parameter(0.0001)
# yyy = [1, funtion_SNIa_DTD_normalization_parameter(0.001)/y0,
# funtion_SNIa_DTD_normalization_parameter(0.01)/y0, funtion_SNIa_DTD_normalization_parameter(0.1)/y0,
# funtion_SNIa_DTD_normalization_parameter(1)/y0, funtion_SNIa_DTD_normalization_parameter(10)/y0,
# funtion_SNIa_DTD_normalization_parameter(100)/y0, funtion_SNIa_DTD_normalization_parameter(1000)/y0,
# funtion_SNIa_DTD_normalization_parameter(10000)/y0]
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(200, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(xxx, yyy)
# plt.xlabel(r'log$_{10}$(SFR [$M_\odot$/yr])')
# plt.ylabel(r'SNIa Number renormalization')
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# plt.show()
def function_SNIa_DTD(delay_time):
# The delay time distribution (DTD) in the unit of per year per total stellar mass [solar]
# DTD for SNIa is adopted from Maoz & Mannucci 2012, 29, 447–465, their equation 13
# with a consistent assumed IMF – the Bell et al. 2003 diet-Salpeter IMF
# gaptime = 6 * 10 ** 7
gaptime = 4 * 10 ** 7
powerindex = -1
if delay_time < gaptime: # [yr] # 2.3 * 10 ** 7 for a burst of star formation from Greggio 1983
number = 0
else:
number = 4 * 10 ** (-4) * delay_time ** (powerindex) / (gaptime)**(powerindex) * (gaptime)**(-1) # DTD of Maoz2012
# Normalized such that the DTD integral over 10Gyr for diet-Salpeter is N_SN/M_sun = 2 * 10^-3 (M_sun^-1)
# number = 0.16288551211 * 10 ** (-4) / 10**(delay_time/(5*10**9)) / 10**(gaptime/(5*10**9)) * (gaptime)**(-1) # DTD of Lacchin19
# Normalized such that the DTD integral over 10Gyr for diet-Salpeter is N_SN/M_sun = 2 * 10^-3 (M_sun^-1)
# number = 0.50986652089 * 10 ** (-4) / 10**(delay_time/(5*10**9)) / 10**(gaptime/(5*10**9)) * (gaptime)**(-1) # DTD of Lacchin19
# Normalized such that the SNIa rate at 10 Gyr is the same as the Maoz2012's DTD.
# Normalized such that the DTD integral over 10Gyr for diet-Salpeter is N_SN/M_sun = 2 * 10^-3 (M_sun^-1)
# This value changes with igimf where top-heavy and bottom-heavy IGIMF will have lower number of SNIa
# as the number of stars within the mass range 3.0001 to 8 solar mass is smaller.
# The observational uncertainty being +-50%. See Maoz & Mannucci 2012 their Table 1
return number
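# Illustrative sanity check (a sketch, not executed by the simulation): for t > gaptime the DTD
# above reduces to number(t) = 4e-4 / t, so its integral from 4e7 yr to 10 Gyr is
# 4e-4 * ln(1e10 / 4e7) ~ 2.2e-3 SNIa per formed solar mass, consistent with the
# Maoz & Mannucci (2012) normalization quoted above. Assuming quad is imported at module level
# (as it is used elsewhere in this file), one can verify with:
# print(quad(function_SNIa_DTD, 4 * 10 ** 7, 10 ** 10, limit=50)[0])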
def function_read_lifetime(str_yield_table, Z_select_in_table):
#### if apply instantaneous recycling approximation ####
global instantaneous_recycling
if instantaneous_recycling == True:
mass_1 = 0
mass = [0.08, 1, 1.00001, 150]
lifetime_table = [1e12, 1e12, 0.1, 0.1]
elif Z_select_in_table[0] == 'out':
file_lifetime = open(
'yield_tables/rearranged/setllar_lifetime_from_portinari98/portinari98_Z={}.txt'.format(Z_select_in_table[1]),
'r')
data = file_lifetime.readlines()
metallicity = data[1]
mass_1 = data[3]
lifetime_ = data[5]
file_lifetime.close()
mass = [float(x) for x in mass_1.split()]
lifetime_table = [float(x) for x in lifetime_.split()]
else:
file_lifetime_low = open(
'yield_tables/rearranged/setllar_lifetime_from_portinari98/portinari98_Z={}.txt'.format(
Z_select_in_table[1]),
'r')
data_low = file_lifetime_low.readlines()
metallicity = data_low[1]
mass_1 = data_low[3]
lifetime_low = data_low[5]
file_lifetime_low.close()
file_lifetime_high = open(
'yield_tables/rearranged/setllar_lifetime_from_portinari98/portinari98_Z={}.txt'.format(
Z_select_in_table[3]),
'r')
data_high = file_lifetime_high.readlines()
lifetime_high = data_high[5]
file_lifetime_high.close()
mass = [float(x) for x in mass_1.split()]
lifetime_table_low = [float(x) for x in lifetime_low.split()]
lifetime_table_high = [float(x) for x in lifetime_high.split()]
x1 = Z_select_in_table[1]
x2 = Z_select_in_table[2]
x3 = Z_select_in_table[3]
lifetime_table = [y1+(y3-y1)*(x2-x1)/(x3-x1) for y1, y3 in zip(lifetime_table_low, lifetime_table_high)]
return (mass_1, mass, lifetime_table)
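# Note: the expression y1 + (y3 - y1) * (x2 - x1) / (x3 - x1) used above (and in the table
# readers below) is a linear interpolation in metallicity, with x1 < x2 < x3 being the lower
# grid, current, and upper grid metallicities returned by function_select_metal.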
def function_read_Mfinal(str_yield_table, Z_select_in_table):
if Z_select_in_table[0] == 'out':
file_final_mass = open(
"yield_tables/rearranged___/setllar_final_mass_from_portinari98/portinari98_Z={}.txt".format(
Z_select_in_table[1]),
'r')
data = file_final_mass.readlines()
metallicity2 = data[1]
mass_2 = data[3]
Mfinal_ = data[5]
file_final_mass.close()
Mfinal_table = [float(x) for x in Mfinal_.split()]
else:
file_final_mass = open(
"yield_tables/rearranged___/setllar_final_mass_from_portinari98/portinari98_Z={}.txt".format(
Z_select_in_table[1]),
'r')
data = file_final_mass.readlines()
metallicity2 = data[1]
mass_2 = data[3]
Mfinal_low = data[5]
file_final_mass.close()
Mfinal_table_low = [float(x) for x in Mfinal_low.split()]
file_final_mass = open(
"yield_tables/rearranged___/setllar_final_mass_from_portinari98/portinari98_Z={}.txt".format(
Z_select_in_table[3]),
'r')
data = file_final_mass.readlines()
Mfinal_high = data[5]
file_final_mass.close()
Mfinal_table_high = [float(x) for x in Mfinal_high.split()]
x1 = Z_select_in_table[1]
x2 = Z_select_in_table[2]
x3 = Z_select_in_table[3]
Mfinal_table = [y1+(y3-y1)*(x2-x1)/(x3-x1) for y1, y3 in zip(Mfinal_table_low, Mfinal_table_high)]
return (mass_2, Mfinal_table)
def lindexsplit(List, *lindex):
index = list(lindex)
index.sort()
templist1 = []
templist2 = []
templist3 = []
breakcounter = 0
itemcounter = 0
finalcounter = 0
numberofbreaks = len(index)
totalitems = len(List)
lastindexval = index[(len(index) - 1)]
finalcounttrigger = (totalitems - (lastindexval + 1))
for item in List:
itemcounter += 1
indexofitem = itemcounter - 1
nextbreakindex = index[breakcounter]
# Less than the last cut
if breakcounter <= numberofbreaks:
if indexofitem < nextbreakindex:
templist1.append(item)
elif breakcounter < (numberofbreaks - 1):
templist1.append(item)
templist2.append(templist1)
templist1 = []
breakcounter += 1
else:
if indexofitem <= lastindexval and indexofitem <= totalitems:
templist1.append(item)
templist2.append(templist1)
templist1 = []
else:
if indexofitem >= lastindexval and indexofitem < totalitems + 1:
finalcounter += 1
templist3.append(item)
if finalcounter == finalcounttrigger:
templist2.append(templist3)
return templist2
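# Usage sketch (illustrative only): lindexsplit cuts a list after the given index (inclusive)
# and returns the pieces, e.g.
# lindexsplit([10, 20, 30, 40, 50], 2) --> [[10, 20, 30], [40, 50]]
# Below it is used to split yield-table grids at index 153, separating the AGB (marigo01)
# part from the massive-star (WW95) part.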
def function_read_Mmetal(str_yield_table, Z_select_in_table_2, Z_select_in_table_3):
global mm, zz
if str_yield_table == "Kobayashi06" or str_yield_table == "portinari98":
if Z_select_in_table_2[0] == 'out':
file_Metal_eject = open(
'yield_tables/rearranged___/setllar_Metal_eject_mass_from_{}/{}_Z={}.txt'.format(str_yield_table,
str_yield_table,
Z_select_in_table_2[1]),
'r')
data = file_Metal_eject.readlines()
metallicity = data[1]
mass_2 = data[3]
Metal_eject_ = data[5]
file_Metal_eject.close()
mass = [float(x) for x in mass_2.split()]
Metal_eject_table = [float(x) for x in Metal_eject_.split()]
else:
file_Metal_eject = open(
'yield_tables/rearranged___/setllar_Metal_eject_mass_from_{}/{}_Z={}.txt'.format(str_yield_table,
str_yield_table,
Z_select_in_table_2[
1]),
'r')
data = file_Metal_eject.readlines()
metallicity = data[1]
mass_2 = data[3]
Metal_eject_low = data[5]
file_Metal_eject.close()
mass = [float(x) for x in mass_2.split()]
Metal_eject_table_low = [float(x) for x in Metal_eject_low.split()]
file_Metal_eject = open(
'yield_tables/rearranged___/setllar_Metal_eject_mass_from_{}/{}_Z={}.txt'.format(str_yield_table,
str_yield_table,
Z_select_in_table_2[
3]),
'r')
data = file_Metal_eject.readlines()
Metal_eject_high = data[5]
file_Metal_eject.close()
Metal_eject_table_high = [float(x) for x in Metal_eject_high.split()]
x1 = Z_select_in_table_2[1]
x2 = Z_select_in_table_2[2]
x3 = Z_select_in_table_2[3]
Metal_eject_table = [y1 + (y3 - y1) * (x2 - x1) / (x3 - x1) for y1, y3 in
zip(Metal_eject_table_low, Metal_eject_table_high)]
elif str_yield_table == "WW95":
if Z_select_in_table_2[2] < (Z_select_in_table_2[1]+Z_select_in_table_2[3])/2:
Z_select_in_table_2 = Z_select_in_table_2[1]
else:
Z_select_in_table_2 = Z_select_in_table_2[3]
if Z_select_in_table_3[2] < (Z_select_in_table_3[1]+Z_select_in_table_3[3])/2:
Z_select_in_table_3 = Z_select_in_table_3[1]
else:
Z_select_in_table_3 = Z_select_in_table_3[3]
file_Metal_eject = open(
'yield_tables/rearranged___/setllar_Metal_eject_mass_from_{}/{}_Z={}.txt'.format(str_yield_table,
str_yield_table,
Z_select_in_table_2),
'r')
data = file_Metal_eject.readlines()
mass_2 = data[3]
Metal_eject_ = data[5]
file_Metal_eject.close()
mass = [float(x) for x in mass_2.split()]
mass = lindexsplit(mass, 153)[1]
Metal_eject_table = [float(x) for x in Metal_eject_.split()]
Metal_eject_table = lindexsplit(Metal_eject_table, 153)[1]
file_Metal_eject = open(
'yield_tables/rearranged___/setllar_Metal_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
data = file_Metal_eject.readlines()
mass_2 = data[3]
Metal_eject_ = data[5]
file_Metal_eject.close()
mass_agb = [float(x) for x in mass_2.split()]
mass_agb = lindexsplit(mass_agb, 153)[0]
Metal_eject_table_agb = [float(x) for x in Metal_eject_.split()]
Metal_eject_table_agb = lindexsplit(Metal_eject_table_agb, 153)[0]
mass = mass_agb + mass
Metal_eject_table = Metal_eject_table_agb + Metal_eject_table
else:
print('Input str_yield_table does not exist.')
return (mass_2, mass, Metal_eject_table)
def function_read_M_element(element, str_yield_table, Z_select_in_table_2, Z_select_in_table_3):
if str_yield_table == "portinari98" or str_yield_table == "Kobayashi06":
if element == "H" or element == "He" or element == "C" or element == "N" or element == "O" or element == "Mg"\
or element == "Ne" or element == "Si" or element == "S" or element == "Ca" or element == "Fe":
file_M_eject = open(
'yield_tables/rearranged___/setllar_{}_eject_mass_from_{}/{}_Z={}.txt'.format(element, str_yield_table, str_yield_table,
Z_select_in_table_2[1]),
'r')
data = file_M_eject.readlines()
M_eject_low = data[5]
file_M_eject.close()
file_M_eject = open(
'yield_tables/rearranged___/setllar_{}_eject_mass_from_{}/{}_Z={}.txt'.format(element, str_yield_table, str_yield_table,
Z_select_in_table_2[3]),
'r')
data = file_M_eject.readlines()
M_eject_high = data[5]
file_M_eject.close()
else:
print("Error: element parameter for function_read_M_element do not exsit.")
M_eject_table_low = [float(x) for x in M_eject_low.split()]
M_eject_table_high = [float(x) for x in M_eject_high.split()]
x1 = Z_select_in_table_2[1]
x2 = Z_select_in_table_2[2]
x3 = Z_select_in_table_2[3]
if x3 == x1:
M_eject_table = M_eject_table_high
else:
M_eject_table = [y1 + (y3 - y1) * (x2 - x1) / (x3 - x1) for y1, y3 in
zip(M_eject_table_low, M_eject_table_high)]
elif str_yield_table == "WW95":
if Z_select_in_table_2[2] < (Z_select_in_table_2[1]+Z_select_in_table_2[3])/2:
Z_select_in_table_2 = Z_select_in_table_2[1]
else:
Z_select_in_table_2 = Z_select_in_table_2[3]
if Z_select_in_table_3[2] < (Z_select_in_table_3[1]+Z_select_in_table_3[3])/2:
Z_select_in_table_3 = Z_select_in_table_3[1]
else:
Z_select_in_table_3 = Z_select_in_table_3[3]
if element == "H":
file_M_eject = open(
'yield_tables/rearranged___/setllar_H_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "He":
file_M_eject = open(
'yield_tables/rearranged___/setllar_He_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "C":
file_M_eject = open(
'yield_tables/rearranged___/setllar_C_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "N":
file_M_eject = open(
'yield_tables/rearranged___/setllar_N_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "O":
file_M_eject = open(
'yield_tables/rearranged___/setllar_O_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "Mg":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Mg_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "Ne":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Ne_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "Si":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Si_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "S":
file_M_eject = open(
'yield_tables/rearranged___/setllar_S_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "Ca":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Ca_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
elif element == "Fe":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Fe_eject_mass_from_WW95/WW95_Z={}.txt'.format(Z_select_in_table_2),
'r')
else:
file_M_eject = 0
print("Error: element parameter for function_read_M_element do not exsit.")
data = file_M_eject.readlines()
M_eject_ = data[5]
file_M_eject.close()
M_eject_table = [float(x) for x in M_eject_.split()]
M_eject_table = lindexsplit(M_eject_table, 153)[1]
if element == "H":
file_M_eject = open(
'yield_tables/rearranged___/setllar_H_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "He":
file_M_eject = open(
'yield_tables/rearranged___/setllar_He_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "C":
file_M_eject = open(
'yield_tables/rearranged___/setllar_C_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "N":
file_M_eject = open(
'yield_tables/rearranged___/setllar_N_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "O":
file_M_eject = open(
'yield_tables/rearranged___/setllar_O_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "Mg":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Mg_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "Ne":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Ne_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "Si":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Si_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "S":
file_M_eject = open(
'yield_tables/rearranged___/setllar_S_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "Ca":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Ca_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
elif element == "Fe":
file_M_eject = open(
'yield_tables/rearranged___/setllar_Fe_eject_mass_from_marigo01/marigo01_Z={}.txt'.format(
Z_select_in_table_3),
'r')
else:
file_M_eject = 0
print("Error: element parameter for function_read_M_element do not exsit.")
data = file_M_eject.readlines()
M_eject_ = data[5]
file_M_eject.close()
M_eject_table_agb = [float(x) for x in M_eject_.split()]
M_eject_table_agb = lindexsplit(M_eject_table_agb, 153)[0]
M_eject_table = M_eject_table_agb + M_eject_table
return M_eject_table
def get_BH_mass(mass_boundary, mass_grid_table_number, Mtarget_table_number, mass_calibration_factor,
steller_mass_upper_bound):
if mass_boundary < steller_mass_upper_bound:
BH_mass = function_get_target_mass_in_range(max(mass_boundary, 40), steller_mass_upper_bound,
mass_grid_table_number,
Mtarget_table_number, mass_calibration_factor)
else:
BH_mass = 0
return BH_mass
def get_NS_mass(mass_boundary, mass_grid_table_number, Mtarget_table_number, mass_calibration_factor):
if mass_boundary < 40:
NS_mass = function_get_target_mass_in_range(max(mass_boundary, 8), 40, mass_grid_table_number,
Mtarget_table_number, mass_calibration_factor)
else:
NS_mass = 0
return NS_mass
def get_WD_mass(mass_boundary, mass_grid_table_number, Mtarget_table_number, mass_calibration_factor):
if mass_boundary < 8:
WD_mass = function_get_target_mass_in_range(max(mass_boundary, 0.08), 8, mass_grid_table_number,
Mtarget_table_number, mass_calibration_factor)
else:
WD_mass = 0
return WD_mass
def function_get_target_mass_in_range(lower_mass_limit, upper_mass_limit, mass_grid_table_number, Mtarget_table_number,
mass_calibration_factor):
    integrate_in_range = quad(integrator_for_function_get_target_mass_in_range, lower_mass_limit, upper_mass_limit,
                              (mass_grid_table_number, Mtarget_table_number), limit=40)[0]
target_mass_in_range = mass_calibration_factor * integrate_in_range
return target_mass_in_range
def integrator_for_function_get_target_mass_in_range(initial_mass, mass_grid_table_number, Mtarget_table_number):
global igimf_mass_function
mass = igimf_mass_function(initial_mass)
mass_fraction = function_get_target_mass(initial_mass, mass_grid_table_number, Mtarget_table_number) / initial_mass
integrator = mass * mass_fraction
return integrator
def function_get_target_mass(initial_mass, mass_grid_table_number, Mtarget_table_number):
global mass_grid_table, mass_grid_table2, Mfinal_table, Mmetal_table, M_element_table
if Mtarget_table_number == 1:
Mtarget_table = Mfinal_table
if Mtarget_table_number == 2:
Mtarget_table = Mmetal_table
if Mtarget_table_number == "H":
Mtarget_table = M_element_table[0]
if Mtarget_table_number == "He":
Mtarget_table = M_element_table[1]
if Mtarget_table_number == "C":
Mtarget_table = M_element_table[2]
if Mtarget_table_number == "N":
Mtarget_table = M_element_table[3]
if Mtarget_table_number == "O":
Mtarget_table = M_element_table[4]
if Mtarget_table_number == "Mg":
Mtarget_table = M_element_table[5]
if Mtarget_table_number == "Ne":
Mtarget_table = M_element_table[6]
if Mtarget_table_number == "Si":
Mtarget_table = M_element_table[7]
if Mtarget_table_number == "S":
Mtarget_table = M_element_table[8]
if Mtarget_table_number == "Ca":
Mtarget_table = M_element_table[9]
if Mtarget_table_number == "Fe":
Mtarget_table = M_element_table[10]
if mass_grid_table_number == 1:
mass_grid_table_n = mass_grid_table
if mass_grid_table_number == 2:
mass_grid_table_n = mass_grid_table2
if initial_mass < mass_grid_table_n[0] or initial_mass > mass_grid_table_n[-1]:
        print('Warning: function_get_target_mass initial_mass out of range')
        print("initial_mass =", initial_mass, "; mass grid boundaries =", mass_grid_table_n[0], "to", mass_grid_table_n[-1])
length_list_mass = len(mass_grid_table_n)
x = round(length_list_mass / 2)
i = 0
low = 0
high = length_list_mass
if initial_mass == mass_grid_table_n[0]:
x = 0
elif initial_mass == mass_grid_table_n[-1]:
x = -1
else:
while i < math.ceil(math.log(length_list_mass, 2)):
if initial_mass == mass_grid_table_n[x]:
break
elif initial_mass > mass_grid_table_n[x]:
low = x
x = x + round((high - x) / 2)
else:
high = x
x = x - round((x - low) / 2)
(i) = (i + 1)
if mass_grid_table_n[x - 1] < initial_mass < mass_grid_table_n[x]:
x = x - 1
target_mass = (Mtarget_table[x] + (Mtarget_table[x + 1] - Mtarget_table[x]) * (initial_mass - mass_grid_table_n[x]) /
(mass_grid_table_n[x + 1] - mass_grid_table_n[x]))
return target_mass
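# The function above performs a binary search for the grid cell that brackets initial_mass and
# then linearly interpolates the selected target quantity (remnant mass, total metal yield, or a
# single-element yield) between the two bracketing grid points.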
# ### Define initial stellar mass boundary for WD, NS, and BH.
# mass_boundary_WD_to_NS = 8 # [solar mass]
# mass_boundary_NS_to_BH = 40 # [solar mass]
#
# # Define the observational sensitive mass range for galaxy total mass estimation
# mass_boundary_observe = [mass_boundary_observe_low, mass_boundary_observe_up]
# ### Calculate total mass at each time ###
# M_tot = 0
# M_tot_time_list = []
# new_time = 1
# M_tot_list = []
# for SFH in SFH_input:
# formed_mass = SFH * 10 ** 7
# M_tot += formed_mass
# M_tot_time_list += [new_time]
# if M_tot == 0:
# M_tot_list += [1, 1]
# else:
# M_tot_list += [M_tot, M_tot]
# new_time += 10 ** 7
# M_tot_time_list += [new_time]
#
# Log_M_tot = math.log(M_tot, 10)
# M_tot_time_list += [time_axis[-1]]
# M_tot_list += [M_tot_list[-1]]
#
#
# ### Calculate the observational estimated total mass of the galaxy ###
# # Assuming the estimation done everything right, e.g., stellar evolution module, SFH, dust extinction, metallicity,
# # excepet assumed an universal Kroupa IMF that is not what really happend
# # (although this assumption contradict itself because it is impossible to get everything else right with a wrong IMF).
# # We using the stellar population with mass in 0.08 - 3 solar mass to estimate the total stellar mass with Kroupa IMF
# # and compare it with the real total mass
#
# imf_file_name = "{}_IMF".format(IMF_name)
#
# # estimated total mass with Kroupa IMF =
# M_tot_est_list = []
# IMF = __import__(imf_file_name)
# a = quad(IMF.imf, 0.08, steller_mass_upper_bound, limit=50)[0]
# b = quad(IMF.imf, mass_boundary_observe[0], mass_boundary_observe[1], limit=40)[0]
# for mass_in_range in M_in_range_list:
# est_mass = mass_in_range * a / b
# if est_mass == 0:
# M_tot_est_list += [1]
# else:
# M_tot_est_list += [est_mass]
def function_get_igimf_for_this_epoch(SFR_input, Z_over_X, this_time, this_epoch, check_igimf):
    # This function calculates the IGIMF, writes it to the directory Generated_IGIMFs, and imports the file.
    # With igimf = function_get_igimf_for_every_epoch(SFH_input, Z, Z_solar),
    # the IGIMF can be called by: igimf.custom_imf(stellar_mass, this_time).
function_generate_igimf_file(SFR=SFR_input, Z_over_X=Z_over_X, printout=None, sf_epoch=this_epoch,
check_igimf=check_igimf)
if SFR_input == 0:
igimf_file_name = "igimf_SFR_Zero"
else:
igimf_file_name = "igimf_SFR_{}_Fe_over_H_{}".format(round(math.log(SFR_input, 10) * 100000),
round(Z_over_X * 100000))
igimf = __import__(igimf_file_name)
# import os
# if os.path.isfile('Generated_IGIMFs/' + igimf_file_name + '.py'):
# igimf = __import__(igimf_file_name)
# else:
# cwd = os.getcwd()
# igimf = __import__(cwd + '/galIMF/Generated_IGIMFs/' + igimf_file_name)
# if shows ModuleNotFoundError:
# No module named 'igimf_SFR_..._Fe_over_H_...',
# then try clear all (except for the first and last) lines in the file Generated_IGIMFs/all_igimf_list.txt.
# This will force the program to generate new IGIMF functions for future use,
# instead of looking for the IGIMF in the old generated ones.
return igimf
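# Usage sketch (illustrative only; the argument values below are arbitrary): the returned module
# exposes custom_imf(mass, time), e.g.
# igimf_epoch_0 = function_get_igimf_for_this_epoch(1.0, 0.0, 10 ** 7, 0, True)
# xi_at_1_Msun = igimf_epoch_0.custom_imf(1, 0)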
def function_generate_igimf_file(SFR=None, Z_over_X=None, printout=False, sf_epoch=0, check_igimf=False):
    # This function checks whether the parameters for generating a new IGIMF match an existing one;
    # if not, it generates a new IGIMF and adds it to the generated-IGIMF list.
# --------------------------------------------------------------------------------------------------------------------------------
# import modules and libraries
# --------------------------------------------------------------------------------------------------------------------------------
import galimf # galIMF containing IGIMF function and OSGIMF function and additional computational modules
import numpy as np
import math
import time
import os
Generated_IGIMFs_path = 'Generated_IGIMFs'
if os.path.isdir(Generated_IGIMFs_path) == False:
Generated_IGIMFs_path = '/galIMF/Generated_IGIMFs'
if os.path.isdir(Generated_IGIMFs_path) == False:
cwd = os.getcwd()
Generated_IGIMFs_path = cwd + '/galIMF/Generated_IGIMFs'
file_name = '/igimf_SFR_{}_Fe_over_H_{}.py'.format(round(math.log(SFR, 10) * 100000),
round(Z_over_X * 100000))
file_path_and_name = Generated_IGIMFs_path + file_name
# --------------------------------------------------------------------------------------------------------------------------------
# check if the required IGIMF has already been generated
# --------------------------------------------------------------------------------------------------------------------------------
exist = 0
if check_igimf == True:
if os.path.isfile(file_path_and_name):
igimf_file_name = "igimf_SFR_{}_Fe_over_H_{}".format(round(math.log(SFR, 10) * 100000),
round(Z_over_X * 100000))
igimf_____ = __import__(igimf_file_name)
if hasattr(igimf_____, "custom_imf"):
# print("find IGIMF file '{}' for a galaxy with [Z/X]={}, SFR={}".format(file_path_and_name, round(Z_over_X, 2), SFR))
exist = 1
# else:
# print("{} is not a file".format(file_path_and_name))
# check_file = open(Generated_IGIMFs_path + '/all_igimf_list.txt', 'r')
# igimf_list_line = check_file.readlines()
# check_file.close()
# i = 0
# while i < len(igimf_list_line):
# data = [float(a_block) for a_block in igimf_list_line[i].split()]
# if SFR == data[0] and Z_over_X == data[1]:
# exist = 1
# break
# (i) = (i + 1)
if exist == 0 and SFR != 0:
# print("Generating new IGIMF file '{}' for a galaxy with [Z/X]={}, SFR={}".format(file_path_and_name, Z_over_X, SFR))
# # --------------------------------------------------------------------------------------------------------------------------------
# # add new headline into the list file -- all_igimf_list.txt:
# # --------------------------------------------------------------------------------------------------------------------------------
#
# check_file = open('Generated_IGIMFs/all_igimf_list.txt', 'r')
# igimf_list = check_file.read()
# check_file.close()
#
# check_file = open('Generated_IGIMFs/all_igimf_list.txt', 'w')
# new_headline = igimf_list + '{} {}\n'.format(SFR, Z_over_X)
# check_file.write(new_headline)
# check_file.close()
# --------------------------------------------------------------------------------------------------------------------------------
        # Define code parameters necessary for the computations:
        # --------------------------------------------------------------------------------------------------------------------------------
        # the most crucial ones, which you most likely might want to change
if SFR is None:
SFR = float(
input(
"Please input the galaxy-wide SFR in solar mass per year and ended the input with the return key. "
"(A typical input SFR is from 0.0001 to 10000. "
"We recommed a value smallar than 0.01 for the first run as high SFR calculations take more time.)\n"
"You can input 1e-4 as 0.0001\n"
"\nSFR [Msolar/yr] = "))
# Star Formation Rate [solar mass / yr]
if SFR != 0:
bindw = galimf.resolution_histogram_relative = 10 ** (max((0 - math.log(SFR, 10)), 0) ** (0.2) - 1.9)
            # will change the resolution of the histogram for optimal sampling, automatically adjusted with the SFR value.
gwIMF_model = "IGIMF_Z"
if gwIMF_model == "IGIMF3":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 1 # 1 # see file 'galimf.py'
alpha1_model = 1 # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF_Z":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'Z' # 1 # see file 'galimf.py'
alpha1_model = 'Z' # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF2d5":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'IGIMF2.5' # 1 # see file 'galimf.py'
alpha1_model = 'IGIMF2.5' # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF2":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 0 # 1 # see file 'galimf.py'
alpha1_model = 0 # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF_R14":
alpha3_model = 'R14' # 'R14' # 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'R14' # 'R14' # 1 # see file 'galimf.py'
alpha1_model = 0 # 0 # see file 'galimf.py'
beta_model = 0
R14orNOT = True
# ----------------------------------------------------------------
# Parameters below are internal parameters of the theory.
        # Read Yan et al. 2017 carefully before changing them!
delta_t = 10. # star formation epoch [Myr]
I_ecl = 1. # normalization factor in the Optimal Sampling condition equation
M_ecl_U = 10 ** 9 # 10**(0.75 * math.log(SFR, 10) + 4.8269) # Recchi 2009
# 10 ** 15 # embedded cluster mass upper limit [solar mass]
M_ecl_L = 5. # embedded cluster mass lower limit [solar mass]
I_str = 1. # normalization factor in the Optimal Sampling condition equation
M_str_L = 0.08 # star mass lower limit [solar mass]
M_turn = 0.5 # IMF power-index breaking mass [solar mass]
M_turn2 = 1. # IMF power-index breaking mass [solar mass]
M_str_U = 150 # star mass upper limit [solar mass]
if printout == True:
print("\n - GalIMF run in progress..")
start_time = time.time()
# --------------------------------------------------------------------------------------------------------------------------------
# Construct IGIMF:
# --------------------------------------------------------------------------------------------------------------------------------
if printout == True:
print("\nCalculating IGIMF......")
galimf.function_galimf(
"I", # IorS ### "I" for IGIMF; "OS" for OSGIMF
R14orNOT, # True or False
SFR, # Star Formation Rate [solar mass / yr]
alpha3_model, # IMF high-mass-end power-index model, see file 'alpha3.py'
delta_t, # star formation epoch [Myr]
Z_over_X, # M_over_H
I_ecl, # normalization factor in the Optimal Sampling condition equation
M_ecl_U, # embedded cluster mass upper limit [solar mass]
M_ecl_L, # embedded cluster mass lower limit [solar mass]
beta_model, ### ECMF power-index model, see file 'beta.py'
I_str, # normalization factor in the Optimal Sampling condition equation
M_str_L, # star mass lower limit [solar mass]
alpha_1, # IMF low-mass-end power-index
alpha1_model, # see file 'alpha1.py'
M_turn, # IMF power-index change point [solar mass]
alpha_2, # IMF middle-mass power-index
alpha2_model, # see file 'alpha2.py'
M_turn2, # IMF power-index change point [solar mass]
M_str_U, # star mass upper limit [solar mass]
printout
)
if printout == True:
### Decorate the output file ###
igimf_raw = np.loadtxt('GalIMF_IGIMF.txt')
if M_str_U - igimf_raw[-1][0] > 0.01:
file = open('GalIMF_IGIMF.txt', 'a')
file.write("{} 0\n\n".format(igimf_raw[-1][0] + 0.01))
file.write("{} 0".format(M_str_U))
file.close()
else:
file = open('GalIMF_IGIMF.txt', 'a')
file.write("{} 0".format(M_str_U))
file.close()
global masses, igimf
masses = np.array(galimf.List_M_str_for_xi_str)
igimf = np.array(galimf.List_xi)
#######################################################
# generated igimf is normalized by default to a total mass formed in 10 Myr given the SFR,
# i.e., total stellar mass.
# to change the normalization, uncomment the below commented part:
#######################################################
# Norm = simps(igimf*masses,masses) #- normalization to a total mass
# Norm = simps(igimf,masses) #- normalization to number of stars
# Mtot1Myr = SFR*10*1.e6 #total mass formed in 10 Myr
# igimf = np.array(igimf)*Mtot1Myr/Norm
#######################################################
global length_of_igimf
length_of_igimf = len(igimf)
def write_imf_input2():
global file, masses, igimf
if SFR == 0:
file = open('Generated_IGIMFs/igimf_SFR_Zero.py', 'w')
file.write("def custom_imf(mass, time): # there is no time dependence for IGIMF\n")
file.write(" return 0\n")
file.close()
else:
file = open('Generated_IGIMFs/igimf_SFR_{}_Fe_over_H_{}.py'.format(round(math.log(SFR, 10) * 100000),
round(Z_over_X * 100000)), 'w')
file.write("# File to define a custom IMF\n"
"# The return value represents the chosen IMF value for the input mass\n\n\n")
file.write("def custom_imf(mass, time): # there is no time dependence for IGIMF\n")
file.write(" if mass < 0.08:\n")
file.write(" return 0\n")
file.write(" elif mass < %s:\n" % masses[1])
if masses[0] - masses[1] == 0:
k = 0
b = 0
else:
k = (igimf[0] - igimf[1]) / (masses[0] - masses[1])
b = igimf[0] - k * masses[0]
file.write(" return {} * mass + {}\n".format(k, b))
write_imf_input_middle2(1)
file.write(" else:\n")
file.write(" return 0\n")
file.close()
return
def write_imf_input_middle2(i):
global file, length_of_igimf
while i < length_of_igimf - 1:
file.write(" elif mass < %s:\n" % masses[i + 1])
if masses[i] - masses[i + 1] == 0:
k = 0
b = 0
else:
k = (igimf[i] - igimf[i + 1]) / (masses[i] - masses[i + 1])
b = igimf[i] - k * masses[i]
file.write(" return {} * mass + {}\n".format(k, b))
(i) = (i + 3)
return
write_imf_input2()
def write_imf_input3():
global file, masses, igimf
if SFR == 0:
file = open('Generated_IGIMFs/igimf_epoch_{}.py'.format(sf_epoch), 'w')
file.write("def custom_imf(mass, time): # there is no time dependence for IGIMF\n")
file.write(" return 0\n")
file.close()
else:
file = open('Generated_IGIMFs/igimf_epoch_{}.py'.format(sf_epoch), 'w')
file.write("# File to define a custom IMF\n"
"# The return value represents the chosen IMF value for the input mass\n\n\n")
file.write("def custom_imf(mass, time): # there is no time dependence for IGIMF\n")
file.write(" if mass < 0.08:\n")
file.write(" return 0\n")
file.write(" elif mass < %s:\n" % masses[1])
if masses[0] - masses[1] == 0:
k = 0
b = 0
else:
k = (igimf[0] - igimf[1]) / (masses[0] - masses[1])
b = igimf[0] - k * masses[0]
file.write(" return {} * mass + {}\n".format(k, b))
write_imf_input_middle2(1)
file.write(" else:\n")
file.write(" return 0\n")
file.close()
return
write_imf_input3()
if printout == True:
print("imf_input.py rewritten for SFR = {} and metallicity = {}\n".format(SFR, Z_over_X))
file = open('../gimf_Fe_over_H.txt', 'w')
file.write("{}".format(Z_over_X))
file.close()
file = open('../gimf_SFR.txt', 'w')
file.write("{}".format(SFR))
file.close()
print(" - GalIMF run completed - Run time: %ss -\n\n" % round((time.time() - start_time), 2))
return
return
def function_element_abundunce(solar_abu_table, element_1_name, element_2_name, metal_1_mass, metal_2_mass, instant_ejection):
    # This function calculates the atomic number ratio of two elements compared to the solar value, i.e. [element_1/element_2].
    # The following warnings might be due to too large a timestep.
    # Try applying "high_time_resolution=True",
    # but the simulation will then take longer.
if metal_2_mass == 0:
if metal_1_mass == 0:
metal_1_over_2 = 0
print("Warning: [{}/{}] = 0 because both element mass = 0. See function_element_abundunce in galevo.py".format(element_1_name, element_2_name))
elif metal_1_mass > 0:
metal_1_over_2 = 6
elif metal_1_mass < 0:
if instant_ejection==False:
print("Warning: current {} mass < 0. See galevo.py".format(element_1_name))
metal_1_over_2 = -6
elif metal_2_mass < 0:
if instant_ejection == False:
print("Warning: current {} mass < 0. See galevo.py".format(element_2_name))
if metal_1_mass == 0:
metal_1_over_2 = 6
elif metal_1_mass > 0:
metal_1_over_2 = 6
elif metal_1_mass < 0:
if instant_ejection == False:
print("Warning: current {} mass < 0. See galevo.py".format(element_1_name))
metal_1_over_2 = 0
print("Warning: [{}/{}] = 0 because both element mass < 0. See function_element_abundunce in galevo.py".format(element_1_name, element_2_name))
else:
if metal_1_mass == 0:
metal_1_over_2 = -6
elif metal_1_mass < 0:
if instant_ejection == False:
print("Warning: current {} mass < 0. See galevo.py".format(element_1_name))
metal_1_over_2 = -6
else:
solar_metal_1_logarithmic_abundances = element_abundances_solar.function_solar_element_abundances(
solar_abu_table, element_1_name)
solar_metal_2_logarithmic_abundances = element_abundances_solar.function_solar_element_abundances(
solar_abu_table, element_2_name)
metal_1_element_weight = element_weight_table.function_element_weight(element_1_name)
metal_2_element_weight = element_weight_table.function_element_weight(element_2_name)
metal_1_over_2 = math.log(metal_1_mass / metal_2_mass / metal_1_element_weight * metal_2_element_weight, 10) \
- (solar_metal_1_logarithmic_abundances - solar_metal_2_logarithmic_abundances)
return metal_1_over_2
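# In the regular branch above (both masses positive), the returned value is
# [X1/X2] = log10((metal_1_mass / W_1) / (metal_2_mass / W_2)) - (A_1 - A_2)_solar,
# i.e. the number-density ratio of the two elements relative to the solar ratio; the +/-6 and 0
# values returned by the other branches are placeholders for zero or negative masses.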
def function_get_avaliable_Z(str_yield_table):
    # extract the available metallicities in the given grid table
    # the stellar lifetime table and the metal production tables have different available metallicity grids.
import os
yield_path = 'yield_tables'
if os.path.isdir(yield_path) == False:
yield_path = '/galIMF/yield_tables'
if os.path.isdir(yield_path) == False:
cwd = os.getcwd()
yield_path = cwd + '/galIMF/yield_tables'
# list 1
file_names_setllar_lifetime_from_str_yield_table = os.listdir(
yield_path + '/rearranged___/setllar_lifetime_from_portinari98')
Z_table_list = []
for name in file_names_setllar_lifetime_from_str_yield_table:
length_file_name = len(name)
i = 0
i_start = 0
i_end = 0
while i < length_file_name:
if name[i] == '=':
i_start = i
if name[i] == '.':
i_end = i
(i) = (i + 1)
i = i_start + 1
Z = ''
while i < i_end:
Z += name[i]
(i) = (i + 1)
Z_table_list += [float(Z)]
sorted_Z_table_list = sorted(Z_table_list)
# list 2
file_names_setllar_lifetime_from_str_yield_table = os.listdir(
yield_path + '/rearranged___/setllar_Metal_eject_mass_from_{}'.format(str_yield_table))
Z_table_list_2 = []
for name in file_names_setllar_lifetime_from_str_yield_table:
length_file_name = len(name)
i = 0
i_start = 0
i_end = 0
while i < length_file_name:
if name[i] == '=':
i_start = i
if name[i] == '.':
i_end = i
(i) = (i + 1)
i = i_start + 1
Z = ''
while i < i_end:
Z += name[i]
(i) = (i + 1)
if Z != '':
Z_table_list_2 += [float(Z)]
sorted_Z_table_list_2 = sorted(Z_table_list_2)
if str_yield_table != "portinari98":
# list 3
file_names_setllar_lifetime_from_str_yield_table = os.listdir(
yield_path + '/rearranged___/setllar_Metal_eject_mass_from_marigo01')
Z_table_list_3 = []
for name in file_names_setllar_lifetime_from_str_yield_table:
length_file_name = len(name)
i = 0
i_start = 0
i_end = 0
while i < length_file_name:
if name[i] == '=':
i_start = i
if name[i] == '.':
i_end = i
(i) = (i + 1)
i = i_start + 1
Z = ''
while i < i_end:
Z += name[i]
(i) = (i + 1)
Z_table_list_3 += [float(Z)]
sorted_Z_table_list_3 = sorted(Z_table_list_3)
else:
sorted_Z_table_list_3 = []
return (sorted_Z_table_list, sorted_Z_table_list_2, sorted_Z_table_list_3)
def function_select_metal(Z, Z_table_list):
# the list for stellar lifetime is
# [0.0004, 0.0008, 0.0012, 0.0016, 0.002, 0.0024, 0.0028, 0.0032, 0.0036, 0.004, 0.008, 0.012]
# the list for stellar metallicity is
# [0.0004, 0.004, 0.008, 0.0127] or [0, 0.004, 0.02] for Kobayashi2006 massive star table
if Z <= Z_table_list[0]:
Z_select__ = Z_table_list[0]
return ('out', Z_select__, Z_select__, Z_select__)
# The 'out' flag means the current gas metallicity is outside the range of provided stellar yield table.
elif Z >= Z_table_list[-1]:
Z_select__ = Z_table_list[-1]
return ('out', Z_select__, Z_select__, Z_select__)
else:
i = 1
while i < len(Z_table_list):
if Z < Z_table_list[i]:
Z_select__low = Z_table_list[i - 1]
Z_select__high = Z_table_list[i]
return ('in', Z_select__low, Z, Z_select__high)
(i) = (i + 1)
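# Return value sketch (illustrative): for a metallicity inside the grid, e.g.
# function_select_metal(0.005, [0.0004, 0.004, 0.008, 0.0127]) --> ('in', 0.004, 0.005, 0.008),
# while values outside the grid return ('out', Z_edge, Z_edge, Z_edge) with the nearest edge value.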
def fucntion_mass_boundary_SNIa_Greggio83(time, mass):
    # According to Greggio 1983 eq. 5:
logM = math.log(mass, 10)
left = math.log(time, 10)
right = 10 - 4.319*logM + 1.543*(logM)**2
if left > right:
return fucntion_mass_boundary_SNIa_Greggio83(time, mass*0.99)
else:
return mass
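# Usage sketch (illustrative): the recursion lowers the trial mass in 1% steps until the
# Greggio (1983) lifetime of that mass exceeds the given time, so it returns an approximate
# (1%-resolution) turnoff mass; e.g. fucntion_mass_boundary_SNIa_Greggio83(10 ** 10, 8)
# descends to roughly 1 solar mass.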
def fucntion_mass_boundary(time, mass_grid_for_lifetime, lifetime):
# The adopted spline fit of portinari98 lifetime is not monotonic at the massive end.
# But this function ensures that lifetime is monotonically smaller for more massive stars.
mass = mass_grid_for_lifetime
length_list_lifetime = len(lifetime)
x = round(length_list_lifetime / 2)
loop_number_fucntion_mass_boundary = math.ceil(math.log(length_list_lifetime, 2))
mass_boundary = 10000
if lifetime[x] == time:
mass_boundary = mass[x]
else:
i = 0
low = 0
high = length_list_lifetime
while i < loop_number_fucntion_mass_boundary:
if lifetime[x] > time:
low = x
x = x + round((high - x) / 2)
else:
high = x
x = x - round((x - low) / 2)
(i) = (i + 1)
if x == length_list_lifetime - 1:
mass_boundary = mass[x]
else:
if lifetime[x - 1] > time > lifetime[x]:
x = x - 1
mass_boundary = round(mass[x] + (mass[x + 1] - mass[x]) * (lifetime[x] - time) / (
lifetime[x] - lifetime[x + 1]), 5)
return mass_boundary
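# As with the Greggio relation above, this is effectively a turnoff-mass lookup: a binary search
# on the tabulated lifetimes followed by linear interpolation in mass; the placeholder value
# 10000 is returned only when no bracketing lifetime interval is found.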
# def function_get_observed_mass(lower_limit, upper_limit, M_tot_for_one_epoch, SFR, integrated_igimf):
# integrator = quad(function_get_xi_from_IGIMF, lower_limit, upper_limit, SFR, limit=40)[0]
# observed_mass = M_tot_for_one_epoch * integrator / integrated_igimf
# return observed_mass
# def function_xi_Kroupa_IMF(mass):
# # integrate this function's output xi result in the number of stars in mass limits.
# xi = Kroupa_IMF.custom_imf(mass, 0)
# return xi
# def function_mass_Kroupa_IMF(mass):
# # integrate this function's output m result in the total stellar mass for stars in mass limits.
# m = mass * Kroupa_IMF.custom_imf(mass, 0)
# return m
def text_output(imf, STF, SFR, SFEN, original_gas_mass, log_Z_0):
print('Generating txt output files...')
global time_axis
# print("time:", time_axis)
global all_sf_imf
number_of_sf_epoch = len(all_sf_imf)
# data = exec(open("simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/chemical_and_SN_evolution.txt".format(IMF, SFE[0], SFR[0], SFEN[0])).read())
#
# print(data)
#################
### F05 study ###
#################
#
# mass_range_1 = [0.3, 0.4]
# mass_boundary_low = all_sf_imf[0][1]
# mass_boundary_high = all_sf_imf[-1][1]
# print("mass_range_2_boundary_low", all_sf_imf[0][1])
# print("mass_range_2_boundary_high", all_sf_imf[-1][1])
# mass_range_2 = [mass_boundary_low, mass_boundary_high]
# mass_range_1 = [0.3, 0.4]
# mass_range_2 = [0.08, 1]
#
# integrate_Kroupa_stellar_mass_range_1 = quad(function_mass_Kroupa_IMF, mass_range_1[0], mass_range_1[1], limit=40)[0]
# integrate_Kroupa_stellar_mass_range_2 = quad(function_mass_Kroupa_IMF, mass_range_2[0], mass_range_2[1], limit=40)[0]
# integrate_Kroupa_stellar_number_mass_range_1 = \
# quad(function_xi_Kroupa_IMF, mass_range_1[0], mass_range_1[1], limit=40)[0]
# integrate_Kroupa_stellar_number_mass_range_2 = \
# quad(function_xi_Kroupa_IMF, mass_range_2[0], mass_range_2[1], limit=40)[0]
#
# F_mass_Kroupa_IMF = integrate_Kroupa_stellar_mass_range_1 / integrate_Kroupa_stellar_mass_range_2
# F_number_Kroupa_IMF = integrate_Kroupa_stellar_number_mass_range_1 / integrate_Kroupa_stellar_number_mass_range_2
#
# integrate_IGIMF_stellar_mass_range_1 = 0
# integrate_IGIMF_stellar_mass_range_2 = 0
# integrate_IGIMF_stellar_number_mass_range_1 = 0
# integrate_IGIMF_stellar_number_mass_range_2 = 0
# i = 0
# while i < number_of_sf_epoch:
# def function_xi_IGIMF(mass):
# xi = all_sf_imf[i][0].custom_imf(mass, 0)
# return xi
#
# def function_mass_IGIMF(mass):
# m = mass * all_sf_imf[i][0].custom_imf(mass, 0)
# return m
#
# integrate_IGIMF_stellar_mass_range_1 += quad(function_mass_IGIMF, mass_range_1[0], mass_range_1[1], limit=40)[0]
# integrate_IGIMF_stellar_mass_range_2 += quad(function_mass_IGIMF, mass_range_2[0], mass_range_2[1], limit=40)[0]
# integrate_IGIMF_stellar_number_mass_range_1 += \
# quad(function_xi_IGIMF, mass_range_1[0], mass_range_1[1], limit=40)[0]
# integrate_IGIMF_stellar_number_mass_range_2 += \
# quad(function_xi_IGIMF, mass_range_2[0], mass_range_2[1], limit=40)[0]
# (i) = (i + 1)
#
# F_mass_IGIMF = integrate_IGIMF_stellar_mass_range_1 / integrate_IGIMF_stellar_mass_range_2
# F_number_IGIMF = integrate_IGIMF_stellar_number_mass_range_1 / integrate_IGIMF_stellar_number_mass_range_2
#
# print("F_mass_Kroupa_IMF", F_mass_Kroupa_IMF)
# print("F_mass_IGIMF", F_mass_IGIMF)
# print("F_number_Kroupa_IMF", F_number_Kroupa_IMF)
# print("F_number_IGIMF", F_number_IGIMF)
# print("Number of star formation event epoch (10^7 yr): ", number_of_sf_epoch)
# print("modeled star formation duration:", number_of_sf_epoch/100, "Gyr")
global total_energy_release_list
# print("total number of SN: 10^", round(math.log(total_energy_release_list[-1], 10), 1))
global BH_mass_list, NS_mass_list, WD_mass_list, remnant_mass_list, stellar_mass_list, ejected_gas_mass_list
stellar_mass = round(math.log(stellar_mass_list[-1], 10), 4)
# print("Mass of all alive stars at final time: 10 ^", stellar_mass)
downsizing_relation__star_formation_duration = round(10 ** (2.38 - 0.24 * stellar_mass), 4) # Recchi 2009
# print("star formation duration (downsizing relation):", downsizing_relation__star_formation_duration, "Gyr")
stellar_and_remnant_mass = round(math.log(stellar_mass_list[-1] + remnant_mass_list[-1], 10), 4)
# print("Mass of stars and remnants at final time: 10 ^", stellar_and_remnant_mass)
total_mas_in_box = original_gas_mass
# # Dabringhausen 2008 eq.4
# Dabringhausen_2008_a = 2.95
# Dabringhausen_2008_b = 0.596
# expansion_factor = 5 ################ the expansion_factor should be a funtion of galaxy mass and rise with the mass
# log_binding_energy = round(
# math.log(4.3 * 6 / 5, 10) + 40 + (2 - Dabringhausen_2008_b) * math.log(total_mas_in_box, 10) - math.log(
# Dabringhausen_2008_a, 10) + 6 * Dabringhausen_2008_b + math.log(expansion_factor, 10), 1)
# # print("the gravitational binding energy: 10^", log_binding_energy, "erg")
global Fe_over_H_list, stellar_Fe_over_H_list, stellar_Fe_over_H_list_luminosity_weighted
# print("Gas [Fe/H]:", round(Fe_over_H_list[-1], 3))
# print("Stellar [Fe/H]:", round(stellar_Fe_over_H_list[-1], 3))
global Mg_over_Fe_list, stellar_Mg_over_Fe_list, stellar_Mg_over_Fe_list_luminosity_weighted
# print("Gas [Mg/Fe]:", round(Mg_over_Fe_list[-1], 3))
# print("Stellar [Mg/Fe]:", round(stellar_Mg_over_Fe_list[-1], 3))
global O_over_Fe_list, stellar_O_over_Fe_list, stellar_O_over_Fe_list_luminosity_weighted
# print("Gas [O/Fe]:", round(O_over_Fe_list[-1], 3))
# print("Stellar [O/Fe]:", round(stellar_O_over_Fe_list[-1], 3))
global Mg_over_H_list, stellar_Mg_over_H_list, stellar_Mg_over_H_list_luminosity_weighted
global C_over_H_list, stellar_C_over_H_list, stellar_C_over_H_list_luminosity_weighted
global N_over_H_list, stellar_N_over_H_list, stellar_N_over_H_list_luminosity_weighted
global Ca_over_H_list, stellar_Ca_over_H_list, stellar_Ca_over_H_list_luminosity_weighted
global Si_over_H_list, stellar_Si_over_H_list, stellar_Si_over_H_list_luminosity_weighted
global S_over_H_list, stellar_S_over_H_list, stellar_S_over_H_list_luminosity_weighted
global Ne_over_H_list, stellar_Ne_over_H_list, stellar_Ne_over_H_list_luminosity_weighted
# print("Gas [Mg/H]:", round(Mg_over_H_list[-1], 3))
# print("Stellar [Mg/H]:", round(stellar_Mg_over_H_list[-1], 3))
global O_over_H_list, stellar_O_over_H_list, stellar_O_over_H_list_luminosity_weighted
# print("Gas [O/H]:", round(O_over_H_list[-1], 3))
# print("Stellar [O/H]:", round(stellar_O_over_H_list[-1], 3))
global gas_Z_over_X_list, stellar_Z_over_X_list, stellar_Z_over_X_list_luminosity_weighted, stellar_Z_over_H_list
# print("Gas metallicity:", round(gas_Z_over_X_list[-1], 3))
# print("Stellar metallicity:", round(stellar_Z_over_X_list[-1], 3))
# print("Stellar [Z/H]:", round(stellar_Z_over_H_list[-1], 3))
filename = "simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/chemical_and_SN_evolution.txt".format(imf, STF, SFR, SFEN, log_Z_0)
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
file = open(
'simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/chemical_and_SN_evolution.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
print("simulation results saved in the file: "
"simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/(plots/)...txt".format(imf, STF, SFR, SFEN,
log_Z_0))
file.write("# Number of star formation event epoch (10^7 yr):\n")
file.write("%s\n" % number_of_sf_epoch)
file.write("# Modeled star formation duration (Gyr):\n")
file.write("{}\n".format(number_of_sf_epoch / 100))
file.write("# Total number of SN (log_10):\n")
file.write("%s\n" % round(math.log(total_energy_release_list[-1], 10), 1))
file.write("# Mass of all alive stars at final time (log_10):\n")
file.write("%s\n" % stellar_mass)
    file.write("# Star formation duration of this final stellar mass according to the downsizing relation (Gyr):\n")
file.write("%s\n" % downsizing_relation__star_formation_duration)
file.write("# Mass of stars and remnants at final time (log_10):\n")
file.write("%s\n" % stellar_and_remnant_mass)
    file.write("# Total mass in the closed-box model:\n")
file.write("%s\n" % total_mas_in_box)
length_of_time_axis = len(time_axis)
file.write("# time step list:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % time_axis[i])
(i) = (i + 1)
file.write("\n")
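    # Hypothetical helper (a sketch only, not used by the original code): every block below repeats
    # the same "header comment, then one line of space-separated values" pattern, which could be
    # expressed once like this:
    def _write_series(out_file, header, values, none_placeholder=None):
        # header is expected to already contain the leading "# " and the trailing "\n"
        out_file.write(header)
        for value in values:
            if value is None and none_placeholder is not None:
                out_file.write("{} ".format(none_placeholder))
            else:
                out_file.write("{} ".format(value))
        out_file.write("\n")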
file.write("# Number of SNIa + SNII (log_10):\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % total_energy_release_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [Fe/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % Fe_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [Fe/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Fe_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [Mg/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % Mg_over_Fe_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [Mg/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Mg_over_Fe_list[i])
(i) = (i + 1)
file.write("\n")
Mass_weighted_stellar_Mg_over_Fe = stellar_Mg_over_Fe_list[-1]
# print("Mass-weighted stellar [Mg/Fe] at final time:", Mass_weighted_stellar_Mg_over_Fe)
file.write("# Gas [O/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % O_over_Fe_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [O/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_O_over_Fe_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [Mg/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % Mg_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [C/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % C_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [N/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % N_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [Ca/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % Ca_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [Ne/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % Ne_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [Si/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % Si_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [S/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % S_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [Mg/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Mg_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [C/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_C_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [N/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_N_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [Ca/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Ca_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [Ne/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Ne_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [Si/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Si_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [S/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_S_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas [O/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % O_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted [O/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_O_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Gas metallicity, [Z/X]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % gas_Z_over_X_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar mass-weighted metallicity, [Z/X]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Z_over_X_list[i])
(i) = (i + 1)
file.write("\n")
Mass_weighted_stellar_metallicity = stellar_Z_over_X_list[-1]
# print("Mass-weighted stellar [Z/X] at final time:", Mass_weighted_stellar_metallicity)
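    # the two clamps below floor the cumulative SN counts at 10^-10 so that math.log does not
    # fail when no SNIa or SNII have exploded at all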
if SNIa_energy_release_list[-1] < 10 ** (-10):
SNIa_energy_release_list[-1] = 10 ** (-10)
file.write("# Total number of SNIa (log_10):\n")
file.write("%s\n" % round(math.log(SNIa_energy_release_list[-1], 10), 1))
if SNII_energy_release_list[-1] < 10 ** (-10):
SNII_energy_release_list[-1] = 10 ** (-10)
file.write("# Total number of SNII (log_10):\n")
file.write("%s\n" % round(math.log(SNII_energy_release_list[-1], 10), 1))
file.write("# Number of SNIa (log_10):\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % SNIa_energy_release_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Number of SNII (log_10):\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % SNII_energy_release_list[i])
(i) = (i + 1)
file.write("\n")
global ejected_gas_Mg_over_Fe_list
file.write("# [Mg/Fe] for total ejected gas till this time:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % ejected_gas_Mg_over_Fe_list[i])
(i) = (i + 1)
file.write("\n")
global instant_ejected_gas_Mg_over_Fe_list
file.write("# [Mg/Fe] for instant ejected gas at this time:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % instant_ejected_gas_Mg_over_Fe_list[i])
(i) = (i + 1)
file.write("\n")
global ejected_metal_mass_list
file.write("# total ejected metal mass till this time:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % ejected_metal_mass_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# initial [M/H]:\n")
file.write("%s " % log_Z_0)
file.write("\n")
file.write("# Stellar [Z/H]. [Z/H] = [Fe/H] + A[Mg/Fe] where A=0.94:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Z_over_H_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted metallicity, [Z/X]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Z_over_X_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Mg/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Mg_over_Fe_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [C/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_C_over_Fe_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
    file.write("# Stellar luminosity-weighted [N/O]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_N_over_O_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [O/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_O_over_Fe_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Ca/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Ca_over_Fe_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Ne/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Ne_over_Fe_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Si/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Si_over_Fe_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [S/Fe]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_S_over_Fe_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [C/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_C_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [N/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_N_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [O/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_O_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Mg/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Mg_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Fe/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Fe_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Ca/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Ca_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Ne/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Ne_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [Si/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Si_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Stellar luminosity-weighted [S/H]:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_S_over_H_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# X_list:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % X_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# stellar_X_list:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_X_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# stellar_X_list_luminosity_weighted:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_X_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Y_list:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % Y_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# stellar_Y_list:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Y_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# stellar_Y_list_luminosity_weighted:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Y_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.write("# Z_list:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % Z_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# stellar_Z_list:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Z_list[i])
(i) = (i + 1)
file.write("\n")
file.write("# stellar_Z_list_luminosity_weighted:\n")
i = 0
while i < length_of_time_axis:
file.write("%s " % stellar_Z_list_luminosity_weighted[i])
(i) = (i + 1)
file.write("\n")
file.close()
return
def plot_output(plot_show, plot_save, imf, igimf, SFR, SFEN, log_Z_0, STF): # SFR is the maximum SFR.
if plot_show is True:
print('\nGenerating plot outputs...\n')
# plot SFH
global all_sfr
SFR_list = []
age_list = []
age_list.append(0)
SFR_list.append(-22)
age_list.append(0.01)
SFR_list.append(-22)
for i in range(len(all_sfr)):
age_list.append(all_sfr[i][1])
SFR_list.append(math.log(all_sfr[i][0], 10))
age_list.append(all_sfr[i][1] + 0.01)
SFR_list.append(math.log(all_sfr[i][0], 10))
age_list.append(all_sfr[i][1] + 0.01)
SFR_list.append(-22)
age_list.append(10)
SFR_list.append(-22)
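    # the paired appends above turn the list of (SFR, start time) epochs into a step function in
    # log10(SFR): each epoch contributes a flat 10 Myr (0.01 Gyr) segment, and -22 acts as an
    # effectively-zero star formation floor for plotting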
if plot_show is True or plot_save is True:
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(0, figsize=(3, 2.5))
fig.add_subplot(1, 1, 1)
plt.plot(age_list, SFR_list, color="k", lw=0.8)
# with open('SFH_Lacchin.txt') as f:
# lines = f.readlines()
# time_Lacchin = [float(line.split()[0]) for line in lines]
# SFH_Lacchin = [float(line.split()[1]) for line in lines]
# with open('SFH_Lacchin_igimf.txt') as f:
# lines = f.readlines()
# time_Lacchin_igimf = [float(line.split()[0]) for line in lines]
# SFH_Lacchin_igimf = [float(line.split()[1]) for line in lines]
# # plt.plot(time_Lacchin, SFH_Lacchin, color="tab:red", label='Lacchin2019 Salpeter', ls='dashed', lw=0.7)
# plt.plot(time_Lacchin_igimf, SFH_Lacchin_igimf, color="tab:blue", label='Lacchin2019 IGIMF', ls='dashed', lw=0.7)
plt.xlabel('time [Gyr]')
        plt.ylabel(r'log$_{10}$(SFR [$M_\odot$/yr])')
# plt.xlim(6.4, 1.01 * log_time_axis[-1])
# plt.xlim(-0.1, 1.1)
# plt.ylim(-7, -2.5)
# plt.title('Star formation history', fontsize=10)
plt.tight_layout()
# plt.legend(prop={'size': 7})
if plot_save is True:
plt.savefig('galaxy_evolution_fig_SFH.pdf', dpi=250)
filename = "simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/SFH.txt".format(imf, STF, SFR, SFEN, log_Z_0)
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
length_of_SFH_list = len(SFR_list)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/SFH.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# age_list\n")
i = 0
while i < length_of_SFH_list:
file.write("{} ".format(age_list[i]))
(i) = (i + 1)
file.write("\n# SFR_list\n")
i = 0
while i < length_of_SFH_list:
file.write("{} ".format(SFR_list[i]))
(i) = (i + 1)
file.write("\n")
file.close()
# # plot IMF
global all_sf_imf
number_of_sf_epoch = len(all_sf_imf)
mass_list = []
xi_last_time = []
xi_Kroupa = []
xi_Salpeter = []
xi_Kroupa_not_log = []
xi_observe = []
xi_each_epoch = []
xi_each_time_log = []
xi_each_time = []
i = 0
while i < number_of_sf_epoch:
xi_each_epoch.append([])
xi_each_time_log.append([])
xi_each_time.append([])
mass = 200
while mass > 0.05:
xi_each_epoch__ = all_sf_imf[i][0].custom_imf(mass, 0)
if xi_each_epoch__ == 0:
xi_each_epoch[i] += [-10]
else:
xi_each_epoch[i] += [math.log(xi_each_epoch__, 10)]
j = 0
xi_each_time__ = 0
while j < i + 1:
xi_each_time__ += all_sf_imf[j][0].custom_imf(mass, 0)
(j) = (j + 1)
if xi_each_time__ == 0:
xi_each_time_log[i] += [-10]
xi_each_time[i] += [0]
else:
xi_each_time_log[i] += [math.log(xi_each_time__, 10)]
xi_each_time[i] += [xi_each_time__]
(mass) = (mass * 0.99)
(i) = (i + 1)
j = 0
xi_1_last_time = 0
while j < number_of_sf_epoch:
xi_1_last_time += all_sf_imf[j][0].custom_imf(1, 0)
(j) = (j + 1)
from IMFs import Salpeter_IMF
normal_Kroupa = xi_1_last_time / Kroupa_IMF.custom_imf(1, 0)
normal_Salpeter = xi_1_last_time / Salpeter_IMF.custom_imf(1, 0)
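    # normal_Kroupa and normal_Salpeter rescale the canonical Kroupa and Salpeter IMFs so that they
    # match the summed (time-integrated) galaxy-wide IMF at 1 Msun, making the shapes directly
    # comparable in the plots and output files below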
mass = 200
while mass > 0.05:
mass_list += [mass]
xi_last_time += [all_sf_imf[-1][0].custom_imf(mass, 0)]
# xi_last_time += [igimf.custom_imf(mass, 0)]
xi_observe__ = 0
for i in range(number_of_sf_epoch):
xi_observe__ += all_sf_imf[i][0].custom_imf(mass, 0)
# if mass < all_sf_imf[i][1]:
# xi_observe__ += all_sf_imf[i][0].custom_imf(mass, 0)
xi_observe += [xi_observe__]
xi_Kroupa__ = Kroupa_IMF.custom_imf(mass, 0) * normal_Kroupa
if xi_Kroupa__ == 0:
xi_Kroupa += [-10]
else:
xi_Kroupa += [math.log(xi_Kroupa__, 10)]
xi_Kroupa_not_log += [xi_Kroupa__]
xi_Salpeter__ = Salpeter_IMF.custom_imf(mass, 0) * normal_Salpeter
if xi_Salpeter__ == 0:
xi_Salpeter += [-10]
else:
xi_Salpeter += [math.log(xi_Salpeter__, 10)]
(mass) = (mass * 0.99)
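    # the loop above samples stellar masses on a logarithmic grid: starting from 200 Msun and
    # multiplying by 0.99 down to 0.05 Msun gives roughly log(200/0.05)/log(1/0.99) ~ 825 points.
    # A hypothetical equivalent grid could be built up-front, e.g.
    #   n_pts = int(math.log(200 / 0.05) / math.log(1 / 0.99)) + 1
    #   mass_grid = [200 * 0.99 ** k for k in range(n_pts)]
    # (sketch only; the incremental loops are kept unchanged)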
i = 0
while i < number_of_sf_epoch:
time = round(all_sf_imf[i][2] / 10 ** 6)
file = open('Generated_IGIMFs/imf_at_time_{}_Myr.txt'.format(time), 'w')
file.write("# This file gives the total number of stars in a unit mass interval for a given stellar mass "
"for the entire galaxy, i.e., galaxy-wide Initial Mass Function (gwIMF), at {} Myr.\n".format(time))
        file.write("# Each line below gives the stellar mass on the left and the corresponding xi on the right, "
                   "where xi = d number / d mass.\n")
mass_list_length = len(mass_list)
j = mass_list_length - 1
while j > 0:
file.write("{} {}\n".format(mass_list[j], xi_each_time[i][j]))
(j) = (j - 1)
file.close()
(i) = (i + 1)
i = 0
while i < number_of_sf_epoch:
time = round(all_sf_imf[i][2] / 10 ** 6)
length_of_xi = len(mass_list)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/imf_at_time_{}_Myr.txt'.format(imf, STF, SFR, SFEN, log_Z_0, time), 'w')
file.write("# mass_list\n")
j = 0
while j < length_of_xi:
file.write("{} ".format(mass_list[j]))
(j) = (j + 1)
file.write("\n")
file.write("# xi_each_time\n")
j = 0
while j < length_of_xi:
file.write("{} ".format(xi_each_time[i][j]))
(j) = (j + 1)
file.write("\n")
file.write("# xi_Kroupa\n")
j = 0
while j < length_of_xi:
file.write("{} ".format(xi_Kroupa_not_log[j]))
(j) = (j + 1)
file.write("\n")
file.close()
(i) = (i + 1)
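    # the loop below converts mass_list and the xi arrays to log10 in place; -10 is used as a
    # sentinel for log10(0), so empty bins fall far below the plotted range instead of raising
    # a math domain error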
for i in range(len(mass_list)):
mass_list[i] = math.log(mass_list[i], 10)
if xi_last_time[i] == 0:
xi_last_time[i] = -10
else:
xi_last_time[i] = math.log(xi_last_time[i], 10)
if xi_observe[i] == 0:
xi_observe[i] = -10
else:
xi_observe[i] = math.log(xi_observe[i], 10)
# if xi_Kroupa[i] == 0:
# xi_Kroupa[i] = -10
# else:
# xi_Kroupa[i] = math.log(xi_Kroupa[i], 10)
if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(1, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# # i = 0
# # while i < number_of_sf_epoch:
# # time = round(all_sf_imf[i][2] / 10**6)
# # if i < 3:
# # plt.plot(mass_list, xi_each_time_log[i], label='TIgwIMF at {} Myr'.format(time))
# # else:
# # plt.plot(mass_list, xi_each_time_log[i])
# # (i) = (i + 1)
# # plt.plot(mass_list, xi_observe, label='final TIgwIMF')
# plt.plot(mass_list, xi_observe, color='k', label='IGIMF', lw=0.9)
# plt.plot(mass_list, xi_Salpeter, linestyle='dashed', color='r', label='Salpeter IMF')
# plt.plot(mass_list, xi_Kroupa, linestyle='dotted', color='b', label='Kroupa IMF', lw=1.5)
# plt.xlabel(r'log$_{10}(M_\star$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}(\xi_\star)$')
# plt.ylim(-4, 8)
# # plt.title('Time Integrated galaxy-wide IMF', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
#
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(2, figsize=(3, 2.5))
fig.add_subplot(1, 1, 1)
i = 0
while i < number_of_sf_epoch:
plt.plot(mass_list, xi_each_epoch[i], lw=0.3, color='0.5')
(i) = (i + 1)
plt.plot(mass_list, xi_Salpeter, linestyle='dashed', color='tab:blue', label='Salpeter IMF', lw=0.7)
plt.plot(mass_list, xi_observe, label='TIgwIMF', color='k')
plt.plot(mass_list, xi_Kroupa, linestyle='dotted', color='r', label='Canonical IMF', lw=1.5)
plt.xlabel(r'log$_{10}(M_\star)$ [$M_{\odot}$]')
plt.ylabel(r'log$_{10}(\xi_\star)$')
# plt.title('galaxy-wide IMF of each star formation epoch', fontsize=10)
# plt.ylim(-4, 7)
plt.legend(prop={'size': 7})
plt.tight_layout()
if plot_save is True:
plt.savefig('galaxy_evolution_fig_TIgwIMF.pdf', dpi=250)
################# evolution plots #################
global time_axis
length_of_time_axis = len(time_axis)
log_time_axis = []
for i in range(length_of_time_axis):
if time_axis[i] != 0:
log_time_axis += [math.log((time_axis[i]), 10)]
else:
log_time_axis += [0]
global X_list, stellar_X_list, stellar_X_list_luminosity_weighted
global Y_list, stellar_Y_list, stellar_Y_list_luminosity_weighted
global Z_list, stellar_Z_list, stellar_Z_list_luminosity_weighted
global primary_He_mass_fraction
DY_over_Z_list = [(yyy-primary_He_mass_fraction+1e-8)/zzz for zzz, yyy in zip(Z_list, Y_list)]
stellar_DY_over_Z_list = [(yyy-primary_He_mass_fraction+1e-8)/zzz for zzz, yyy in zip(stellar_Z_list, stellar_Y_list)]
stellar_DY_over_Z_list_luminosity_weighted = [(yyy-primary_He_mass_fraction+1e-8)/zzz for zzz, yyy in zip(stellar_Z_list_luminosity_weighted, stellar_Y_list_luminosity_weighted)]
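    # Delta Y / Z: helium enrichment above the primordial helium mass fraction per unit metal mass;
    # the small +1e-8 offset only keeps the numerator from being exactly zero at the very start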
# dY_over_dZ = []
# x_axis_for_dY_over_dZ = []
# length_Z_list = len(Z_list)
# i = 2
# while i < length_Z_list:
# __dY_over_dZ__ = (Z_list[i] - Z_list[i-2])/(Y_list[i] - Y_list[i-2])
# __x_axis_for_dY_over_dZ__ = Z_list[i-1]
# dY_over_dZ.append(__dY_over_dZ__)
# x_axis_for_dY_over_dZ.append(__x_axis_for_dY_over_dZ__)
# (i) = (i+1)
# print(stellar_Y_list)
# print(stellar_Z_list)
# print(stellar_DY_over_Z_list)
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(61, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.stackplot(log_time_axis, Y_list, X_list, Z_list, labels=["Y", "X", "Z"])
# if plot_save is not True:
# plt.title('gas-phase H, He, and metal mass fraction', fontsize=10)
# plt.xlim(7, log_time_axis[-1])
# plt.ylim(0, 1)
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('stacked mass fraction')
# plt.legend(loc='lower left', prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('XYZ_gas_phase.pdf', dpi=250)
# fig = plt.figure(62, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.stackplot(log_time_axis, stellar_Y_list, stellar_X_list, stellar_Z_list, labels=["Y", "X", "Z"])
# if plot_save is not True:
# plt.title('stellar H, He, and metal mass fraction', fontsize=10)
# plt.xlim(7, log_time_axis[-1])
# plt.ylim(0, 1)
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('stacked mass fraction')
# plt.legend(loc='lower left', prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('XYZ_star_MW.pdf', dpi=250)
# fig = plt.figure(63, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.stackplot(log_time_axis, stellar_Y_list_luminosity_weighted, stellar_X_list_luminosity_weighted, stellar_Z_list_luminosity_weighted, labels=["Y", "X", "Z"])
# if plot_save is not True:
# plt.title('stellar luminosity-weighted H, He, and metal mass fraction', fontsize=10)
# plt.xlim(7, log_time_axis[-1])
# plt.ylim(0, 1)
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('stacked mass fraction')
# plt.legend(loc='lower left', prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('XYZ_star_LW.pdf', dpi=250)
# global mm, zz
# fig = plt.figure(0, figsize=(3, 2.5))
# plt.plot(mm, zz)
global Fe_over_H_list, stellar_Fe_over_H_list, stellar_Fe_over_H_list_luminosity_weighted
print('plot stellar_Fe_over_H final', stellar_Fe_over_H_list[-1])
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(3, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Fe_over_H_list, label='gas')
# plt.plot(log_time_axis, stellar_Fe_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Fe_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Fe/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # if imf == 'igimf':
# # plt.title('IGIMF')
# # elif imf == 'Kroupa':
# # plt.title('Kroupa IMF')
# # plt.legend(scatterpoints=1, numpoints=1, loc=0, prop={'size': 7}, ncol=2)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_FeH_{}.pdf'.format(imf), dpi=250)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/Fe_over_H_time.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# log_time_axis\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(log_time_axis[i]))
(i) = (i + 1)
file.write("\n# gas_Fe_over_H_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(Fe_over_H_list[i]))
(i) = (i + 1)
file.write("\n# stellar_Fe_over_H_list\n")
i = 0
while i < length_of_time_axis:
if stellar_Fe_over_H_list[i] is None:
file.write("{} ".format(-6))
else:
file.write("{} ".format(stellar_Fe_over_H_list[i]))
(i) = (i + 1)
file.write("\n# stellar_Fe_over_H_list_luminosity_weighted\n")
i = 0
while i < length_of_time_axis:
if stellar_Fe_over_H_list_luminosity_weighted[i] is None:
file.write("{} ".format(-6))
else:
file.write("{} ".format(stellar_Fe_over_H_list_luminosity_weighted[i]))
(i) = (i + 1)
file.write("\n")
file.close()
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/Fe_over_H_mass.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# gas_Fe_over_H\n")
file.write("{} ".format(Fe_over_H_list[-1]))
file.write("\n# stellar_Fe_over_H\n")
file.write("{} ".format(stellar_Fe_over_H_list[-1]))
file.write("\n# stellar_Fe_over_H_list_luminosity_weighted\n")
file.write("{} ".format(stellar_Fe_over_H_list_luminosity_weighted[-1]))
file.write("\n")
file.close()
#
global O_over_H_list, stellar_O_over_H_list, stellar_O_over_H_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(4, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, O_over_H_list, label='gas')
# plt.plot(log_time_axis, stellar_O_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_O_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[O/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_OH_{}.pdf'.format(imf), dpi=250)
#
global Mg_over_H_list, stellar_Mg_over_H_list, stellar_Mg_over_H_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(5, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Mg_over_H_list, label='gas')
# # print(stellar_Mg_over_H_list)
# plt.plot(log_time_axis, stellar_Mg_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Mg_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Mg/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_MgH_{}.pdf'.format(imf), dpi=250)
###
global C_over_H_list, stellar_C_over_H_list, stellar_C_over_H_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(31, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, C_over_H_list, label='gas')
# plt.plot(log_time_axis, stellar_C_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_C_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[C/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # if imf == 'igimf':
# # plt.title('IGIMF')
# # elif imf == 'Kroupa':
# # plt.title('Kroupa IMF')
# # plt.legend(scatterpoints=1, numpoints=1, loc=0, prop={'size': 7}, ncol=2)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_CH_{}.pdf'.format(imf), dpi=250)
#
global N_over_H_list, stellar_N_over_H_list, stellar_N_over_H_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(32, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, N_over_H_list, label='gas')
# plt.plot(log_time_axis, stellar_N_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_N_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[N/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # if imf == 'igimf':
# # plt.title('IGIMF')
# # elif imf == 'Kroupa':
# # plt.title('Kroupa IMF')
# # plt.legend(scatterpoints=1, numpoints=1, loc=0, prop={'size': 7}, ncol=2)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_NH_{}.pdf'.format(imf), dpi=250)
#
global Ca_over_H_list, stellar_Ca_over_H_list, stellar_Ca_over_H_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(33, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Ca_over_H_list, label='gas')
# plt.plot(log_time_axis, stellar_Ca_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Ca_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Ca/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # if imf == 'igimf':
# # plt.title('IGIMF')
# # elif imf == 'Kroupa':
# # plt.title('Kroupa IMF')
# # plt.legend(scatterpoints=1, numpoints=1, loc=0, prop={'size': 7}, ncol=2)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_CaH_{}.pdf'.format(imf), dpi=250)
#
global Ne_over_H_list, stellar_Ne_over_H_list, stellar_Ne_over_H_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(331, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Ne_over_H_list, label='gas')
# plt.plot(log_time_axis, stellar_Ne_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Ne_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Ne/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # if imf == 'igimf':
# # plt.title('IGIMF')
# # elif imf == 'Kroupa':
# # plt.title('Kroupa IMF')
# # plt.legend(scatterpoints=1, numpoints=1, loc=0, prop={'size': 7}, ncol=2)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_NeH_{}.pdf'.format(imf), dpi=250)
#
global Si_over_H_list, stellar_Si_over_H_list, stellar_Si_over_H_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(332, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Si_over_H_list, label='gas')
# plt.plot(log_time_axis, stellar_Si_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Si_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Si/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # if imf == 'igimf':
# # plt.title('IGIMF')
# # elif imf == 'Kroupa':
# # plt.title('Kroupa IMF')
# # plt.legend(scatterpoints=1, numpoints=1, loc=0, prop={'size': 7}, ncol=2)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_SiH_{}.pdf'.format(imf), dpi=250)
#
global S_over_H_list, stellar_S_over_H_list, stellar_S_over_H_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(333, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, S_over_H_list, label='gas')
# plt.plot(log_time_axis, stellar_S_over_H_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_S_over_H_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[S/H]')
# plt.title('Element abundance evolution', fontsize=10)
# # if imf == 'igimf':
# # plt.title('IGIMF')
# # elif imf == 'Kroupa':
# # plt.title('Kroupa IMF')
# # plt.legend(scatterpoints=1, numpoints=1, loc=0, prop={'size': 7}, ncol=2)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-5, 1)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_SH_{}.pdf'.format(imf), dpi=250)
###
# Reference: Serenelli & Basu 2010, Determining the Initial Helium Abundance of the Sun, DOI: 10.1088/0004-637X/719/1/865
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(64, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Y_list, label='gas')
# plt.plot(log_time_axis, stellar_Y_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Y_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [Y_solar, Y_solar], color='red',
# ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('Y')
# plt.title('Helium mass fraction evolution', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_Y_{}.pdf'.format(imf), dpi=250)
# if True: # plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(65, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, DY_over_Z_list, label='gas')
# plt.plot(log_time_axis, stellar_DY_over_Z_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_DY_over_Z_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [(Y_solar-primary_He_mass_fraction+1e-8)/Z_solar, (Y_solar-primary_He_mass_fraction+1e-8)/Z_solar], color='red',
# ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'$\Delta$Y/Z')
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_DY_over_Z_{}.pdf'.format(imf), dpi=250)
# global gas_Z_over_X_list, stellar_Z_over_X_list, stellar_Z_over_X_list_luminosity_weighted
# if True: # plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(66, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(gas_Z_over_X_list, DY_over_Z_list, label='gas')
# plt.plot(stellar_Z_over_X_list, stellar_DY_over_Z_list, label='stellar MW')
# plt.plot(stellar_Z_over_X_list_luminosity_weighted, stellar_DY_over_Z_list_luminosity_weighted, label='stellar LW')
# plt.plot([gas_Z_over_X_list[0], gas_Z_over_X_list[-1]], [(Y_solar-primary_He_mass_fraction+1e-8)/Z_solar, (Y_solar-primary_He_mass_fraction+1e-8)/Z_solar], color='red',
# ls='dashed', label='solar')
# plt.xlabel('[Z/X]')
# plt.ylabel('$\Delta$Y/Z')
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_DY_over_Z_over_Z_over_X_{}.pdf'.format(imf), dpi=250)
# if True: # plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(67, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(Z_list, DY_over_Z_list, label='gas')
# plt.plot(stellar_Z_list, stellar_DY_over_Z_list, label='stellar MW')
# plt.plot(stellar_Z_list_luminosity_weighted, stellar_DY_over_Z_list_luminosity_weighted, label='stellar LW')
# plt.plot([Z_list[0], Z_list[-1]], [(Y_solar-primary_He_mass_fraction+1e-8)/Z_solar, (Y_solar-primary_He_mass_fraction+1e-8)/Z_solar], color='red',
# ls='dashed', label='solar')
# plt.xlabel('Z')
# plt.ylabel('$\Delta$Y/Z')
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_DY_over_Z_over_Z_{}.pdf'.format(imf), dpi=250)
# if True: # plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(68, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(Z_list, Y_list, label='gas')
# plt.plot(stellar_Z_list, stellar_Y_list, label='stellar MW')
# plt.plot(stellar_Z_list_luminosity_weighted, stellar_Y_list_luminosity_weighted, label='stellar LW')
# plt.plot([Z_list[0], Z_list[-1]], [Y_solar, Y_solar], color='red',
# ls='dashed', label='solar')
# plt.xlabel('Z')
# plt.ylabel('Y')
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_Y_over_Z_{}.pdf'.format(imf), dpi=250)
# if True: # plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(69, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(x_axis_for_dY_over_dZ, dY_over_dZ, label='gas')
# # plt.plot(stellar_Z_list, stellar_Y_list, label='stellar MW')
# # plt.plot(stellar_Z_list_luminosity_weighted, stellar_Y_list_luminosity_weighted, label='stellar LW')
# plt.plot([x_axis_for_dY_over_dZ[0], x_axis_for_dY_over_dZ[-1]], [(Y_solar-primary_He_mass_fraction+1e-8)/Z_solar, (Y_solar-primary_He_mass_fraction+1e-8)/Z_solar], color='red',
# ls='dashed', label='solar')
# plt.xlabel('Z')
# plt.ylabel('dY/dZ')
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_dY_over_dZ_over_Z_{}.pdf'.format(imf), dpi=250)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/Y_time.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# log_time_axis\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(log_time_axis[i]))
(i) = (i + 1)
file.write("\n# gas_Y_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(Y_list[i]))
(i) = (i + 1)
file.write("\n# stellar_Y_list\n")
i = 0
while i < length_of_time_axis:
if stellar_Y_list[i] is None:
            file.write("0.001 ")
else:
file.write("{} ".format(stellar_Y_list[i]))
(i) = (i + 1)
file.write("\n# stellar_Y_list_luminosity_weighted\n")
i = 0
while i < length_of_time_axis:
if stellar_Y_list_luminosity_weighted[i] is None:
            file.write("0.001 ")
else:
file.write("{} ".format(stellar_Y_list_luminosity_weighted[i]))
(i) = (i + 1)
file.write("\n")
file.close()
#
global Mg_over_Fe_list, stellar_Mg_over_Fe_list, stellar_Mg_over_Fe_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(7, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Mg_over_Fe_list, label='gas')
# plt.plot(log_time_axis, stellar_Mg_over_Fe_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Mg_over_Fe_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Mg/Fe]')
# plt.title('Element number ratio evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_MgFe_{}.pdf'.format(imf), dpi=250)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/Mg_over_Fe_time.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# log_time_axis\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(log_time_axis[i]))
(i) = (i + 1)
file.write("\n# gas_Mg_over_Fe_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(Mg_over_Fe_list[i]))
(i) = (i + 1)
file.write("\n# stellar_Mg_over_Fe_list\n")
i = 0
while i < length_of_time_axis:
if stellar_Mg_over_Fe_list[i] is None:
file.write("-6 ")
else:
file.write("{} ".format(stellar_Mg_over_Fe_list[i]))
(i) = (i + 1)
file.write("\n# stellar_Mg_over_Fe_list_luminosity_weighted\n")
i = 0
while i < length_of_time_axis:
if stellar_Mg_over_Fe_list_luminosity_weighted[i] is None:
file.write("-6 ")
else:
file.write("{} ".format(stellar_Mg_over_Fe_list_luminosity_weighted[i]))
(i) = (i + 1)
file.write("\n")
file.close()
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/Mg_over_Fe_mass.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# gas_Mg_over_Fe\n")
file.write("{} ".format(Mg_over_Fe_list[-1]))
file.write("\n# stellar_Mg_over_Fe\n")
file.write("{} ".format(stellar_Mg_over_Fe_list[-1]))
file.write("\n# stellar_Mg_over_Fe_list_luminosity_weighted\n")
file.write("{} ".format(stellar_Mg_over_Fe_list_luminosity_weighted[-1]))
file.write("\n")
file.close()
#
global O_over_Fe_list, stellar_O_over_Fe_list, stellar_O_over_Fe_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(8, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, O_over_Fe_list, label='gas')
# plt.plot(log_time_axis, stellar_O_over_Fe_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_O_over_Fe_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[O/Fe]')
# plt.title('Element number ratio evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_MgFe_{}.pdf'.format(imf), dpi=250)
#
global Ca_over_Fe_list, stellar_Ca_over_Fe_list, stellar_Ca_over_Fe_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(81, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Ca_over_Fe_list, label='gas')
# plt.plot(log_time_axis, stellar_Ca_over_Fe_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Ca_over_Fe_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Ca/Fe]')
# plt.title('Element number ratio evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_CaFe_{}.pdf'.format(imf), dpi=250)
#
global Ne_over_Fe_list, stellar_Ne_over_Fe_list, stellar_Ne_over_Fe_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(811, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Ne_over_Fe_list, label='gas')
# plt.plot(log_time_axis, stellar_Ne_over_Fe_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Ne_over_Fe_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Ne/Fe]')
# plt.title('Element number ratio evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_NeFe_{}.pdf'.format(imf), dpi=250)
#
global Si_over_Fe_list, stellar_Si_over_Fe_list, stellar_Si_over_Fe_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(812, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, Si_over_Fe_list, label='gas')
# plt.plot(log_time_axis, stellar_Si_over_Fe_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Si_over_Fe_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[Si/Fe]')
# plt.title('Element number ratio evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_SiFe_{}.pdf'.format(imf), dpi=250)
#
global S_over_Fe_list, stellar_S_over_Fe_list, stellar_S_over_Fe_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(813, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, S_over_Fe_list, label='gas')
# plt.plot(log_time_axis, stellar_S_over_Fe_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_S_over_Fe_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[S/Fe]')
# plt.title('Element number ratio evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_SFe_{}.pdf'.format(imf), dpi=250)
#
global C_over_Fe_list, stellar_C_over_Fe_list, stellar_C_over_Fe_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(82, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, C_over_Fe_list, label='gas')
# plt.plot(log_time_axis, stellar_C_over_Fe_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_C_over_Fe_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[C/Fe]')
# plt.title('Element number ratio evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_CFe_{}.pdf'.format(imf), dpi=250)
# #
# global N_over_O_list, stellar_N_over_O_list, stellar_N_over_O_list_luminosity_weighted
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(83, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, N_over_O_list, label='gas')
# plt.plot(log_time_axis, stellar_N_over_O_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_N_over_O_list_luminosity_weighted, label='stellar LW')
# plt.plot([log_time_axis[0], log_time_axis[-1]], [0, 0], color='red', ls='dashed', label='solar')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel('[N/Fe]')
# plt.title('Element number ratio evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * log_time_axis[-1])
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_NFe_{}.pdf'.format(imf), dpi=250)
#
del stellar_Fe_over_H_list[0]
del stellar_Fe_over_H_list_luminosity_weighted[0]
del stellar_Mg_over_Fe_list[0]
del stellar_Mg_over_Fe_list_luminosity_weighted[0]
del stellar_O_over_Fe_list[0]
del stellar_O_over_Fe_list_luminosity_weighted[0]
del stellar_Si_over_Fe_list[0]
del stellar_Si_over_Fe_list_luminosity_weighted[0]
del stellar_Ca_over_Fe_list[0]
del stellar_Ca_over_Fe_list_luminosity_weighted[0]
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(9, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# Fe_over_H_list[0] = -99
# stellar_Fe_over_H_list[0] = -99
# stellar_Fe_over_H_list_luminosity_weighted[0] = -99
# plt.plot(Fe_over_H_list, Mg_over_Fe_list, label='gas')
# # plt.scatter(Fe_over_H_list, Mg_over_Fe_list, alpha=0.5, s=10)
# plt.plot(stellar_Fe_over_H_list, stellar_Mg_over_Fe_list, label='stellar MW')
# plt.plot(stellar_Fe_over_H_list_luminosity_weighted, stellar_Mg_over_Fe_list_luminosity_weighted,
# label='stellar LW')
# plt.plot([-4, -0.6], [-0.8, -0.8], color='red', label='Lacchin2019', lw=0.5)
# plt.plot([-4, -0.6], [1, 1], color='red', lw=0.5)
# plt.plot([-4, -4], [-0.8, 1], color='red', lw=0.5)
# plt.plot([-0.6, -0.6], [-0.8, 1], color='red', lw=0.5)
# plt.plot([-4, -3.5, -3, -2.5, -2, -1.5, -1, -0.6], [0.44, 0.445, 0.45, 0.44, 0.37, 0.17, -0.14, -0.44], color='red', ls='dashed')
# plt.scatter([-3.7, -3.2, -2.92, -2.7, -2.5, -2.3, -2.3, -2.2, -2.05, -1.9, -1.8],
# [0.47, 0.4, 0.65, 0.15, 0.2, 0.32, 0.35, 0.35, 0.3, 0.2, 0.48], color='k', alpha=0.5)
# with open('Mg_Lacchin.txt') as f:
# lines = f.readlines()
# time_Mg_Lacchin_igimf = [float(line.split()[0]) for line in lines]
# Mg_Lacchin_igimf = [float(line.split()[1]) for line in lines]
# plt.plot(time_Mg_Lacchin_igimf, Mg_Lacchin_igimf, color="tab:blue", label='Lacchin2019 IGIMF', ls='dashed')
# plt.xlabel('[Fe/H]')
# plt.ylabel('[Mg/Fe]')
# plt.xlim(-4.5, 0)
# plt.ylim(-1, 1.5)
# plt.legend(loc='lower left', prop={'size': 6})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_MgFe-FeH_{}.pdf'.format(imf), dpi=250)
#
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(91, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(O_over_H_list, N_over_O_list, label='gas')
# plt.plot(stellar_O_over_H_list, stellar_N_over_O_list, label='stellar MW')
# plt.plot(stellar_O_over_H_list_luminosity_weighted, stellar_N_over_O_list_luminosity_weighted,
# label='stellar LW')
# plt.plot([-5, 1], [0, 0], color='red', ls='dashed', label='solar')
# plt.plot([0, 0], [-1, 3.5], color='red', ls='dashed')
# plt.xlabel('[O/H]')
# plt.ylabel('[N/O]')
# # plt.xlim(-5, 1)
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_NO-OH_{}.pdf'.format(imf), dpi=250)
# #
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(92, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(O_over_H_list, C_over_H_list, label='gas')
# plt.plot(stellar_O_over_H_list, stellar_C_over_H_list, label='stellar MW')
# plt.plot(stellar_O_over_H_list_luminosity_weighted, stellar_C_over_H_list_luminosity_weighted,
# label='stellar LW')
# plt.plot([-5, 1], [0, 0], color='red', ls='dashed', label='solar')
# plt.plot([0, 0], [-1, 3.5], color='red', ls='dashed')
# plt.xlabel('[O/H]')
# plt.ylabel('[C/H]')
# # plt.xlim(-5, 1)
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_CH-OH_{}.pdf'.format(imf), dpi=250)
# #
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(93, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(Fe_over_H_list, Si_over_Fe_list, label='gas')
# # plt.scatter(Fe_over_H_list, Si_over_Fe_list, alpha=0.5, s=10)
# plt.plot(stellar_Fe_over_H_list, stellar_Si_over_Fe_list, label='stellar MW')
# plt.plot(stellar_Fe_over_H_list_luminosity_weighted, stellar_Si_over_Fe_list_luminosity_weighted,
# label='stellar LW')
# plt.plot([-4, -0.6], [-0.5, -0.5], color='red', label='Lacchin2019', lw=0.5)
# plt.plot([-4, -0.6], [1, 1], color='red', lw=0.5)
# plt.plot([-4, -4], [-0.5, 1], color='red', lw=0.5)
# plt.plot([-0.6, -0.6], [-0.5, 1], color='red', lw=0.5)
# plt.plot([-4, -3.5, -3, -2.5, -2, -1.5, -1, -0.6], [0.68, 0.64, 0.6, 0.56, 0.5, 0.33, 0.1, -0.07], color='red', ls='dashed')
# plt.scatter([-3.68, -2.05, -1.93],
# [0.77, 0.1, 0.13], color='k', alpha=0.5)
# with open('Si_Lacchin.txt') as f:
# lines = f.readlines()
# time_Si_Lacchin_igimf = [float(line.split()[0]) for line in lines]
# Si_Lacchin_igimf = [float(line.split()[1]) for line in lines]
# plt.plot(time_Si_Lacchin_igimf, Si_Lacchin_igimf, color="tab:blue", label='Lacchin2019 IGIMF', ls='dashed')
# plt.xlabel('[Fe/H]')
# plt.ylabel('[Si/Fe]')
# plt.xlim(-4.5, 0)
# plt.ylim(-1, 1.5)
# plt.legend(loc='lower left', prop={'size': 6})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_SiFe-FeH_{}.pdf'.format(imf), dpi=250)
# #
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(94, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(Fe_over_H_list, Ca_over_Fe_list, label='gas')
# # plt.scatter(Fe_over_H_list, Ca_over_Fe_list, alpha=0.5, s=10)
# plt.plot(stellar_Fe_over_H_list, stellar_Ca_over_Fe_list, label='stellar MW')
# plt.plot(stellar_Fe_over_H_list_luminosity_weighted, stellar_Ca_over_Fe_list_luminosity_weighted,
# label='stellar LW')
# plt.plot([-4, -0.6], [-0.5, -0.5], color='red', label='Lacchin2019', lw=0.5)
# plt.plot([-4, -0.6], [0.7, 0.7], color='red', lw=0.5)
# plt.plot([-4, -4], [-0.5, 0.7], color='red', lw=0.5)
# plt.plot([-0.6, -0.6], [-0.5, 0.7], color='red', lw=0.5)
# plt.plot([-4, -3.5, -3, -2.5, -2, -1.5, -1, -0.6], [0.3, 0.27, 0.22, 0.19, 0.14, 0.01, -0.15, -0.26], color='red', ls='dashed')
# plt.xlabel('[Fe/H]')
# plt.ylabel('[Ca/Fe]')
# plt.scatter([-3.68, -3.2, -2.91, -2.7, -2.5, -2.28, -2.28, -2.2, -2.06, -1.94, -1.8],
# [0.52, 0.46, 0.32, 0.18, 0.24, 0.28, 0.32, -0.01, 0.15, 0.1, 0.22], color='k', alpha=0.5)
# with open('Ca_Lacchin.txt') as f:
# lines = f.readlines()
# time_Ca_Lacchin_igimf = [float(line.split()[0]) for line in lines]
# Ca_Lacchin_igimf = [float(line.split()[1]) for line in lines]
# plt.plot(time_Ca_Lacchin_igimf, Ca_Lacchin_igimf, color="tab:blue", label='Lacchin2019 IGIMF', ls='dashed')
# plt.xlim(-4.5, 0)
# plt.ylim(-1, 1.5)
# plt.legend(loc='lower left', prop={'size': 6})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_CaFe-FeH_{}.pdf'.format(imf), dpi=250)
#
#
#
# if plot_show is True or plot_save is True:
# fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 5))
# axs[0].plot(Fe_over_H_list, Mg_over_Fe_list, color='k')
# with open('Mg_Lacchin_best.txt') as f:
# lines = f.readlines()
# time_Mg_Lacchin_best = [float(line.split()[0]) for line in lines]
# Mg_Lacchin_best = [float(line.split()[1]) for line in lines]
# Lai2011_Fe_H = [-2.34 - 0.05, -2.37 - 0.05, -3.09 - 0.05, -2.39 - 0.05, -2.89 - 0.05, -2.2 - 0.05, -2.49 - 0.05,
# -2.48 - 0.05, -2.65 - 0.05, -2.43 - 0.05, -2.48 - 0.05, -2.49 - 0.05, -2.57 - 0.05, -2.89 - 0.05,
# -2.51 - 0.05, -3.29 - 0.05, -2.42 - 0.05, -3.79 - 0.05, -2.87 - 0.05, -2.9 - 0.05, -1.65 - 0.05,
# -2.76 - 0.05, -2.31 - 0.05, -1.86 - 0.05]
# Lai2011_Mg_Fe = [0.12 + 0.05, 0.28 + 0.05, 0.43 + 0.05, 0.35 + 0.05, 0.05 + 0.05, 0.32 + 0.05, 0.16 + 0.05,
# 0.18 + 0.05, 0.37 + 0.05, 0.05 + 0.05, 0.04 + 0.05, 0.23 + 0.05, 0.34 + 0.05, 0.3 + 0.05, 0.13 + 0.05,
# 0.28 + 0.05, 0.13 + 0.05, 0.27 + 0.05, 0.12 + 0.05, 0.16 + 0.05, 0.46 + 0.05, 0.25 + 0.05,
# 0.06 + 0.05, 0.51 + 0.05]
# Lai2011_Fe_H_error = [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2,
# 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]
# Lai2011_Mg_Fe_error = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
# 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
# Lacchin2019_Fe_H = [-3.6613, -3.2003, -2.9075, -2.7063, -2.5025, -2.2810, -2.2810, -2.1969, -2.0492, -1.9219, -1.8048]
# Lacchin2019_Mg_Fe = [0.47409, 0.39382, 0.63630, 0.13418, 0.20614, 0.35587, 0.32298, 0.35595, 0.30228, 0.20672, 0.47594]
# Lacchin2019_Fe_H_error = [0.1, 0.15, 0.15, 0.15, 0.15, 0.3, 0.15, 0.15, 0.15, 0.15, 0.15]
# Lacchin2019_Mg_Fe_error = [0.1, 0.05, 0.12, 0.135, 0.2, 0.23, 0.235, 0.13, 0.23, 0.14, 0.24]
# # axs[0].errorbar(Lai2011_Fe_H, Lai2011_Mg_Fe, xerr=Lai2011_Fe_H_error, yerr=Lai2011_Mg_Fe_error, markersize=0,
# # ecolor='k', ls='none', elinewidth=0.5, alpha=0.4)
# axs[0].errorbar(Lacchin2019_Fe_H, Lacchin2019_Mg_Fe, xerr=Lacchin2019_Fe_H_error, yerr=Lacchin2019_Mg_Fe_error, markersize=0,
# ecolor='tab:blue', ls='none', elinewidth=0.5, alpha=0.4)
# if imf == 'igimf':
# axs[0].plot(time_Mg_Lacchin_igimf, Mg_Lacchin_igimf, color='tab:blue', ls='dashed', lw=0.7)
# axs[0].plot(time_Mg_Lacchin_best, Mg_Lacchin_best, color='tab:blue', ls='-.', lw=0.7)
# elif imf == 'Salpeter':
# axs[0].plot([-4, -3.5, -3, -2.5, -2, -1.5, -1, -0.6], [0.44, 0.445, 0.45, 0.44, 0.37, 0.17, -0.14, -0.44],
# color='red', ls='dashed', lw=0.7)
# axs[0].scatter(Lacchin2019_Fe_H, Lacchin2019_Mg_Fe, color='tab:blue', alpha=0.5, label='Lacchin2019', s=20)
# # axs[0].scatter(Lai2011_Fe_H, Lai2011_Mg_Fe, color='k', alpha=0.5, marker='*', s=20, label='Lai2011')
# axs[0].set_xlabel('[Fe/H]')
# axs[0].set_ylabel('[Mg/Fe]')
# axs[0].set_xlim(-4, -0.6)
# axs[0].set_ylim(-0.8, 1)
# # axs[0].legend(prop={'size': 6}, loc='lower left')
#
# with open('Si_Lacchin_best.txt') as f:
# lines = f.readlines()
# time_Si_Lacchin_best = [float(line.split()[0]) for line in lines]
# Si_Lacchin_best = [float(line.split()[1]) for line in lines]
# Lacchin2019_Fe_H = [-3.6624, -2.0549, -1.9241]
# Lacchin2019_Si_Fe = [0.77012, 0.10533, 0.13018]
# Lacchin2019_Fe_H_error = [0.11, 0.15, 0.15]
# Lacchin2019_Si_Fe_error = [0.15, 0.26, 0.23]
# axs[1].plot(Fe_over_H_list, Si_over_Fe_list, color='k')
# axs[1].errorbar(Lacchin2019_Fe_H, Lacchin2019_Si_Fe, xerr=Lacchin2019_Fe_H_error, yerr=Lacchin2019_Si_Fe_error,
# markersize=0, ecolor='tab:blue', ls='none', elinewidth=0.5, alpha=0.4)
# if imf == 'igimf':
# axs[1].plot(time_Si_Lacchin_best, Si_Lacchin_best, color='tab:blue', ls='-.', lw=0.7)
# axs[1].plot(time_Si_Lacchin_igimf, Si_Lacchin_igimf, color='tab:blue', ls='dashed', lw=0.7)
# elif imf == 'Salpeter':
# axs[1].plot([-4, -3.5, -3, -2.5, -2, -1.5, -1, -0.6], [0.68, 0.64, 0.6, 0.56, 0.5, 0.33, 0.1, -0.07],
# color='red', ls='dashed', lw=0.7)
# axs[1].scatter(Lacchin2019_Fe_H, Lacchin2019_Si_Fe, color='tab:blue', alpha=0.5, label='Lacchin2019', s=20)
# axs[1].set_xlabel('[Fe/H]')
# axs[1].set_ylabel('[Si/Fe]')
# axs[1].set_xlim(-4, -0.6)
# axs[1].set_ylim(-0.5, 1)
#
# with open('Ca_Lacchin_best.txt') as f:
# lines = f.readlines()
# time_Ca_Lacchin_best = [float(line.split()[0]) for line in lines]
# Ca_Lacchin_best = [float(line.split()[1]) for line in lines]
# Lacchin2019_Fe_H = [-3.6584, -3.1993, -2.9072, -2.7045, -2.5011, -2.2812, -2.2813, -2.2820, -2.1992, -2.0522, -1.9244, -1.8019, ]
# Lacchin2019_Ca_Fe = [0.52167, 0.45923, 0.32191, 0.18145, 0.23876, 0.31201, 0.28968, 0.13178, -0.010205, 0.14921, 0.098101, 0.22084]
# Lacchin2019_Fe_H_error = [0.11, 0.15, 0.16, 0.16, 0.16, 0.11, 0.3, 0.16, 0.16, 0.16, 0.16, 0.16]
# Lacchin2019_Ca_Fe_error = [0.1, 0.05, 0.05, 0.05, 0.08, 0.05, 0.28, 0.05, 0.11, 0.07, 0.05, 0.06]
# axs[2].plot(Fe_over_H_list, Ca_over_Fe_list, color='k')
# axs[2].errorbar(Lacchin2019_Fe_H, Lacchin2019_Ca_Fe, xerr=Lacchin2019_Fe_H_error, yerr=Lacchin2019_Ca_Fe_error,
# markersize=0, ecolor='tab:blue', ls='none', elinewidth=0.5, alpha=0.4)
# if imf == 'igimf':
# axs[2].plot(time_Ca_Lacchin_best, Ca_Lacchin_best, color='tab:blue', ls='-.', lw=0.7)
# axs[2].plot(time_Ca_Lacchin_igimf, Ca_Lacchin_igimf, color='tab:blue', ls='dashed', lw=0.7)
# elif imf == 'Salpeter':
# axs[2].plot([-4, -3.5, -3, -2.5, -2, -1.5, -1, -0.6], [0.3, 0.27, 0.22, 0.19, 0.14, 0.01, -0.15, -0.26], color='red', ls='dashed', lw=0.7)
# axs[2].scatter(Lacchin2019_Fe_H, Lacchin2019_Ca_Fe, color='tab:blue', alpha=0.5, label='Lacchin2019', s=20)
# axs[2].set_xlabel('[Fe/H]')
# axs[2].set_ylabel('[Ca/Fe]')
# axs[2].set_xlim(-4, -0.6)
# axs[2].set_ylim(-0.5, 0.7)
#
# plt.tight_layout()
# fig.subplots_adjust(hspace=0) # Remove horizontal space between axes
# plt.savefig('MgSiCa.pdf', dpi=250)
#
#
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(10, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(Fe_over_H_list, O_over_Fe_list, label='gas')
# plt.plot(stellar_Fe_over_H_list, stellar_O_over_Fe_list, label='stellar MW')
# plt.plot(stellar_Fe_over_H_list_luminosity_weighted, stellar_O_over_Fe_list_luminosity_weighted,
# label='stellar LW')
# plt.plot([-5, 1], [0, 0], color='red', ls='dashed', label='solar')
# plt.plot([0, 0], [-1, 3.5], color='red', ls='dashed')
# plt.xlabel('[Fe/H]')
# plt.ylabel('[O/Fe]')
# # plt.xlim(-5, 1)
# # plt.ylim(-1, 3.5)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_OFe-FeH_{}.pdf'.format(imf), dpi=250)
#
global SNIa_number_per_century, SNII_number_per_century
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(11, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.loglog(time_axis, SNIa_number_per_century, label='SNIa', color="tab:orange",
# ls='dotted') # Number per century
# plt.loglog(time_axis, SNII_number_per_century, label='SNII', color="tab:orange") # Number per century
# # plt.loglog(time_axis, SN_number_per_century, ls="dotted", label='total')
# plt.xlabel(r'time [yr]')
# plt.ylabel(r'# of SN per century')
# plt.title('Supernova rate evolution', fontsize=10)
# plt.xlim(10 ** 7, 14 * 10 ** 9)
# # plt.ylim(1e-2, 1e6)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_SN_number.pdf'.format(imf), dpi=250)
#
if plot_show is True or plot_save is True:
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(111, figsize=(3, 2.5))
fig.add_subplot(1, 1, 1)
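        # Convert the SNIa rate from events per century to events per year, and the time axis to Gyr, for plotting.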
SNIa_number_per_yr = [x / 100 for x in SNIa_number_per_century]
time_in_Gyr = [x / 10**9 for x in time_axis]
plt.plot(time_in_Gyr, SNIa_number_per_yr, color="k", lw=0.8)
# with open('SNIa_Lacchin.txt') as f:
# lines = f.readlines()
# time_Lacchin = [float(line.split()[0]) for line in lines]
# SNIa_Lacchin = [10 ** float(line.split()[1]) for line in lines]
# with open('SNIa_Lacchin_igimf.txt') as f:
# lines = f.readlines()
# time_Lacchin_igimf = [float(line.split()[0]) for line in lines]
# SNIa_Lacchin_igimf = [10 ** float(line.split()[1]) for line in lines]
# # plt.plot(time_Lacchin, SNIa_Lacchin, color="tab:red", label='Lacchin2019 Salpeter', ls='dashed', lw=0.7)
# plt.plot(time_Lacchin_igimf, SNIa_Lacchin_igimf, color="tab:blue", label='Lacchin2019 IGIMF', ls='dashed', lw=0.7)
plt.yscale('log')
plt.xlabel(r'time [Gyr]')
plt.ylabel(r'# of SNIa per yr')
# plt.xlim(0, 14)
# plt.ylim(4e-10, 5e-7)
# plt.ylim(1e-9, 1e-6)
# plt.legend(prop={'size': 7})
plt.tight_layout()
if plot_save is True:
            plt.savefig('galaxy_evolution_SNIa_number_loglinear.pdf', dpi=250)
#
if plot_show is True or plot_save is True:
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(112, figsize=(3, 2.5))
fig.add_subplot(1, 1, 1)
SNII_number_per_yr = [x / 100 for x in SNII_number_per_century]
time_in_Gyr = [x / 10**9 for x in time_axis]
plt.plot(time_in_Gyr, SNII_number_per_yr, color="k", lw=0.8)
# with open('SNII_Lacchin.txt') as f:
# lines = f.readlines()
# time_Lacchin = [float(line.split()[0]) for line in lines]
# SNII_Lacchin = [10**float(line.split()[1]) for line in lines]
# with open('SNII_Lacchin_igimf.txt') as f:
# lines = f.readlines()
# time_Lacchin_igimf = [float(line.split()[0]) for line in lines]
# SNII_Lacchin_igimf = [10**float(line.split()[1]) for line in lines]
# # plt.plot(time_Lacchin, SNII_Lacchin, color="tab:red", label='Lacchin2019 Salpeter', ls='dashed', lw=0.7)
# plt.plot(time_Lacchin_igimf, SNII_Lacchin_igimf, color="tab:blue", label='Lacchin2019 IGIMF', ls='dashed', lw=0.7)
plt.yscale('log')
plt.xlabel(r'time [Gyr]')
plt.ylabel(r'# of SNII per yr')
# plt.xlim(0, 1)
# plt.ylim(1e-9, 1e-5)
# plt.legend(prop={'size': 7})
plt.tight_layout()
if plot_save is True:
            plt.savefig('galaxy_evolution_SNII_number_loglinear.pdf', dpi=250)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/SN_number_evolution.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# time_axis\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(time_axis[i]))
(i) = (i + 1)
file.write("\n# SNIa_number_per_century\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(SNIa_number_per_century[i]))
(i) = (i + 1)
file.write("\n# SNII_number_per_century\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(SNII_number_per_century[i]))
(i) = (i + 1)
file.write("\n# SN_number_per_century\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(SN_number_per_century[i]))
(i) = (i + 1)
file.write("\n")
file.close()
# calculate the gravitational binding energy:
global expansion_factor_list, original_gas_mass
# consider the following to calculate the energy:
# galaxy_mass_without_gas_at_this_time,
# original_gas_mass,
# total_gas_mass_at_this_time,
# ejected_gas_mass_at_this_time,
# gas_mass = max(ejected_gas_mass_at_this_time, 1)
# galaxy mass--radii relation adopted from Dabringhausen 2008 eq.4
Dabringhausen_2008_a = 2.95
Dabringhausen_2008_b = 0.596
final_expansion_factor = expansion_factor_list[-1]
binding_energy_list = []
SN_energy_per_current_crossing_time_list = []
SN_energy_per_final_crossing_time_list = []
length_expansion_factor_list = len(expansion_factor_list)
global remnant_mass_list, stellar_mass_list
gravitational_constant = 6.674
finial_galaxy_inner_mass = remnant_mass_list[-1] + stellar_mass_list[-1]
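    # Approximate mass--radius relation r [pc] ~ 3 * (M / 10^6 M_sun)^0.6, i.e. the rounded Dabringhausen 2008 coefficients defined above.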
final_galaxy_radii = 3 * (finial_galaxy_inner_mass / 10 ** 6) ** 0.6 # in parsec
final_crossing_time = 1 / (gravitational_constant * 10 ** (-11) * finial_galaxy_inner_mass * 2 * 10 ** 30 / (
final_galaxy_radii * 3 * 10 ** 16) ** 3) ** (0.5) / (60 * 60 * 24 * 365 * 10 ** 6) # in Myr
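    # The crossing time is t_cross ~ 1/sqrt(G * M / r^3); the final division converts seconds to Myr.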
i = 0
while i < length_expansion_factor_list:
### binding energy ###
current_shrink_factor = final_expansion_factor / expansion_factor_list[i]
log_binding_energy = round(
math.log(3 / 5 * gravitational_constant * 1.989 ** 2 / 3.086, 10) + 40 + (2 - Dabringhausen_2008_b) *
math.log(original_gas_mass, 10) - math.log(Dabringhausen_2008_a, 10) +
6 * Dabringhausen_2008_b + math.log(current_shrink_factor, 10), 3)
# 40 = 30 (solar mass) * 2 - 11 (Gravitational constant) - 16 (pc to meter) + 7 (J to erg)
binding_energy = 10 ** log_binding_energy # [erg]
binding_energy_list.append(binding_energy)
### crossing time ###
current_galaxy_inner_mass = remnant_mass_list[i] + stellar_mass_list[i]
current_galaxy_radii = final_galaxy_radii / current_shrink_factor
crossing_time = 1 / (gravitational_constant * 10 ** (-11) * current_galaxy_inner_mass * 2 * 10 ** 30 / (
current_galaxy_radii * 3 * 10 ** 16) ** 3) ** (0.5) / (60 * 60 * 24 * 365 * 10 ** 6) # in Myr
SN_energy_per_current_crossing_time = SN_number_per_century[i] * crossing_time * 10 ** 4 * 10 ** 51
SN_energy_per_final_crossing_time = SN_number_per_century[i] * final_crossing_time * 10 ** 4 * 10 ** 51
SN_energy_per_current_crossing_time_list.append(SN_energy_per_current_crossing_time)
SN_energy_per_final_crossing_time_list.append(SN_energy_per_final_crossing_time)
(i) = (i + 1)
global log_binding_energy_initial
if plot_show is True or plot_save is True:
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(12, figsize=(3, 2.5))
fig.add_subplot(1, 1, 1)
# plt.loglog(time_axis, SN_energy_per_current_crossing_time_list, label='in a instant crossing time')
# plt.loglog(time_axis, SN_energy_per_final_crossing_time_list, label='in a final crossing time')
plt.loglog(time_axis, SNIa_energy_release_list, label='SNIa', ls='dashed', c='k')
plt.loglog(time_axis, SNII_energy_release_list, label='SNII', c='k', lw=1.2)
plt.loglog(time_axis, total_energy_release_list, ls="dotted", label='SNIa+SNII', c='k')
# plt.loglog(time_axis, binding_energy_list, label='binding')
plt.loglog([time_axis[0], time_axis[-1]], [10**log_binding_energy_initial, 10**log_binding_energy_initial], label='binding', lw=0.5, c='k')
# plt.loglog(time_axis, total_gas_kinetic_energy_list, label='gas kinetic')
plt.xlabel(r'time [yr]')
plt.ylabel(r'Energy [erg]')
plt.xlim(1e7, 1.1*1e10)
# plt.ylim(5e49, 2e53)
# plt.title('Energy produced by supernovae (within a crossing time)', fontsize=10)
# plt.xlim(6, 1.01 * log_time_axis[-1])
# plt.ylim(8.5, 11.6)
plt.legend(prop={'size': 7})
plt.tight_layout()
if plot_save is True:
            plt.savefig('energy_evolution.pdf', dpi=250)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/energy_evolution.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# time_axis\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(time_axis[i]))
(i) = (i + 1)
file.write("\n# SNIa_energy_release_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(SNIa_energy_release_list[i]))
(i) = (i + 1)
file.write("\n# SNII_energy_release_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(SNII_energy_release_list[i]))
(i) = (i + 1)
file.write("\n# SN_energy_per_current_crossing_time_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(SN_energy_per_current_crossing_time_list[i]))
(i) = (i + 1)
file.write("\n# SN_energy_per_final_crossing_time_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(SN_energy_per_final_crossing_time_list[i]))
(i) = (i + 1)
file.write("\n# total_energy_release_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(total_energy_release_list[i]))
(i) = (i + 1)
file.write("\n# binding_energy_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(binding_energy_list[i]))
(i) = (i + 1)
file.write("\n# total_gas_kinetic_energy_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(total_gas_kinetic_energy_list[i]))
(i) = (i + 1)
file.write("\n")
file.close()
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/energy_mass.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# final SN_energy_release\n")
file.write("{}\n".format(total_energy_release_list[-1]))
file.write("# final binding_energy\n")
file.write("{}\n".format(binding_energy_list[-1]))
file.write("# final gas_kinetic_energy\n")
file.write("{}\n".format(total_gas_kinetic_energy_list[-1]))
file.close()
# #
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(13, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# # time_axis[0] = 1
# # time_axis_G = [0]*length_of_time_axis
# # for i in range(length_of_time_axis):
# # time_axis_G[i] = time_axis[i]/10**9
# # gas_Z_over_X_list[i]=math.log(gas_Z_over_X_list[i], 10)
# plt.plot(log_time_axis, gas_Z_over_X_list, label='gas')
# plt.plot(log_time_axis, stellar_Z_over_X_list, label='stellar MW')
# plt.plot(log_time_axis, stellar_Z_over_X_list_luminosity_weighted, label='stellar LW')
# plt.plot([6, 11], [0, 0], color='red', ls='dashed', label='solar')
# # The [Z/X]s where the applied portinari98 stellar yield table will be changed for Z=0.0127, 0.008, 0.004, 0.0004.
# plt.plot([6, 11], [-1.173, -1.173], color='red', ls='dotted', lw=0.5)
# plt.plot([6, 11], [-0.523, -0.523], color='red', ls='dotted', lw=0.5)
# plt.plot([6, 11], [-0.272, -0.272], color='red', ls='dotted', lw=0.5)
# plt.xlabel(r'log(time [Gyr])')
# plt.ylabel('[Z/X]')
# plt.title('Metallicity evolution', fontsize=10)
# # plt.ylim(-2, 1)
# # if imf == 'igimf':
# # plt.title('IGIMF')
# # elif imf == 'Kroupa':
# # plt.title('Kroupa IMF')
# # plt.legend(scatterpoints=1, numpoints=1, loc=0, prop={'size': 7.5}, ncol=2)
# # plt.xlim(6.4, 1.01*time_axis[-1])
# # plt.ylim(-0.4, 0.2)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('galaxy_evolution_fig_Z_{}.pdf'.format(imf), dpi=250)
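    # Convert time_axis to log10(time) for the mass-evolution output and plot below.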
for i in range(length_of_time_axis):
time_axis[i] = math.log(time_axis[i], 10)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/Z_over_X_time.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# log_time_axis\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(log_time_axis[i]))
(i) = (i + 1)
file.write("\n# gas_Z_over_X_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(gas_Z_over_X_list[i]))
(i) = (i + 1)
file.write("\n# stellar_Z_over_X_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(stellar_Z_over_X_list[i]))
(i) = (i + 1)
file.write("\n# stellar_Z_over_X_list_luminosity_weighted\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(stellar_Z_over_X_list_luminosity_weighted[i]))
(i) = (i + 1)
file.write("\n")
file.close()
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/Z_over_X_mass.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# gas_Z_over_X\n")
file.write("{} ".format(gas_Z_over_X_list[-1]))
file.write("\n# stellar_Z_over_X_list\n")
file.write("{} ".format(stellar_Z_over_X_list[-1]))
file.write("\n# stellar_Z_over_X_list_luminosity_weighted\n")
file.write("{} ".format(stellar_Z_over_X_list_luminosity_weighted[-1]))
file.write("\n")
file.close()
#
global BH_mass_list, NS_mass_list, WD_mass_list, total_gas_mass_list, ejected_gas_mass_list
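    # Floor each mass at 1e-10 M_sun to avoid log(0), then convert the mass lists to log10 for output and plotting.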
for i in range(length_of_time_axis):
if remnant_mass_list[i] < 10 ** (-10):
remnant_mass_list[i] = 10 ** (-10)
remnant_mass_list[i] = math.log(remnant_mass_list[i], 10)
if total_gas_mass_list[i] < 10 ** (-10):
total_gas_mass_list[i] = 10 ** (-10)
total_gas_mass_list[i] = math.log(total_gas_mass_list[i], 10)
if stellar_mass_list[i] < 10 ** (-10):
stellar_mass_list[i] = 10 ** (-10)
stellar_mass_list[i] = math.log(stellar_mass_list[i], 10)
ejected_gas_mass_list[i] = math.log(ejected_gas_mass_list[i], 10)
if WD_mass_list[i] < 10 ** (-10):
WD_mass_list[i] = 10 ** (-10)
WD_mass_list[i] = math.log(WD_mass_list[i], 10)
if NS_mass_list[i] < 10 ** (-10):
NS_mass_list[i] = 10 ** (-10)
NS_mass_list[i] = math.log(NS_mass_list[i], 10)
if BH_mass_list[i] < 10 ** (-10):
BH_mass_list[i] = 10 ** (-10)
BH_mass_list[i] = math.log(BH_mass_list[i], 10)
# time_axis[0] = time_axis[1]
if plot_show is True or plot_save is True:
fig = plt.figure(14, figsize=(3, 2.5))
fig.add_subplot(1, 1, 1)
plt.plot(time_axis, total_gas_mass_list, lw=1.5, label='gas', ls='dotted', c='k')
# plt.plot(time_axis, ejected_gas_mass_list, lw=2, label='ejected gas')
plt.plot(time_axis, stellar_mass_list, lw=1.5, label='living stars', c='k')
print('plot stellar_mass final', stellar_mass_list[-1])
plt.plot(time_axis, remnant_mass_list, lw=1.5, label='stellar remnants', ls='dashed', c='k')
# plt.plot(time_axis, BH_mass_list, lw=2, label='black holes')
# plt.plot(time_axis, NS_mass_list, lw=2, label='neutron stars')
# plt.plot(time_axis, WD_mass_list, lw=2, label='white dwarfs')
plt.xlabel(r'log$_{10}$(time [yr])')
plt.ylabel(r'log$_{10}$(Mass [$M_\odot$])')
# plt.title('Mass evolution', fontsize=10)
# if imf == 'igimf':
# plt.title('IGIMF')
# elif imf == 'Kroupa':
# plt.title('Kroupa IMF')
plt.legend(prop={'size': 7})
# plt.xlim(6.4, 1.01 * time_axis[-1])
plt.xlim(6.4, 10.1)
# plt.ylim(0, 7)
plt.tight_layout()
if plot_save is True:
            plt.savefig('mass_evolution.pdf', dpi=250)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/mass_evolution.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# time_axis\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(time_axis[i]))
(i) = (i + 1)
file.write("\n# total_gas_mass_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(total_gas_mass_list[i]))
(i) = (i + 1)
file.write("\n# ejected_gas_mass_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(ejected_gas_mass_list[i]))
(i) = (i + 1)
file.write("\n# stellar_mass_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(stellar_mass_list[i]))
(i) = (i + 1)
file.write("\n# remnant_mass_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(remnant_mass_list[i]))
(i) = (i + 1)
file.write("\n# BH_mass_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(BH_mass_list[i]))
(i) = (i + 1)
file.write("\n# NS_mass_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(NS_mass_list[i]))
(i) = (i + 1)
file.write("\n# WD_mass_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(WD_mass_list[i]))
(i) = (i + 1)
file.write("\n")
file.close()
final_alive_stellar_mass = stellar_mass_list[-1]
final_remnant_stellar_mass = remnant_mass_list[-1]
final_alive_and_remnant_stellar_mass = math.log((10 ** final_alive_stellar_mass + 10 ** final_remnant_stellar_mass),
10)
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/mass_ratio.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# final alive stellar mass in log\n")
file.write("{}\n".format(final_alive_stellar_mass))
file.write("# final alive + remnant mass in log\n")
file.write("{}\n".format(final_alive_and_remnant_stellar_mass))
file.write("# (alive + remnant) / alive IN LOG\n")
file.write("{}\n".format(final_alive_and_remnant_stellar_mass - final_alive_stellar_mass))
file.close()
global total_star_formed
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/SN_number_mass.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# final SNIa_number per stellar mass formed\n")
file.write("{}\n".format(SNIa_number_list[-1] / total_star_formed))
# print("total SNIa number per solar mass of star formed:", SNIa_number_list[-1]/total_star_formed)
file.write("# final SNII_number per stellar mass formed\n")
file.write("{}\n".format(SNII_number_list[-1] / total_star_formed))
file.close()
# global expansion_factor_instantaneous_list, expansion_factor_adiabat_list
# if plot_show is True or plot_save is True:
# fig = plt.figure(15, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(time_axis, expansion_factor_instantaneous_list, label='instantaneous')
# plt.plot(time_axis, expansion_factor_adiabat_list, label='slow')
# plt.plot(time_axis, expansion_factor_list, label='average')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'expansion factor')
# plt.legend(prop={'size': 7})
# plt.title('Galaxy size evolution', fontsize=10)
# # plt.xlim(6.4, 1.01 * time_axis[-1])
# # plt.ylim(7.3, 12.2)
# # plt.ylim(6, 12)
# plt.tight_layout()
file = open('simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/plots/expansion_factor.txt'.format(imf, STF, SFR, SFEN, log_Z_0), 'w')
file.write("# time_axis\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(time_axis[i]))
(i) = (i + 1)
file.write("\n# expansion_factor_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(expansion_factor_list[i]))
(i) = (i + 1)
file.write("\n# expansion_factor_instantaneous_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(expansion_factor_instantaneous_list[i]))
(i) = (i + 1)
file.write("\n# expansion_factor_adiabatic_list\n")
i = 0
while i < length_of_time_axis:
file.write("{} ".format(expansion_factor_adiabat_list[i]))
(i) = (i + 1)
file.write("\n")
file.close()
# global ejected_gas_Mg_over_Fe_list, instant_ejected_gas_Mg_over_Fe_list
# if plot_show is True or plot_save is True:
# fig = plt.figure(16, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(time_axis, ejected_gas_Mg_over_Fe_list, label='total')
# plt.plot(time_axis, instant_ejected_gas_Mg_over_Fe_list, label='instant')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'[Mg/Fe]')
# # plt.xlim(6.4, 1.01 * time_axis[-1])
# # plt.ylim(7.3, 12.2)
# # plt.ylim(6, 12)
# plt.legend(prop={'size': 7})
# plt.title('[Mg/Fe] of the ejected gas at different time', fontsize=10)
# plt.tight_layout()
#
# global ejected_metal_mass_list
# if plot_show is True or plot_save is True:
# fig = plt.figure(17, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(time_axis, ejected_metal_mass_list, label='total')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'ejected metal mass')
# # plt.xlim(6.4, 1.01 * time_axis[-1])
# # plt.ylim(7.3, 12.2)
# # plt.ylim(6, 12)
# plt.title('IMF averaged yield at different time', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
#
# global ejected_O_mass_till_this_time_tot_list, ejected_O_mass_till_this_time_SNIa_list, ejected_O_mass_till_this_time_SNII_list
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(21, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, ejected_O_mass_till_this_time_tot_list, label='tot')
# plt.plot(log_time_axis, ejected_O_mass_till_this_time_SNIa_list, label='from SNIa')
# plt.plot(log_time_axis, ejected_O_mass_till_this_time_SNII_list, label='from SNII')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'ejected O [$M_\odot$]')
# plt.title('IMF averaged yield', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
#
# global ejected_Mg_mass_till_this_time_tot_list, ejected_Mg_mass_till_this_time_SNIa_list, ejected_Mg_mass_till_this_time_SNII_list
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(22, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, ejected_Mg_mass_till_this_time_tot_list, label='tot')
# plt.plot(log_time_axis, ejected_Mg_mass_till_this_time_SNIa_list, label='from SNIa')
# plt.plot(log_time_axis, ejected_Mg_mass_till_this_time_SNII_list, label='from SNII')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'ejected Mg [$M_\odot$]')
# plt.title('IMF averaged yield from different type of SN', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
#
# global ejected_Fe_mass_till_this_time_tot_list, ejected_Fe_mass_till_this_time_SNIa_list, ejected_Fe_mass_till_this_time_SNII_list
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(23, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, ejected_Fe_mass_till_this_time_tot_list, label='tot')
# plt.plot(log_time_axis, ejected_Fe_mass_till_this_time_SNIa_list, label='from SNIa')
# plt.plot(log_time_axis, ejected_Fe_mass_till_this_time_SNII_list, label='from SNII')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'ejected Fe [$M_\odot$]')
# plt.title('IMF averaged yield from different type of SN', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('Fe_production.pdf', dpi=250)
#
# global ejected_Ca_mass_till_this_time_tot_list, ejected_Ca_mass_till_this_time_SNIa_list, ejected_Ca_mass_till_this_time_SNII_list
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(24, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, ejected_Ca_mass_till_this_time_tot_list, label='tot')
# plt.plot(log_time_axis, ejected_Ca_mass_till_this_time_SNIa_list, label='from SNIa')
# plt.plot(log_time_axis, ejected_Ca_mass_till_this_time_SNII_list, label='from SNII')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'ejected Ca [$M_\odot$]')
# plt.title('IMF averaged yield from different type of SN', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('Ca_production.pdf', dpi=250)
#
# global ejected_S_mass_till_this_time_tot_list, ejected_S_mass_till_this_time_SNIa_list, ejected_S_mass_till_this_time_SNII_list
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(25, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, ejected_S_mass_till_this_time_tot_list, label='tot')
# plt.plot(log_time_axis, ejected_S_mass_till_this_time_SNIa_list, label='from SNIa')
# plt.plot(log_time_axis, ejected_S_mass_till_this_time_SNII_list, label='from SNII')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'ejected S [$M_\odot$]')
# plt.title('IMF averaged yield from different type of SN', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('S_production.pdf', dpi=250)
#
# global ejected_Si_mass_till_this_time_tot_list, ejected_Si_mass_till_this_time_SNIa_list, ejected_Si_mass_till_this_time_SNII_list
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(26, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, ejected_Si_mass_till_this_time_tot_list, label='tot')
# plt.plot(log_time_axis, ejected_Si_mass_till_this_time_SNIa_list, label='from SNIa')
# plt.plot(log_time_axis, ejected_Si_mass_till_this_time_SNII_list, label='from SNII')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'ejected Si [$M_\odot$]')
# plt.title('IMF averaged yield from different type of SN', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('Si_production.pdf', dpi=250)
#
# global ejected_Ne_mass_till_this_time_tot_list, ejected_Ne_mass_till_this_time_SNIa_list, ejected_Ne_mass_till_this_time_SNII_list
# if plot_show is True or plot_save is True:
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(27, figsize=(3, 2.5))
# fig.add_subplot(1, 1, 1)
# plt.plot(log_time_axis, ejected_Ne_mass_till_this_time_tot_list, label='tot')
# plt.plot(log_time_axis, ejected_Ne_mass_till_this_time_SNIa_list, label='from SNIa')
# plt.plot(log_time_axis, ejected_Ne_mass_till_this_time_SNII_list, label='from SNII')
# plt.xlabel(r'log$_{10}$(time [yr])')
# plt.ylabel(r'ejected Ne [$M_\odot$]')
# plt.title('IMF averaged yield from different type of SN', fontsize=10)
# plt.legend(prop={'size': 7})
# plt.tight_layout()
# if plot_save is True:
# plt.savefig('Ne_production.pdf', dpi=250)
if True: # plot_show is True:
plt.show()
return
def generate_SFH(distribution, Log_SFR, SFEN, sfr_tail, skewness, location):
if distribution == "skewnorm":
generate_sfh_skewnorm(Log_SFR, SFEN, sfr_tail, skewness, location)
elif distribution == "flat":
generate_sfh_flat(Log_SFR, SFEN)
elif distribution == "flat_tail":
generate_sfh_flat_tail(Log_SFR, SFEN)
elif distribution == "lognorm":
generate_sfh_lognorm(Log_SFR, SFEN)
elif distribution == "given":
generate_sfh_given()
else:
        print('Warning: unrecognized distribution name input to galevo.generate_SFH')
return
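# A minimal usage sketch with hypothetical values: generate_SFH("flat", Log_SFR=-2.47, SFEN=3, sfr_tail=0, skewness=10, location=0)
# writes SFH.txt with SFEN flat 10-Myr epochs at a constant SFR of 10**-2.47 solar mass per yr, padded with zero-SFR epochs.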
# def generate_sfh_given():
# file = open('SFH.txt', 'w')
# file.write("0\n")
# j = 0
# while j < 1:
# file.write("0.0025\n")
# (j) = (j + 1)
# j = 1
# while j < 10:
# file.write("{}\n".format(0.0025 * 0.6 ** j))
# (j) = (j + 1)
# j = 0
# while j < 1301 - SFEN:
# file.write("0\n")
# (j) = (j + 1)
# file.write("# The value in each line stand for the SFR [solar mass / yr]\n")
# file.write("# in a star formation epoch (10 Myr)\n")
# file.write("# start from time 0 for the first line.\n")
# file.write("# Warning! Effective line number must be larger than 1.\n")
# file.write("# Add a '0' in the next line if there is only one line.\n")
#
# file.close()
# return
def generate_sfh_given():
file = open('SFH.txt', 'w')
file.write("0\n")
file.write("0.0007\n")
j = 1
while j < 10:
file.write("0.0009\n")
(j) = (j + 1)
j = 1
while j < 90:
file.write("{}\n".format(0.0009 * 0.9 ** j))
(j) = (j + 1)
j = 0
while j < 1301 - SFEN:
file.write("0\n")
(j) = (j + 1)
file.write("# The value in each line stand for the SFR [solar mass / yr]\n")
file.write("# in a star formation epoch (10 Myr)\n")
file.write("# start from time 0 for the first line.\n")
file.write("# Warning! Effective line number must be larger than 1.\n")
file.write("# Add a '0' in the next line if there is only one line.\n")
file.close()
return
# def generate_sfh_given():
# file = open('SFH.txt', 'w')
# file.write("0\n")
# j = 0
# while j < 67:
# file.write("0.00005\n")
# (j) = (j + 1)
# j = 0
# while j < 20:
# file.write("{}\n".format(0.00005 * 0.9 ** j))
# (j) = (j + 1)
# j = 0
# while j < 1301 - SFEN:
# file.write("0\n")
# (j) = (j + 1)
# file.write("# The value in each line stand for the SFR [solar mass / yr]\n")
# file.write("# in a star formation epoch (10 Myr)\n")
# file.write("# start from time 0 for the first line.\n")
# file.write("# Warning! Effective line number must be larger than 1.\n")
# file.write("# Add a '0' in the next line if there is only one line.\n")
#
# file.close()
# return
def generate_sfh_flat(Log_SFR, SFEN):
# Flat distribution for star formation history
# took input: star formation rate, star formation event number
file = open('SFH.txt', 'w')
file.write("0\n")
j = 0
while j < SFEN:
file.write("{}\n".format(10 ** Log_SFR))
(j) = (j + 1)
j = 0
while j < 1301 - SFEN:
file.write("0\n")
(j) = (j + 1)
file.write("# The value in each line stand for the SFR [solar mass / yr]\n")
file.write("# in a star formation epoch (10 Myr)\n")
file.write("# start from time 0 for the first line.\n")
file.write("# Warning! Effective line number must be larger than 1.\n")
file.write("# Add a '0' in the next line if there is only one line.\n")
file.close()
return
def generate_sfh_flat_tail(Log_SFR, SFEN):
# Flat distribution for star formation history
# took input: star formation rate, star formation event number
file = open('SFH.txt', 'w')
file.write("0\n")
j = 0
while j < SFEN:
file.write("{}\n".format(10 ** Log_SFR))
(j) = (j + 1)
j = 0
while j < 1389 - SFEN:
file.write("0\n")
(j) = (j + 1)
j = 0
while j < 10:
file.write("{}\n".format(10 ** (Log_SFR-2)))
(j) = (j + 1)
file.write("# The value in each line stand for the SFR [solar mass / yr]\n")
file.write("# in a star formation epoch (10 Myr)\n")
file.write("# start from time 0 for the first line.\n")
file.write("# Warning! Effective line number must be larger than 1.\n")
file.write("# Add a '0' in the next line if there is only one line.\n")
file.close()
return
def generate_sfh_skewnorm(Log_SFR, SFEN, sfr_tail, skewness, location):
tot_sf_set = 10 ** Log_SFR * SFEN
tot_sf = 0
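    # Increase the number of star-formation epochs until the skew-normal SFH sums to the requested total stellar mass.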
while tot_sf < tot_sf_set:
SFEN += 1
result_cal_tot_sf = cal_tot_sf(Log_SFR, SFEN, skewness, location)
(tot_sf) = (result_cal_tot_sf[0])
file = open('SFH.txt', 'w')
file.write("0\n")
sfr_for_this_epoch = 0
result_starburst_sf = 0
result_tail_sf = 0
j = 0
while j < SFEN:
sfr_for_this_epoch = result_cal_tot_sf[1] * result_cal_tot_sf[2][j]
file.write("{}\n".format(sfr_for_this_epoch))
if sfr_for_this_epoch > 10 ** Log_SFR / 2:
result_starburst_sf += sfr_for_this_epoch
else:
result_tail_sf += sfr_for_this_epoch
(j) = (j + 1)
sfr_for_the_tail_epoch = sfr_for_this_epoch / 2
if sfr_tail == 0:
j = 0
while j < 1301 - SFEN:
file.write("0\n")
(j) = (j + 1)
elif sfr_tail == 1:
j = 0
while j < 101:
file.write("{}\n".format(sfr_for_the_tail_epoch))
result_tail_sf += sfr_for_the_tail_epoch
(j) = (j + 1)
while j < 1301 - SFEN:
file.write("0\n")
(j) = (j + 1)
file.write("# The value in each line stand for the SFR [solar mass / yr]\n")
file.write("# in a star formation epoch (10 Myr)\n")
file.write("# start from time 0 for the first line.\n")
file.write("# Warning! Effective line number must be larger than 1.\n")
file.write("# Add a '0' in the next line if there is only one line.\n")
if sfr_tail == 1:
print("star formation tail (after the SFR is lower than half of the maximum value) contributes {}% "
"of the total star formation.".format(
round(result_tail_sf / (result_starburst_sf + result_tail_sf) * 100, 2)))
file.close()
return
def generate_sfh_lognorm(Log_SFR, SFEN):
tot_sf_set = 10 ** Log_SFR * SFEN
time_length_in_Gyr = 13
time_step_number = time_length_in_Gyr * 100
from scipy.stats import lognorm
s = 1
sc = SFEN / 2
time_list = np.linspace(0, time_step_number, time_step_number)
star_formation_rate = tot_sf_set * lognorm.pdf(time_list, s, scale=sc)
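    # Truncate the SFH: epochs from the 20th onward (i.e. after 200 Myr) are set to zero.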
i = 20
while i < time_step_number:
# if star_formation_rate[i] < star_formation_rate[1]/10:
# star_formation_rate[i] = 0
star_formation_rate[i] = 0
(i) = (i+1)
file = open('SFH.txt', 'w')
for i in range(time_step_number):
file.write("{}\n".format(star_formation_rate[i]))
file.write("# The value in each line stand for the SFR [solar mass / yr]\n")
file.write("# in a star formation epoch (10 Myr)\n")
file.write("# start from time 0 for the first line.\n")
file.write("# Warning! Effective line number must be larger than 1.\n")
file.write("# Add a '0' in the next line if there is only one line.\n")
file.close()
# plt.plot(time_list, star_formation_rate, label='lognorm SFH')
# plt.xlabel('time step')
# plt.ylabel(r'SFR [solar mass/year]')
# plt.show()
return
def cal_tot_sf(SFR, SFEN, skewness, location):
# Skew normal distribution for star formation history
# took input: maximum star formation rate, star formation event number
# from scipy.stats import f
from scipy.stats import skewnorm
x = np.linspace(skewnorm.ppf(0.01, skewness, location, 1), skewnorm.ppf(0.999999999, skewness, location, 1), SFEN)
y = skewnorm.pdf(x, skewness, location, 1)
# skewnorm.pdf(x, a, loc, scale) is the location and scale parameters,
# [identically equivalent to skewnorm.pdf(y, a) / scale with y = (x - loc) / scale]
# The scale is not used as the SFEN & SFR setup the scale through parameter tot_sf_set & mult.
mult = 10 ** SFR / max(y)
j = 0
tot_sf = 0
while j < SFEN:
sf = mult * y[j]
tot_sf += sf
(j) = (j + 1)
return tot_sf, mult, y
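# cal_tot_sf returns the total star formation (tot_sf), the SFR normalization factor (mult),
# and the per-epoch skew-normal weights (y) used by generate_sfh_skewnorm.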
if __name__ == '__main__':
SFEN = "None"
### Generate a new SFH.txt file according to the following given parameters ###
    SFEN = 3  # the number of 10 Myr star formation epochs (thus 10 stands for a star formation timescale of 100 Myr)
Log_SFR = -2.47 # logarithmic characteristic star formation rate
location = 0 # SFH shape parameter
skewness = 10 # SFH shape parameter
sfr_tail = 0 # SFH shape parameter
# generate_SFH("given", Log_SFR, SFEN, sfr_tail, skewness, location)
## input "flat", "lognorm", "skewnorm", or "given" to generate a boxy, lognormal, or skewnorm SFH, respectively.
####################################
    # str_yield_table='WW95' or 'portinari98' or 'Kobayashi06' specifies the stellar evolution table.
    # imf='igimf' or 'Kroupa' or 'Salpeter' or 'diet_Salpeter'...(see line 652 above) specifies the galaxy IMF model.
    # SFH_model='provided' or 'gas_mass_dependent' specifies the star formation history:
    # the 'provided' SFH is given in SFH.txt;
    # the 'gas_mass_dependent' option uses SFH.txt to set up the initial condition,
    # then recalculates the SFR at each timestep, resulting in an SFH similar to SFH.txt but gas-mass dependent.
    # SNIa_yield_table='Thielemann1993' or 'Seitenzahl2013' or 'Iwamoto1999'
    # solar_abu_table='Anders1989' or 'Asplund2009'
galaxy_evol(imf='igimf', STF=0.05, SFEN=SFEN, Z_0=0.015*1e-16, solar_mass_component="Asplund2009_mass",
str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
SFH_model='provided', SFE=0.004, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
solar_abu_table='Asplund2009',
high_time_resolution=None, plot_show=None, plot_save=None, outflow=100, check_igimf=None)
    # Use plot_show=True on a personal computer to view the simulation result immediately after the computation.
# Use plot_show=None if running on a computer cluster to avoid possible issues.
# In both cases, the simulation results are saved as txt files.
# galaxy_evol(imf='Salpeter', STF=0.039, SFEN=SFEN, Z_0=0.02 * 1e-1, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='provided', SFE=0.0015, SNIa_ON='SD', SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=True, plot_show=None, plot_save=None, outflow=100, check_igimf=None)
# # Model Reproduce L19-Salpeter
# # Change line 1128 53.7 to 53.75
# # # Reproducing the Salpeter IMF model:
# #
# galaxy_evol(imf='Salpeter', STF=0.01688, SFEN=SFEN, Z_0=0.02*1e-16, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.001, SNIa_ON='SD', SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=100, check_igimf=None)
# # # Model Reproduce L19-R14
# # # Change line 1128 53.68 STF=0.0194
# # # # Reproducing the IGIMF model:
# # #
# galaxy_evol(imf='igimf', STF=0.01688, SFEN=SFEN, Z_0=0.02*1e-7, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.00082, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=105, check_igimf=None)
# # Model IGIMF-R14
# # R14 change galimf.py line 1050, 1053, 1063; 1166, 1169, 1173
# # SFH with 0.7, 0.9... to line 48
# # line 1801
# galaxy_evol(imf='igimf', STF=0.042, SFEN=SFEN, Z_0=0.02 * 1e-7, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.0017, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=100, check_igimf=None)
# # #
# # Model IGIMF-R14-SD with the Greggio 1983 SNIa rate
# galaxy_evol(imf='igimf', STF=0.039, SFEN=SFEN, Z_0=0.02 * 1e-7, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.0015, SNIa_ON='SD', SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=None, outflow=100, check_igimf=None)
# # Model ?
# # # with IGIMF3: from R14 to IGIMF2 change galimf.py line 1050, 1053, 1063; 1166, 1168, 1169, 1172, 1173
# galaxy_evol(imf='igimf', STF=0.08, SFEN=SFEN, Z_0=0.02*1e-7, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.004, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=100, check_igimf=None)
# # Model with different SFH?
# galaxy_evol(imf='igimf', STF=0.0218, SFEN=SFEN, Z_0=0.02*1e-16, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.004, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=100, check_igimf=None)
# # Model IGIMF2
# galaxy_evol(imf='igimf', STF=0.073, SFEN=SFEN, Z_0=0.02 * 1e-7, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.0045, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=100, check_igimf=None)
# # Model IGIMF3
# galaxy_evol(imf='igimf', STF=0.03, SFEN=SFEN, Z_0=0.02 * 1e-7, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.002, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=100, check_igimf=None)
# # Model IGIMF4
# galaxy_evol(imf='igimf', STF=0.052, SFEN=SFEN, Z_0=0.02 * 1e-7, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.0035, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=100, check_igimf=None)
# # Model IGIMF-Z
# galaxy_evol(imf='igimf', STF=0.043, SFEN=SFEN, Z_0=0.02 * 1e-7, solar_mass_component="Asplund2009_mass",
# str_yield_table='Kobayashi06', IMF_name='Kroupa', steller_mass_upper_bound=150,
# time_resolution_in_Myr=1, mass_boundary_observe_low=1.5, mass_boundary_observe_up=8,
# SFH_model='gas_mass_dependent', SFE=0.0035, SNIa_ON=True, SNIa_yield_table='Iwamoto1999',
# solar_abu_table='Asplund2009',
# high_time_resolution=None, plot_show=True, plot_save=True, outflow=100, check_igimf=None)
| 303,264 | 51.134262 | 212 | py |
galIMF | galIMF-master/igimf_calculator.py | # Python3 code
# Made by: Yan Zhiqiang & Tereza Jerabkova
# An example file that construct galaxy-wide IMF according to the input parameters in file "input_parameters.txt"
# The outputs of this example are:
# - the comparison plot of galaxy-wide IMF, canonical IMF, and the histogram of stellar masses (optional);
# - the txt file containing the galaxy-wide IMF.
# - the txt file containing the number of stars in each mass bin (optional);
# --------------------------------------------------------------------------------------------------------------------------------
# Import modules and libraries
# --------------------------------------------------------------------------------------------------------------------------------
import galimf # galIMF containing IGIMF function and OSGIMF function and additional computational modules
import numpy as np
import math
import time
import sys
# --------------------------------------------------------------------------------------------------------------------------------
# Import parameters from file or inputted by hand:
# --------------------------------------------------------------------------------------------------------------------------------
if len(sys.argv) == 1:
print("Input parameters from the file 'input_parameters.txt':")
file = open('input_parameters.txt', 'r')
data_txt = file.readlines()
file.close()
data = [x for x in data_txt[0].split()]
SFR = float(data[0])
M_over_H = float(data[1])
gwIMF_model = data[2]
OSrequest = data[3]
M_str_L = float(data[4])
M_str_U = float(data[5])
elif len(sys.argv) < 7:
print("There needs to be none or 6 input arguments, being:\n"
"SFR, Metallicity, gwIMF model, OSGIMF, Lowest stellar mass, Highest stellar mass\n"
"You can input 'D' to apply the default parameter value:\n"
"1, 0, IGIMF_Z, 0, 0.08, 150\n"
"If there are no input parameters, the program will look for the input from file.\n")
else:
print("Input parameters:")
if sys.argv[1] == "D" or sys.argv[1] == "d":
SFR = 1
else:
SFR = float(sys.argv[1])
if sys.argv[2] == "D" or sys.argv[2] == "d":
M_over_H = 0
else:
M_over_H = float(sys.argv[2])
if sys.argv[3] == "D" or sys.argv[3] == "d":
gwIMF_model = "IGIMF_Z"
else:
gwIMF_model = sys.argv[3]
OSrequest = sys.argv[4]
if sys.argv[5] == "D" or sys.argv[5] == "d":
M_str_L = 0.08
else:
M_str_L = float(sys.argv[5])
if sys.argv[6] == "D" or sys.argv[6] == "d":
M_str_U = 150
else:
M_str_U = float(sys.argv[6])
print("SFR={}, M_over_H={}, gwIMF_model={}, OSrequest={}, M_str_L={}, M_str_U={}, ".format(SFR, M_over_H, gwIMF_model, OSrequest, M_str_L, M_str_U))
bindw = galimf.resolution_histogram_relative = 10 ** (max((0 - math.log(SFR, 10)), 0) ** 0.2 - 1.9)
# Automatically adjusts the histogram resolution for optimal sampling according to the SFR value.
if gwIMF_model == "IGIMF3":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 1 # 1 # see file 'galimf.py'
alpha1_model = 1 # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF_Z":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'Z' # 1 # see file 'galimf.py'
alpha1_model = 'Z' # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF2d5":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'IGIMF2.5' # 1 # see file 'galimf.py'
alpha1_model = 'IGIMF2.5' # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF2":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 0 # 1 # see file 'galimf.py'
alpha1_model = 0 # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF_R14":
alpha3_model = 'R14' # 'R14' # 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'R14' # 'R14' # 1 # see file 'galimf.py'
alpha1_model = 0 # 0 # see file 'galimf.py'
beta_model = 0
R14orNOT = True
# alpha3_model = 1 # IMF high-mass-end power-index model, see file 'galimf.py'
# alpha_2 = 2.3 # IMF middle-mass power-index
# alpha_1 = 1.3 # IMF low-mass-end power-index
# alpha2_model = 1 # see file 'galimf.py'
# alpha1_model = 1 # see file 'galimf.py'
# beta_model = 1
# M_str_L = 0.08 # star mass lower limit [solar mass]
# M_str_U = 150 # star mass upper limit [solar mass]
M_turn = 0.5 # IMF power-index breaking mass [solar mass]
M_turn2 = 1. # IMF power-index breaking mass [solar mass]
M_ecl_U = 10**9 # embedded cluster mass upper limit [solar mass]
M_ecl_L = 5. # embedded cluster mass lower limit [solar mass]
delta_t = 10. # star formation epoch [Myr]
I_ecl = 1. # normalization factor in the Optimal Sampling condition equation
I_str = 1. # normalization factor in the Optimal Sampling condition equation
# --------------------------------------------------------------------------------------------------------------------------------
# Construct IGIMF:
# --------------------------------------------------------------------------------------------------------------------------------
# print("\n Calculating galaxy-wide IMF......")
start_time = time.time()
galimf.function_galimf(
"I", # IorS ### "I" for IGIMF; "OS" for OSGIMF
'IGIMF', # 'R14'
SFR, # Star Formation Rate [solar mass / yr]
alpha3_model, # IMF high-mass-end power-index model, see file 'galimf.py'
delta_t, # star formation epoch [Myr]
M_over_H,
I_ecl, # normalization factor in the Optimal Sampling condition equation
M_ecl_U, # embedded cluster mass upper limit [solar mass]
M_ecl_L, # embedded cluster mass lower limit [solar mass]
beta_model, # ECMF power-index model, see file 'galimf.py'
I_str, # normalization factor in the Optimal Sampling condition equation
M_str_L, # star mass lower limit [solar mass]
alpha_1, # IMF low-mass-end power-index
alpha1_model, # see file 'galimf.py'
M_turn, # IMF power-index change point [solar mass]
alpha_2, # IMF middle-mass power-index
alpha2_model, # see file 'galimf.py'
M_turn2, # IMF power-index change point [solar mass]
M_str_U, # star mass upper limit [solar mass]
printout=True # save the generated IMF
)
masses_igimf = np.array(galimf.List_M_str_for_xi_str)
igimf = np.array(galimf.List_xi)
# --------------------------------------------------------------------------------------------------------------------------------
# Normalization:
# --------------------------------------------------------------------------------------------------------------------------------
# igimf is normalized by default to the total mass formed in 10 Myr given the SFR.
# To change the normalization, follow the commented code below:
# Norm = simps(igimf*masses_igimf,masses_igimf) #- normalization to a total mass
# Norm = simps(igimf,masses_igimf) #- normalization to number of stars
# Mtot1Myr = SFR*10*1.e6 #total mass formed in 10 Myr
# igimf = np.array(igimf)*Mtot1Myr/Norm
# --------------------------------------------------------------------------------------------------------------------------------
# Construct OSGIMF if required by interactive input:
# --------------------------------------------------------------------------------------------------------------------------------
if OSrequest == "y" or OSrequest == "Y" or OSrequest == "yes" or OSrequest == "Yes" or OSrequest == "1":
resol = 0.2/(10**(math.log(SFR, 10)/8))
galimf.resolution_histogram_relative = bindw / resol
start_time = time.time()
galimf.function_galimf(
"OS", # IorS ### "I" for IGIMF; "OS" for OSGIMF
'IGIMF', # 'R14'
SFR, # Star Formation Rate [solar mass / yr]
alpha3_model, # IMF high-mass-end power-index model, see file 'galimf.py'
delta_t, # star formation epoch [Myr]
M_over_H,
I_ecl, # normalization factor in the Optimal Sampling condition equation
M_ecl_U, # embedded cluster mass upper limit [solar mass]
M_ecl_L, # embedded cluster mass lower limit [solar mass]
beta_model, # ECMF power-index model, see file 'galimf.py'
I_str, # normalization factor in the Optimal Sampling condition equation
M_str_L, # star mass lower limit [solar mass]
alpha_1, # IMF low-mass-end power-index
alpha1_model, # see file 'galimf.py'
M_turn, # IMF power-index change point [solar mass]
alpha_2, # IMF middle-mass power-index
alpha2_model, # see file 'galimf.py'
M_turn2, # IMF power-index change point [solar mass]
M_str_U, # star mass upper limit [solar mass]
printout=True # save the generated OSGIMF
)
    # The following variables hold the number of stars in each mass bin assuming optimal sampling:
mass_range_center = galimf.mass_range_center
mass_range = galimf.mass_range
mass_range_upper_limit = galimf.mass_range_upper_limit
mass_range_lower_limit = galimf.mass_range_lower_limit
star_number = galimf.star_number
mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number = zip(
*sorted(zip(mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number)))
masses_osgimf = np.array(galimf.List_mass_grid_x_axis) + 1.e-50
osgimf = np.array(galimf.List_star_number_in_mass_grid_y_axis) + 1.e-50
# --------------------------------------------------------------------------------------------------------------------------------
# # Plot
# --------------------------------------------------------------------------------------------------------------------------------
# import matplotlib.pyplot as plt # matplotlib for plotting
# from scipy.integrate import quad
#
# fig0 = plt.figure(figsize=(3.4, 2.5))
# plt.plot(np.log10(masses_igimf + 1.e-50), np.log10(igimf + 1.e-50), color='blue', lw=2.5, label='Galaxy-wide IMF')
# ylim_min = np.min(igimf + 1.e-50)
# ylim_max = np.max(igimf + 1.e-50)
# plt.ylim(np.log10(ylim_min), np.log10(ylim_max))
# if OSrequest == "y" or OSrequest == "Y" or OSrequest == "yes" or OSrequest == "Yes" or OSrequest == "1":
# plt.plot(np.log10(masses_osgimf), np.log10(osgimf), color='green', lw=2.5, label='Stellar mass histogram')
#
# for k in range(20):
# sal_IMF = masses_igimf ** (-2.3)
# plt.plot(np.log10(masses_igimf), np.log10((1.e5*np.max(igimf)/np.max(sal_IMF))*sal_IMF)-k, c='grey', lw=0.5)
#
# N = 100
# can_imf = np.zeros(N)
# masses_igimf = np.logspace(np.log10(0.08), np.log10(120), N, base=10)
#
# for i, m in enumerate(masses_igimf):
# if m <= 0.5:
# can_imf[i] = m ** (-1.3)
# else:
# can_imf[i] = 0.5*m ** (-2.3)
#
#
# def imf(mass, k_star, alpha):
# return k_star*mass*mass**(-alpha)
#
#
# Norm = quad(imf, 0.08, 0.5, args=(1, 1.3))[0] + quad(imf, 0.5, 120, args=(0.5, 2.3))[0]
# Mtot1Myr = SFR*10*1.e6 # total mass formed in 10 Myr
# can_imf = np.array(can_imf)*Mtot1Myr/Norm
# plt.plot(np.log10(masses_igimf), np.log10(can_imf), color='r', lw=2, label='canonical IMF')
#
# if ylim_max < np.max(can_imf):
# ylim_max = np.max(can_imf)
#
# plt.xlabel('$\log{(m\,[M_{\odot}])}$')
# plt.ylabel('$\log{(\\xi_{\mathrm{gal}}\,[M_{\odot}^{-1}])}$')
#
# plt.ylim(np.log10(ylim_min), np.log10(ylim_max))
# plt.xlim(math.log(0.06, 10), math.log(160, 10))
#
# plt.legend(loc='best', ncol=1, fancybox=True, prop={'size': 7})
# plt.tight_layout()
# fig0.savefig('galaxy_wide_IMF_plot.pdf', dpi=250)
#
# plt.show()
| 12,511 | 45.686567 | 148 | py |
galIMF | galIMF-master/example_galaxy_evolution.py | # Python3 code
# An example
import galevo
print("\n ================================\n"
" === example_galaxy_evolution ===\n"
" ================================\n")
print(" This test code serves as an example, "
"explaining (see comments in the code) the input parameters of the galaxy chemical evolution model.\n")
Log_SFR = float(input(
" Please input the logarithmic star formation rate in the unit of solar mass per yr "
"and ended the input with the return key.\n"
" A typical input SFR is from -4 to 4. "
"Note that the code does not support extremely low SFR "
"as the IMF integration error is significant for very top-light gwIMFs.\n\n"
" log_{10}(SFR [M_sun/yr]) = "))
SFH_shape = input(
"\n\n Please input the shape of the SFH "
"and ended the input with the return key.\n"
" The input can only be: 1 for a flat SFH or 2 for a skewnorm SFH, where the latter cost more calculation time.\n\n"
" ")
if SFH_shape == '1':
SFH_shape = 'flat'
elif SFH_shape == '2':
SFH_shape = 'skewnorm'
# Other SFH shape parameters
location = 0
skewness = 10
sfr_tail = 0
SFEN = round(float(input(
"\n\n Please input the characteristic star formation timescale in the unit of 10 Myr (integer only) "
"and ended the input with the return key.\n"
" We recommend a value smaller than 10 for 'flat' SFH and smaller than 3 for 'skewnorm' SFH for the first run, "
"as longer timescale calculations take more time.\n\n"
" SFT [10Myr] = ")))
if SFEN < 1:
print("\n\n### Warning: Wrong input 'SFEN' smaller than 1! Correct SFEN to 1. ###\n\n")
SFEN = 1
print('\nGenerating new SFH...')
galevo.generate_SFH(SFH_shape, Log_SFR, SFEN, sfr_tail, skewness, location)
print('\nStart galaxy simulation...\n')
galevo.galaxy_evol(
imf='igimf',
    STF=0.3, # unrealistic results if more stellar mass forms in a time step than the instantaneous gas mass
SFEN=SFEN,
Z_0=0.00000001886,
solar_mass_component="Anders1989_mass",
str_yield_table='portinari98',
IMF_name='Kroupa',
steller_mass_upper_bound=150,
time_resolution_in_Myr=1,
mass_boundary_observe_low=1.5,
mass_boundary_observe_up=8,
SFH_model='provided',
SFE=0.013, # This parameter is not applied when SFH_model='provided'.
SNIa_ON=True,
high_time_resolution=None,
plot_show=True,
plot_save=None,
outflow=None,
check_igimf=True)
| 2,473 | 32.890411 | 123 | py |
galIMF | galIMF-master/galIMF_version_1.0.py | ######## galIMF ##########
# python3 code, last update Sat 27 May
# This is the main module, galIMF.py, controlling and operating the other two modules, IGIMF and OSGIMF
# --------------------------------------------------------------------------------------------------------------------------------
# importing modules and libraries
import math
import csv # csv and izip/zip are used to create output files
try:
from itertools import izip as zip
except ImportError: # will be python 3.x series
pass
#--------------------------------------------------------------------------------------------------------------------------------
# The star mass resolution is the lower resolution among
# the resolution of histogram (resolution_histogram_relative)
# and the resolution of star generation (resolution_star_... in the file IMF_schulz.py)
resolution_histogram_relative = 0.01 # The star mass resolution of histogram is: the star mass * resolution_histogram_relative
# it is also re-defined in the test files, where it scales automatically with the SFR
# function_galIMF takes in IGIMF/OSGIMF parameters and creates output files
def function_galIMF(IorS, SFR, alpha3_model, delta_t, M_over_H, I_ecl, M_ecl_U, M_ecl_L, beta_model,
I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_str_U, printout=True):
if IorS == "I":
global List_xi, List_M_str_for_xi_str
Function_draw_IGIMF(SFR, alpha3_model, beta_model, delta_t, M_over_H,
I_ecl, M_ecl_U, M_ecl_L, I_str, M_str_L, alpha_1, alpha1_model,
M_turn, alpha_2, alpha2_model, M_turn2, M_str_U)
if printout is True:
# write data for GalIMF_Result/IGIMF_shape
with open('Galaxy_wide_IMF.txt', 'w') as f:
writer = csv.writer(f, delimiter=' ')
f.write("# Galaxy-wide IMF output file.\n# The columns are:\n# mass xi\n# where xi=dN/dm ("
"see Yan et.al 2017 A&A...607A.126Y)\n\n")
writer.writerows(
zip(List_M_str_for_xi_str, List_xi))
print("\n ### Galaxy-wide IMF data saved in the file Galaxy_wide_IMF.txt ###\n")
return
elif IorS == "OS":
global mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number
sample_for_one_epoch(SFR, alpha3_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_model,
I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_over_H, M_str_U)
Function_draw(SFR, M_str_L, M_str_U, M_ecl_L, resolution_histogram_relative)
function_make_drop_line()
# write data for GalIMF_Result/histogram
function_draw_histogram()
if printout is True:
with open('Galaxy_stellar_mass_histogram.txt', 'w') as f:
writer = csv.writer(f, delimiter=' ')
f.write(
"# Stellar mass histogram output file. It gives the generated number of stars in different "
"mass range.\n# The columns are:\n# mass_range_center mass_range mass_range_upper_limit mass_"
"range_lower_limit star_number_in_the_mass_range\n\n")
writer.writerows(
zip(mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number))
print("\n ### Stellar mass histogram data saved in the file Galaxy_stellar_mass_histogram.txt ###\n")
return
else:
print("Input wrong parameter for 'IorS'!")
return
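# --- Illustrative usage sketch (not part of the original module) ---
# Shows how function_galIMF can be called to compute a galaxy-wide IMF. Every numerical
# value below is an assumption chosen only for illustration; see the galIMF documentation
# and Yan et al. (2017) for physically motivated inputs.
def _example_function_galIMF_call():
    function_galIMF(
        "I", # "I" for the IGIMF, "OS" for the optimally sampled stellar masses
        1., # SFR [solar mass / yr] (assumed)
        2, # alpha3_model (assumed)
        10., # delta_t [Myr] (assumed)
        0., # M_over_H (assumed solar)
        1., # I_ecl (assumed)
        10 ** 9, # M_ecl_U [solar mass] (assumed)
        5., # M_ecl_L [solar mass] (assumed)
        1, # beta_model (assumed)
        1., # I_str (assumed)
        0.08, # M_str_L [solar mass] (assumed)
        1.3, # alpha_1 (assumed)
        0, # alpha1_model (assumed)
        0.5, # M_turn [solar mass] (assumed)
        2.3, # alpha_2 (assumed)
        0, # alpha2_model (assumed)
        1., # M_turn2 [solar mass] (assumed)
        150., # M_str_U [solar mass] (assumed)
        printout=False)
    return List_M_str_for_xi_str, List_xi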
######## IGIMF.py #########
# python3 code, last update Sat 27 May
# IGIMF.py is the module computing the IGIMF as described in Yan et al. 2017
# all physical quantities that are input to the functions are described in the test_gimf.py script or in the readme file
# --------------------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------------------------
# initialization of floating length arrays
List_M_ecl_for_xi_ecl = []
List_xi_ecl = []
List_M_str_for_xi_str = []
List_xi_str = []
List_xi = []
# --------------------------------------------------------------------------------------------------------------------------------
# Function_draw_IGIMF computes the IGIMF by combining Function_ECMF (embedded cluster mass
# function) and Function_IMF (stellar mass function in individual embedded clusters),
# following equation (1) from Yan et al. 2017
# The function fills the following global lists:
# List_M_ecl_for_xi_ecl - list of masses, M_ecl, of embedded clusters for the ECMF
# List_xi - IGIMF values (xi_IGIMF = dN/dm, dN being the number of stars in a mass bin dm),
# by default normalized to the total mass formed in 10 Myr (= SFR*10Myr) in Msun
# List_M_str_for_xi_str - list of stellar masses for the stellar IMF in Msun units
# List_xi_L - logarithmic IGIMF (xi_IGIMF_L = dN/d log_10 m)
# List_Log_M_str - base-10 logarithm of the stellar masses
def Function_draw_IGIMF(SFR, alpha3_model, beta_model, delta_t, M_over_H, I_ecl, M_ecl_U, M_ecl_L,
I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_str_U):
if SFR != 0:
global List_M_ecl_for_xi_ecl, List_xi, List_M_str_for_xi_str, List_xi_L, List_Log_M_str, x_IMF, y_IMF
Function_ECMF(SFR, beta_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, M_over_H)
x_IMF = []
y_IMF = []
alpha_1_change = Function_alpha_1_change(alpha_1, alpha1_model, M_over_H)
alpha_2_change = Function_alpha_2_change(alpha_2, alpha2_model, M_over_H)
alpha_3_change = Function_alpha_3_change(alpha3_model, List_M_ecl_for_xi_ecl[-1], M_over_H)
function_draw_xi_str(M_str_L, List_M_ecl_for_xi_ecl[-1], I_str, M_str_L, alpha_1_change,
M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)
List_xi = [0] * len(x_IMF)
number_of_ecl = len(List_M_ecl_for_xi_ecl) - 1
Function_IMF(alpha3_model, M_over_H, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, M_str_U,
number_of_ecl, 0)
x_IMF = []
y_IMF = []
function_draw_xi_str(M_str_L, List_M_ecl_for_xi_ecl[-1], I_str, M_str_L, alpha_1_change,
M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)
List_M_str_for_xi_str = x_IMF
        length = len(List_M_str_for_xi_str)
        List_xi_L = [0] * length
        List_Log_M_str = [0] * length
        Function_xi_to_xiL(length - 1, List_xi[0])
else:
List_M_str_for_xi_str = [0, 1000]
List_xi = [0, 0]
return
# Function_ECMF computes the mass function of star clusters (ECMF - embedded cluster mass function)
# The assumed shape of the ECMF is a single power law with slope beta (a function of the SFR)
# the empirical lower limit for the star cluster mass is 50 Msun
# the hypothetical upper mass limit is 10^9 Msun, but the actual M_ecl^max is computed, eq (12) in Yan et al. 2017
def Function_ECMF(SFR, beta_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, M_over_H):
global List_M_ecl_for_xi_ecl, List_xi_ecl, x_ECMF, y_ECMF
x_ECMF = []
y_ECMF = []
beta_change = Function_beta_change(beta_model, SFR, M_over_H)
function_draw_xi_ecl(M_ecl_L, SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
List_M_ecl_for_xi_ecl = x_ECMF
del List_M_ecl_for_xi_ecl[0]
del List_M_ecl_for_xi_ecl[-1]
List_xi_ecl = y_ECMF
del List_xi_ecl[0]
del List_xi_ecl[-1]
return
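# --- Illustrative relation (not part of the original module) ---
# The ECMF normalization ties the most massive cluster M_max_ecl to the total mass formed
# in the star formation epoch (M_tot = SFR * delta_t, with delta_t converted to years).
# For beta != 2 the quantity that function_M_max_ecl_not_2 (defined further below)
# compares with M_tot is the one sketched here.
def _example_ecmf_total_mass(M_max, I_ecl, M_U, M_L, beta):
    return I_ecl * (1 - beta) / (2 - beta) * (M_max ** (2 - beta) - M_L ** (2 - beta)) / (
        M_U ** (1 - beta) - M_max ** (1 - beta))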
# Function_IMF computes the stellar IMF in individual embedded star clusters
def Function_IMF(alpha3_model, M_over_H, I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, M_str_U, number_of_ecl, i):
while i < number_of_ecl:
global List_M_str_for_xi_str, List_xi_str, List_M_ecl_for_xi_ecl, x_IMF, y_IMF
x_IMF = []
y_IMF = []
M_ecl = List_M_ecl_for_xi_ecl[i]
alpha_3_change = Function_alpha_3_change(alpha3_model, M_ecl, M_over_H)
# Here only alpha_3_change is recalculated as alpha1(2)_change do not depend on M_ecl thus do not change.
function_draw_xi_str(M_str_L, M_ecl, I_str, M_str_L, alpha_1_change, M_turn,
alpha_2_change, M_turn2, alpha_3_change, M_str_U)
List_M_str_for_xi_str = x_IMF
List_xi_str = y_IMF
number_of_str = len(List_M_str_for_xi_str)
Function_update_List_xi(i, number_of_str, 0)
(i) = (i+1)
return
def Function_update_List_xi(i, number_of_str, j):
while j < number_of_str:
global List_xi, List_xi_str, List_xi_ecl, List_M_ecl_for_xi_ecl
List_xi[j] += List_xi_str[j] * List_xi_ecl[i] * (List_M_ecl_for_xi_ecl[i+1] - List_M_ecl_for_xi_ecl[i])
(j) = (j+1)
return
def Function_xi_to_xiL(i, unit):
global List_xi_L, List_xi, List_M_str_for_xi_str, List_Log_M_str
while i > -1:
if List_xi[i] == 0:
List_xi[i] = 10**(-5)
List_xi_L[i] = math.log((List_xi[i] * math.log(10) * List_M_str_for_xi_str[i] / unit * 1800), 10)
List_Log_M_str[i] = math.log(List_M_str_for_xi_str[i] , 10)
(i) = (i-1)
return
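# --- Illustrative relation (not part of the original module) ---
# The conversion above is based on dN/dlog10(m) = ln(10) * m * dN/dm; the additional
# scaling (1800/unit) applied in Function_xi_to_xiL is kept as in the original code and
# is not reproduced in this sketch.
def _example_xi_to_logarithmic_xi(m, xi):
    return math.log(10) * m * xi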
############ OSGIMF #############
# -----------------------------------------------------------------------------------------
# initialization of open-length arrays
# -----------------------------------------------------------------------------------------
List_M_str_all_i = []
List_n_str_all_i = []
List_mass_grid_x_axis = []
List_star_number_in_mass_grid_y_axis = []
List_star_number_in_mass_grid_y_axis2 = []
List_star_number_in_mass_grid_y_axis3 = []
List_star_number_in_mass_grid_y_axis4 = []
List_mass_grid = []
List_star_number_in_mass_grid = []
#-----------------------------------------------------------------------------------------
# This function gives the stellar masses in the entire galaxy in an unsorted manner,
# i.e. the stars are grouped by their parent clusters
def sample_for_one_epoch(SFR, alpha3_model, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_model,
I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model, M_turn2, M_over_H, M_str_U):
global List_M_str_all_i, List_n_str_all_i, list_M_ecl_i
beta_change = Function_beta_change(beta_model, SFR, M_over_H)
Function_sample_cluster(SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
len_of_M_ecl_list = len(list_M_ecl_i)
List_M_str_all_i = []
List_n_str_all_i = []
Function_sample_star_from_clusters(alpha3_model, I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model,
M_turn2, M_over_H, M_str_U, len_of_M_ecl_list, 0)
return
# Masses of formed clusters
def Function_sample_cluster(SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change):
global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i, M_max_ecl
list_m_ecl_i = []
list_n_ecl_i = []
list_M_ecl_i = []
M_max_ecl = 0
function_sample_from_ECMF(SFR, delta_t, I_ecl, M_ecl_U, M_ecl_L, beta_change)
return
# Stellar masses in a given star cluster
def Function_sample_star_from_clusters(alpha3_model, I_str, M_str_L, alpha_1, alpha1_model, M_turn, alpha_2, alpha2_model,
M_turn2, M_over_H, M_str_U, len_of_M_ecl_list, i):
while i < len_of_M_ecl_list: # sample a total number of i clusters
global List_M_str_all_i, List_n_str_all_i, list_m_str_i, list_n_str_i, list_M_str_i
list_m_str_i = []
list_n_str_i = []
list_M_str_i = []
alpha_1_change = Function_alpha_1_change(alpha_1, alpha1_model, M_over_H)
alpha_2_change = Function_alpha_2_change(alpha_2, alpha2_model, M_over_H)
alpha_3_change = Function_alpha_3_change(alpha3_model, list_M_ecl_i[i], M_over_H)
function_sample_from_IMF(list_M_ecl_i[i],
I_str, M_str_L, alpha_1_change, M_turn, alpha_2_change, M_turn2, alpha_3_change, M_str_U)
List_M_str_all_i += [list_M_str_i] # save all i clusters in "all_i" list
List_n_str_all_i += [list_n_str_i]
(i) = (i+1)
return
##################################################################################
## The sampling is finished here. Below are just sorting, binning, and plotting.##
##################################################################################
# Now star masses are recorded per star cluster in "List_M_str_all_i" and "List_n_str_all_i".
# For the whole galaxy we have: the cluster masses and the number of clusters with each mass,
# and for each cluster: the star masses and the number of stars with each mass.
# Sort all star masses formed in an epoch into a mass grid.
# The main purpose here is the sorting of the stellar masses and the preparation of
# the plotting output
def Function_draw(SFR, M_str_low, M_str_up, M_ecl_low, resolution_histogram_relative):
M_low = min(M_str_low, M_ecl_low)
global List_mass_grid, List_star_number_in_mass_grid, List_mass_grid_x_axis, List_star_number_in_mass_grid_y_axis
# for all stars
List_mass_grid = []
Function_mass_grid(SFR, M_str_up, M_low, resolution_histogram_relative)
List_mass_grid += [M_low]
List_star_number_in_mass_grid = [0] * (len(List_mass_grid) - 1)
Function_sort_out_star_mass(0)
##########
List_mass_grid_x_axis = [M_str_up]
make_mass_grid_x_axis(1)
List_mass_grid_x_axis += [M_low]
List_star_number_in_mass_grid_y_axis = []
make_star_number_in_mass_grid_y_axis(0)
List_mass_grid_x_axis = [List_mass_grid_x_axis[0]] + List_mass_grid_x_axis
List_mass_grid_x_axis += [List_mass_grid_x_axis[-1]]
List_star_number_in_mass_grid_y_axis = [0.0000001] + List_star_number_in_mass_grid_y_axis
List_star_number_in_mass_grid_y_axis += [0.0000001]
# for most massive star
global List_mass_grid2, List_star_number_in_mass_grid2, List_mass_grid_x_axis2, List_star_number_in_mass_grid_y_axis2
List_mass_grid2 = List_mass_grid
List_star_number_in_mass_grid2 = [0] * (len(List_mass_grid2) - 1)
Function_sort_out_star_mass2(0)
##########
List_star_number_in_mass_grid_y_axis2 = []
make_star_number_in_mass_grid_y_axis2(0)
List_star_number_in_mass_grid_y_axis2 = [0.0000001] + List_star_number_in_mass_grid_y_axis2
List_star_number_in_mass_grid_y_axis2 += [0.0000001]
###################################
global List_mass_grid3, List_star_number_in_mass_grid3, List_mass_grid_x_axis3, List_star_number_in_mass_grid_y_axis3
List_mass_grid3 = List_mass_grid
List_star_number_in_mass_grid3 = [0] * (len(List_mass_grid3) - 1)
Function_sort_out_star_mass3(0)
##########
List_star_number_in_mass_grid_y_axis3 = []
make_star_number_in_mass_grid_y_axis3(0)
List_star_number_in_mass_grid_y_axis3 = [0.0000001] + List_star_number_in_mass_grid_y_axis3
List_star_number_in_mass_grid_y_axis3 += [0.0000001]
###################################
global List_mass_grid4, List_star_number_in_mass_grid4, List_mass_grid_x_axis4, List_star_number_in_mass_grid_y_axis4
List_mass_grid4 = List_mass_grid
List_star_number_in_mass_grid4 = [0] * (len(List_mass_grid4) - 1)
Function_sort_out_star_mass4(0)
##########
List_star_number_in_mass_grid_y_axis4 = []
make_star_number_in_mass_grid_y_axis4(0)
List_star_number_in_mass_grid_y_axis4 = [0.0000001] + List_star_number_in_mass_grid_y_axis4
List_star_number_in_mass_grid_y_axis4 += [0.0000001]
return
### make a mass grid ###
def Function_mass_grid(SFR, mass, M_str_low, resolution_histogram_relative):
while mass > M_str_low:
global List_mass_grid
List_mass_grid += [mass]
(mass) = (mass * (1-resolution_histogram_relative))
        # we find it useful to use the following form of the mass grid sometimes.
        # One can apply this alternative form by commenting out the line above (add a # in front of it) and uncommenting the line below.
#(mass) = (mass * (0.967 + math.log(SFR, 10) / 400) / (math.log(mass + 1) ** 2 / (2 ** (math.log(SFR, 10) + 6.85) - 1) + 1))
return
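# --- Illustrative estimate (not part of the original module) ---
# The geometric grid above shrinks each bin edge by a constant relative factor, so the
# number of bins is roughly ln(M_up/M_low)/resolution; e.g. about 750 bins for
# M_up = 150, M_low = 0.08 and resolution_histogram_relative = 0.01 (assumed values).
def _example_histogram_bin_count(M_up=150., M_low=0.08, resolution=0.01):
    return math.ceil(math.log(M_up / M_low) / -math.log(1 - resolution))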
# count the number of star in each grid
def Function_sort_out_star_mass(i):
while i < len(List_M_str_all_i):
global l
l = 0
SubFunction_sort_out(i, 0)
(i)=(i+1)
return
def Function_sort_out_star_mass2(i):
while i < len(List_M_str_all_i):
global l
l = 0
SubFunction_sort_out2(i, 0)
(i)=(i+1)
return
def Function_sort_out_star_mass3(i):
while i < len(List_M_str_all_i):
global l
l = 0
SubFunction_sort_out3(i, 1)
(i)=(i+1)
return
def Function_sort_out_star_mass4(i):
while i < len(List_M_str_all_i):
global l
l = 0
SubFunction_sort_out4(i, 2)
(i)=(i+1)
return
def SubFunction_sort_out(i, j):
while j < len(List_M_str_all_i[i]):
global l, List_n_str_all_i
Function_find_k(i, j, l)
List_star_number_in_mass_grid[l] += List_n_str_all_i[i][j] * list_n_ecl_i[i]
(j)=(j+1)
return
def SubFunction_sort_out2(i, j):
if j < len(List_M_str_all_i[i]):
global l
Function_find_k(i, j, l)
List_star_number_in_mass_grid2[l] += list_n_ecl_i[i]
return
def SubFunction_sort_out3(i, j):
if j < len(List_M_str_all_i[i]):
global l
Function_find_k(i, j, l)
List_star_number_in_mass_grid3[l] += list_n_ecl_i[i]
return
def SubFunction_sort_out4(i, j):
if j < len(List_M_str_all_i[i]):
global l
Function_find_k(i, j, l)
List_star_number_in_mass_grid4[l] += list_n_ecl_i[i]
return
def Function_find_k(i, j, k):
while List_mass_grid[k+1] > List_M_str_all_i[i][j]:
global l
l = k+1
(k) = (k+1)
return
# prepare for the breaking line plot
def make_mass_grid_x_axis(i):
global List_mass_grid_x_axis, List_mass_grid
while i < len(List_mass_grid)-1:
List_mass_grid_x_axis += [List_mass_grid[i]]*2
(i)=(i+1)
return
def make_star_number_in_mass_grid_y_axis(i):
global List_star_number_in_mass_grid_y_axis, List_star_number_in_mass_grid, List_mass_grid
while i < len(List_star_number_in_mass_grid):
List_star_number_in_mass_grid_y_axis += [
List_star_number_in_mass_grid[i]/(List_mass_grid[i] - List_mass_grid[i+1])]*2
(i)=(i+1)
return
def make_star_number_in_mass_grid_y_axis2(i):
global List_star_number_in_mass_grid_y_axis2, List_star_number_in_mass_grid2, List_mass_grid2
while i < len(List_star_number_in_mass_grid2):
List_star_number_in_mass_grid_y_axis2 += [
List_star_number_in_mass_grid2[i]/(List_mass_grid2[i] - List_mass_grid2[i+1])]*2
(i)=(i+1)
return
def make_star_number_in_mass_grid_y_axis3(i):
global List_star_number_in_mass_grid_y_axis3, List_star_number_in_mass_grid3, List_mass_grid3
while i < len(List_star_number_in_mass_grid3):
List_star_number_in_mass_grid_y_axis3 += [
List_star_number_in_mass_grid3[i]/(List_mass_grid3[i] - List_mass_grid3[i+1])]*2
(i)=(i+1)
return
def make_star_number_in_mass_grid_y_axis4(i):
global List_star_number_in_mass_grid_y_axis4, List_star_number_in_mass_grid4, List_mass_grid4
while i < len(List_star_number_in_mass_grid4):
List_star_number_in_mass_grid_y_axis4 += [
List_star_number_in_mass_grid4[i]/(List_mass_grid4[i] - List_mass_grid4[i+1])]*2
(i)=(i+1)
return
def function_make_drop_line1(i):
while i < len(List_star_number_in_mass_grid_y_axis)-1:
if List_star_number_in_mass_grid_y_axis[i] == 0:
List_star_number_in_mass_grid_y_axis[i] = 0.0000001
(i) = (i+1)
def function_make_drop_line2(i):
while i < len(List_star_number_in_mass_grid_y_axis2)-1:
if List_star_number_in_mass_grid_y_axis2[i] == 0:
List_star_number_in_mass_grid_y_axis2[i] = 0.0000001
(i) = (i+1)
def function_make_drop_line3(i):
while i < len(List_star_number_in_mass_grid_y_axis3)-1:
if List_star_number_in_mass_grid_y_axis3[i] == 0:
List_star_number_in_mass_grid_y_axis3[i] = 0.0000001
(i) = (i+1)
def function_make_drop_line4(i):
while i < len(List_star_number_in_mass_grid_y_axis4)-1:
if List_star_number_in_mass_grid_y_axis4[i] == 0:
List_star_number_in_mass_grid_y_axis4[i] = 0.0000001
(i) = (i+1)
def function_make_drop_line():
function_make_drop_line1(0)
function_make_drop_line2(0)
function_make_drop_line3(0)
function_make_drop_line4(0)
return
######################## histogram ########################
mass_range_center = []
mass_range = []
mass_range_upper_limit = []
mass_range_lower_limit = []
star_number = []
def function_draw_histogram():
global mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number
mass_range_center = []
i = 0
while i < len(List_mass_grid) - 1:
mass_range_center += [
0.5 * (List_mass_grid[i] + List_mass_grid[i + 1])]
i = i + 1
mass_range = []
i = 0
while i < len(List_mass_grid) - 1:
mass_range += [List_mass_grid[i] - List_mass_grid[i + 1]]
i = i + 1
mass_range_upper_limit = []
i = 0
while i < len(List_mass_grid):
mass_range_upper_limit += [List_mass_grid[i]]
i = i + 1
mass_range_lower_limit = []
i = 0
while i < len(List_mass_grid) - 1:
mass_range_lower_limit += [List_mass_grid[i + 1]]
i = i + 1
star_number = List_star_number_in_mass_grid + []
return
############## IMF #################
# use equations in "supplementary-document-galimf.pdf"
# The star mass resolution is the lower resolution among "relative resolution" and "absolute resolution" where
# the relative resolution = star mass * resolution_star_relative
# the absolute resolution = resolution_star_absolute
resolution_star_relative = 0.001
resolution_star_absolute = 0.001
list_m_str_i = []
list_n_str_i = []
list_M_str_i = []
def function_sample_from_IMF(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
global list_m_str_i, list_n_str_i, list_M_str_i, M_max, M_max_function, k3, k2, k1, resolution_star_relative, resolution_star_absolute
M_max = 0
M_max_function = 0
function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
k3 = 0
k2 = 0
k1 = 0
function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
list_m_str_i = []
list_n_str_i = []
function_m_i_str(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_max, resolution_star_relative, resolution_star_absolute) # equation 16
list_M_str_i = []
length_n = len(list_n_str_i)
function_M_i(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U, length_n) # equation 18
del list_n_str_i[0]
return
# M_max is computed by solving simultaneously equations (3) and (4) from Yan et al 2017
def function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
    global M_max_function, M_max
M_constant = M_ecl * M_U ** (1 - alpha_3) / I_str / (1 - alpha_3) - M_turn2 ** (alpha_2 - alpha_3) * M_turn ** (
alpha_1 - alpha_2) * (M_turn ** (2 - alpha_1) - M_L ** (2 - alpha_1)) / (2 - alpha_1) - M_turn2 ** (
alpha_2 - alpha_3) * (M_turn2 ** (2 - alpha_2) - M_turn ** (
2 - alpha_2)) / (2 - alpha_2) + M_turn2 ** (2 - alpha_3) / (2 - alpha_3) # equation 14
function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, 100, 10, -1) # equation 14
M_max_function = 1
if M_max < M_turn2:
M_constant2 = M_ecl * M_turn2 ** (1 - alpha_2) / I_str / (1 - alpha_2) + M_ecl * M_turn2 ** (
alpha_3 - alpha_2) * (M_U ** (
1 - alpha_3) - M_turn2 ** (1 - alpha_3)) / I_str / (1 - alpha_3) - M_turn ** (alpha_1 - alpha_2) * (
M_turn ** (2 - alpha_1) - M_L ** (
2 - alpha_1)) / (2 - alpha_1) + M_turn ** (2 - alpha_2) / (2 - alpha_2) # equation 23
function_M_max_2(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, 0.75, 0.1, -1) # equation 23
M_max_function = 2
if M_max < M_turn:
M_constant3 = M_ecl * M_turn ** (1 - alpha_1) / I_str / (1 - alpha_1) + M_ecl * M_turn ** (
alpha_2 - alpha_1) * (M_turn2 ** (
1 - alpha_2) - M_turn ** (1 - alpha_2)) / I_str / (1 - alpha_2) + M_ecl * M_turn2 ** (
alpha_3 - alpha_2) * M_turn ** (
alpha_2 - alpha_1) * (M_U ** (1 - alpha_3) - M_turn2 ** (1 - alpha_3)) / I_str / (1 - alpha_3) + M_L ** (
2 - alpha_1) / (2 - alpha_1)
# equation 27
function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, 100, 10, -1) # equation 27
M_max_function = 3
if M_max < M_L:
M_max_function = 0
print("M_max < M_L")
return
def function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
global M_max_function, k3, k2, k1, M_max
if M_max_function == 1:
k3 = I_str*(1-alpha_3)/(M_U**(1-alpha_3)-M_max**(1-alpha_3))
# equation 12
elif M_max_function == 2:
k3 = I_str/(M_turn2**(alpha_2-alpha_3)*(M_turn2**(1-alpha_2)-M_max**(1-alpha_2))/(1-alpha_2) + (
M_U**(1-alpha_3)-M_turn2**(1-alpha_3))/(1-alpha_3))
# equation 21
elif M_max_function == 3:
k3 = I_str/(M_turn2**(alpha_2-alpha_3) * M_turn**(alpha_1-alpha_2) * (M_turn**(1-alpha_1)-M_max**(1-alpha_1)) / (
1-alpha_1) + M_turn2**(alpha_2-alpha_3)*(M_turn2**(1-alpha_2)-M_turn**(1-alpha_2))/(1-alpha_2) + (M_U**(
1-alpha_3)-M_turn2**(1-alpha_3))/(1-alpha_3))
# equation 25
else:
print("function_M_max went wrong")
return
k2 = k3*M_turn2**(alpha_2-alpha_3) # equation 2
k1 = k2*M_turn**(alpha_1-alpha_2) # equation 2
return
def function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1, step, pm): # equation 14
m_1 = round(m_1, 10) # round
M_x = m_1**(2-alpha_3)/(2-alpha_3) + M_ecl*m_1**(1-alpha_3)/I_str/(1-alpha_3)
if abs(M_x-M_constant) < abs(M_constant) * 10 ** (-7):
global M_max
M_max = m_1
elif m_1 - step <= M_L or m_1 + step >= M_U:
function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1, step / 2, pm)
elif M_x > M_constant and pm == -1:
function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1 - step, step, -1)
elif M_x > M_constant and pm == 1:
function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1 - step / 2, step / 2, -1)
elif M_x < M_constant and pm == 1:
function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1 + step, step, 1)
elif M_x < M_constant and pm == -1:
function_M_max_1(M_constant, M_ecl, I_str, alpha_3, M_U, M_L, m_1 + step / 2, step / 2, 1)
return
def function_M_max_2(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1, step, pm): # equation 23
m_1 = round(m_1, 10) # round
M_x = m_1 ** (2 - alpha_2) / (2 - alpha_2) + M_ecl * m_1 ** (1 - alpha_2) / I_str / (1 - alpha_2)
if abs(M_x - M_constant2) < abs(M_constant2) * 10 ** (-7):
global M_max
M_max = m_1
elif m_1 - step <= M_L or m_1 + step >= M_U:
function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1, step / 2, pm)
elif M_x > M_constant2 and pm == -1:
function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1 - step, step, -1)
elif M_x > M_constant2 and pm == 1:
function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1 - step / 2, step / 2, -1)
elif M_x < M_constant2 and pm == 1:
function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1 + step, step, 1)
elif M_x < M_constant2 and pm == -1:
function_M_max_1(M_constant2, M_ecl, I_str, alpha_2, M_U, M_L, m_1 + step / 2, step / 2, 1)
return
def function_M_max_3(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1, step, pm): # equation 27
m_1 = round(m_1, 10) # round
M_x = m_1 ** (2 - alpha_1) / (2 - alpha_1) + M_ecl * m_1 ** (1 - alpha_1) / I_str / (1 - alpha_1)
if abs(M_x-M_constant3) < abs(M_constant3) * 10 ** (-7):
global M_max
M_max = m_1
elif m_1 - step <= M_L or m_1 + step >= M_U:
function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1, step / 2, pm)
elif M_x > M_constant3 and pm == -1:
function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 - step, step, -1)
elif M_x > M_constant3 and pm == 1:
function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 - step / 2, step / 2, -1)
elif M_x < M_constant3 and pm == 1:
function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 + step, step, 1)
elif M_x < M_constant3 and pm == -1:
function_M_max_1(M_constant3, M_ecl, I_str, alpha_1, M_U, M_L, m_1 + step / 2, step / 2, 1)
return
def function_m_i_str(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_max, resolution_star_relative, resolution_star_absolute): # equation 16
global list_m_str_i
if M_max > 100:
loop_m_i_first_three(k3, M_turn2, alpha_3, M_max, 0, resolution_star_relative, resolution_star_absolute, 0)
(m_str_i, n_str_i) = cross_M_turn(k3, k2, M_turn2, alpha_3, alpha_2, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k2, M_turn, alpha_2, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
(m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
return
elif M_max > M_turn2:
loop_m_i(k3, M_turn2, alpha_3, M_max, 0, resolution_star_relative, resolution_star_absolute)
(m_str_i, n_str_i) = cross_M_turn(k3, k2, M_turn2, alpha_3, alpha_2, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k2, M_turn, alpha_2, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
(m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
return
elif M_max > M_turn:
loop_m_i(k2, M_turn, alpha_2, M_max, 0, resolution_star_relative, resolution_star_absolute)
(m_str_i, n_str_i) = cross_M_turn(k2, k1, M_turn, alpha_2, alpha_1, list_m_str_i[-1], resolution_star_relative, resolution_star_absolute)
loop_m_i(k1, M_L, alpha_1, m_str_i, n_str_i, resolution_star_relative, resolution_star_absolute)
cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
return
else:
loop_m_i(k1, M_L, alpha_1, M_max, 0, resolution_star_relative, resolution_star_absolute)
cross_M_L(k1, M_L, alpha_1, list_m_str_i[-1])
return
def function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute):
while m_i - m_i_plus_n < max(resolution_star_relative * m_i, resolution_star_absolute):
n_new = round(n_i * 1.05 + 1)
m_i_plus_n_new = (m_i ** (1 - alpha) - n_new * (1 - alpha) / k) ** (1 / (1 - alpha))
(m_i_plus_n, n_i) = (m_i_plus_n_new, n_new)
return m_i_plus_n, n_i
def loop_m_i_first_three(k, M_low, alpha, m_i, n_i, resolution_star_relative, resolution_star_absolute, count):
while m_i > M_low:
global list_m_str_i, list_n_str_i, n_turn
list_m_str_i += [m_i]
list_n_str_i += [n_i]
m_i_plus_n = (m_i ** (1 - alpha) - n_i * (1 - alpha) / k) ** (1 / (1 - alpha))
if count < 3:
m_i_plus_n = (m_i ** (1 - alpha) - (1 - alpha) / k) ** (1 / (1 - alpha))
n_turn = n_i
(m_i, n_i, count) = (m_i_plus_n, 1, (count+1))
elif m_i - m_i_plus_n > max(resolution_star_relative * m_i, resolution_star_absolute):
n_turn = n_i
(m_i, n_i) = (m_i_plus_n, n_i)
else:
(m_i_plus_n_new, n_turn) = function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute)
(m_i, n_i) = (m_i_plus_n_new, n_turn)
def loop_m_i(k, M_low, alpha, m_i, n_i, resolution_star_relative, resolution_star_absolute):
while m_i > M_low:
global list_m_str_i, list_n_str_i, n_turn
list_m_str_i += [m_i]
list_n_str_i += [n_i]
a = m_i ** (1 - alpha) - n_i * (1 - alpha) / k
if a > 0:
b = 1 / (1 - alpha)
m_i_plus_n = a ** b
if m_i - m_i_plus_n > max(resolution_star_relative * m_i, resolution_star_absolute):
(m_i, n_i) = (m_i_plus_n, n_i)
else:
(m_i_plus_n_new, n_turn) = function_get_n_new_str(m_i, k, alpha, m_i_plus_n, n_i, resolution_star_relative, resolution_star_absolute)
(m_i, n_i) = (m_i_plus_n_new, n_turn)
else:
return
def cross_M_turn(k_before, k_after, M_cross, alpha_before, alpha_after, m_i, resolution_star_relative, resolution_star_absolute):
global n_turn
n_before = int(k_before/(1-alpha_before)*(m_i**(1-alpha_before)-M_cross**(1-alpha_before)))
m_before_cross = (m_i ** (1 - alpha_before) - n_before * (1 - alpha_before) / k_before) ** (1 / (1 - alpha_before))
a = (M_cross**(1-alpha_after)+k_before/k_after*(1-alpha_after)/(1-alpha_before)*(m_before_cross**(
1-alpha_before)-M_cross**(1-alpha_before))-(1-alpha_after)/k_after)
if a > 0:
m_after_cross = a ** (1/(1-alpha_after))
n_after = int(0.9*(n_turn - n_before - 1))
m_after_cross_plus_n_after = (m_after_cross ** (1 - alpha_after) - n_after * (1 - alpha_after) / k_after) ** (1 / (1 - alpha_after))
if m_i - m_after_cross_plus_n_after > max(resolution_star_relative * m_i, resolution_star_absolute):
return (m_after_cross_plus_n_after, n_before + 1 + n_after)
else:
(m_after_cross_plus_n_new, n_after_new) = function_get_n_new_str_cross(
m_i, m_after_cross, k_after, alpha_after, m_after_cross_plus_n_after, n_after, resolution_star_relative, resolution_star_absolute)
return (m_after_cross_plus_n_new, n_before + 1 + n_after_new)
else:
return (0, 0)
def function_get_n_new_str_cross(m_i, m_after_cross, k, alpha, m_after_cross_plus_n, n_i, resolution_star_relative, resolution_star_absolute):
while m_i - m_after_cross_plus_n < max(resolution_star_relative * m_i, resolution_star_absolute):
n_after_new = round(n_i * 1.05 + 1)
m_after_cross_plus_n_new = (m_after_cross ** (1 - alpha) - n_after_new * (1 - alpha) / k) ** (1 / (1 - alpha))
(m_after_cross_plus_n, n_i) = (m_after_cross_plus_n_new, n_after_new)
return m_after_cross_plus_n, n_i
def cross_M_L(k_1, M_L, alpha_1, m_i): # equation 19
global list_m_str_i, list_n_str_i
n_i = int(k_1 / (1 - alpha_1) * (m_i ** (1 - alpha_1) - M_L ** (1 - alpha_1)))
list_m_str_i += [M_L]
list_n_str_i += [n_i]
return
def function_M_i(k1, k2, k3, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U, length_n): # equation 18
global list_m_str_i, new_i, list_M_str_i, M_max, list_n_str_i
new_i = 0
if M_max > M_turn2:
loop_M_i(k3, M_turn2, alpha_3, new_i)
cross_M_turn2(k3, k2, M_turn2, alpha_3, alpha_2, new_i)
if new_i + 1 < len(list_m_str_i):
loop_M_i(k2, M_turn, alpha_2, new_i)
if list_n_str_i[new_i + 1] > 0:
cross_M_turn2(k2, k1, M_turn, alpha_2, alpha_1, new_i)
if new_i + 1 < len(list_m_str_i):
loop_M_i(k1, M_L, alpha_1, new_i)
if list_n_str_i[new_i+1] == 0:
return
else:
M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (2 - alpha_1)) / \
list_n_str_i[new_i + 1]
list_M_str_i += [M_i]
return
elif M_max > M_turn:
loop_M_i(k2, M_turn, alpha_2, new_i)
cross_M_turn2(k2, k1, M_turn, alpha_2, alpha_1, new_i)
loop_M_i(k1, M_L, alpha_1, new_i)
if list_n_str_i[new_i+1] == 0:
return
else:
M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (
2 - alpha_1)) / list_n_str_i[new_i + 1]
list_M_str_i += [M_i]
return
else:
loop_M_i(k1, M_L, alpha_1, new_i)
if list_n_str_i[new_i+1] == 0:
return
else:
M_i = k1 / (2 - alpha_1) * (list_m_str_i[new_i] ** (2 - alpha_1) - list_m_str_i[new_i + 1] ** (
2 - alpha_1)) / list_n_str_i[new_i + 1]
list_M_str_i += [M_i]
return
def loop_M_i(k, M_low, alpha, i):
global list_m_str_i, list_n_str_i, list_M_str_i, new_i
while list_m_str_i[i+1] > M_low:
M_i = k/(2-alpha)*(list_m_str_i[i]**(2-alpha)-list_m_str_i[i+1]**(2-alpha))/list_n_str_i[i+1]
list_M_str_i += [M_i]
new_i = i + 1
(i) = (new_i)
def cross_M_turn2(k_before, k_after, M_cross, alpha_before, alpha_after, i):
global list_m_str_i, list_n_str_i, list_M_str_i, new_i
M_i = k_before / (2 - alpha_before) * (list_m_str_i[i] ** (2 - alpha_before) - M_cross ** (2 - alpha_before)
) / list_n_str_i[i + 1] + k_after / (2 - alpha_after) * (M_cross ** (2 - alpha_after
) - list_m_str_i[i + 1] ** (2 - alpha_after)) / list_n_str_i[i + 1]
list_M_str_i += [M_i]
new_i = i + 1
return
################# draw IMF without sampling #################
def k_str(M_str, M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
global M_max, M_max_function, k3, k2, k1
M_max = 0
M_max_function = 0
function_M_max(M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
k3 = 0
k2 = 0
k1 = 0
function_k321(I_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
return
x_IMF = []
y_IMF = []
def function_draw_xi_str(M_str, M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U):
global x_IMF, y_IMF, k1, k2, k3, M_max
k_str(M_str, M_ecl, I_str, M_L, alpha_1, M_turn, alpha_2, M_turn2, alpha_3, M_U)
function_draw_xi_str_loop(M_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3)
return
def function_draw_xi_str_loop(M_str, alpha_1, M_turn, alpha_2, M_turn2, alpha_3):
global x_IMF, y_IMF, k1, k2, k3, M_max
while M_str < M_max:
x_IMF += [M_str]
if M_str > M_turn2:
xi = k3 * M_str ** (-alpha_3)
elif M_str > M_turn:
xi = k2 * M_str ** (-alpha_2)
else:
xi = k1 * M_str ** (-alpha_1)
y_IMF += [xi]
(M_str) = (1.02 * M_str)
return
########### alpha ###########
def Function_alpha_1_change(alpha_1, alpha1_model, M_over_H):
if (alpha1_model == 0):
return alpha_1
elif (alpha1_model == 1):
alpha_1_change = alpha_1 + 0.5 * M_over_H
return alpha_1_change
else:
print("alpha1_model: %s, do not exist.\nCheck file 'alpha1.py'" % (alpha1_model))
return
def Function_alpha_2_change(alpha_2, alpha2_model, M_over_H):
if (alpha2_model == 0):
return alpha_2
elif (alpha2_model == 1):
alpha_2_change = alpha_2 + 0.5 * M_over_H
return alpha_2_change
else:
print("alpha2_model: %s, do not exist.\nCheck file 'alpha2.py'" % (alpha2_model))
return
def Function_alpha_3_change(alpha3_model, M_ecl, M_over_H):
if (alpha3_model == 0):
default_alpha3 = 2.3
# print("alpha_3 is set to be a constant: %s, as this is the default alpha_3 value for alpha3_model 0.\nFor more options regarding alpha_3 variation, please check file 'alpha3.py'" % (default_alpha3))
return default_alpha3
elif (alpha3_model == 1):
rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)
if rho < 9.5 * 10 ** 4:
alpha_3_change = 2.3
else:
alpha_3_change = 1.86 - 0.43 * math.log(rho / 10 ** 6, 10)
# print("Notification in file 'alpha3_model' uncompleted")
if alpha_3_change < 0.5:
print("IMF alpha_3 being", alpha_3_change, "out of the tested range from Marks et al. 2012.")
return alpha_3_change
elif (alpha3_model == 2):
rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)
x = -0.1405 * M_over_H + 0.99 * math.log(rho / 10 ** 6, 10)
if x < -0.87:
alpha_3_change = 2.3
else:
alpha_3_change = -0.41 * x + 1.94
# print("Notification in file 'alpha3_model' uncompleted")
return alpha_3_change
else:
# print("alpha_3 is set to be a constant: %s, as this is the input value of parameter 'alpha3_model'.\nFor more options regarding alpha_3 variation, please check file 'alpha3.py'" % (alpha3_model))
return alpha3_model
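# --- Illustrative check (not part of the original module) ---
# Mirrors alpha3_model 1 above for a single cluster mass (M_ecl in solar masses is an
# assumption of this example): a 10^6 Msun cluster gives rho ~ 3.2e6 > 9.5e4, hence
# alpha_3 ~ 1.86 - 0.43*log10(rho/10^6) ~ 1.64, i.e. a top-heavy IMF.
def _example_alpha3_model_1(M_ecl=10. ** 6):
    rho = 10 ** (0.61 * math.log(M_ecl, 10) + 2.85)
    if rho < 9.5 * 10 ** 4:
        return 2.3
    return 1.86 - 0.43 * math.log(rho / 10 ** 6, 10)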
########## ECMF #########
# This part gives the cluster masses according to file "supplementary-document-galimf.pdf".
# The code is only valid when SFR > 3 * 10^(-10) solar / year.
# Inputs:
# SFR,delta_t, I, M_U, M_L, \beta
# step 1
# use equation 13 or 17
# give first integration limit m_1 i.e. M_max_ecl
# step 2
# use equation 10 or 14
# give k
# step 3
# use equation 21
# give every integration limit m_i and the number of clusters in this region n_i
# step 4
# use equation 22 or 23
# give every cluster mass M_i
# Outputs:
# list of cluster masses "list_M_ecl_i"
# and the number of clusters with each mass "list_n_ecl_i"
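# --- Illustrative sketch (not part of the original module) ---
# A minimal version of the optimal-sampling loop described above for a pure power law
# xi(M) = k * M**(-beta) with beta != 2: each integration limit m_{i+1} encloses exactly
# one object below m_i, and the object mass is the mass integral between the two limits.
# The functions below additionally group neighbouring objects to keep a finite mass
# resolution; this sketch omits that and is for illustration only.
def _example_optimal_sampling_power_law(k, beta, m_1, M_low, max_objects=100000):
    limits = [m_1]
    masses = []
    m_i = m_1
    while m_i > M_low and len(masses) < max_objects:
        base = m_i ** (1 - beta) - (1 - beta) / k
        if base <= 0:
            break
        m_next = base ** (1 / (1 - beta))
        if m_next < M_low:
            break
        masses.append(k / (2 - beta) * (m_i ** (2 - beta) - m_next ** (2 - beta)))
        limits.append(m_next)
        m_i = m_next
    return limits, masses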
################### sample cluster from ECMF #####################
resolution_cluster_relative = 0.001 # The mass resolution of an embedded cluster with mass M is: M * resolution_cluster_relative.
list_m_ecl_i = []
list_n_ecl_i = []
list_M_ecl_i = []
M_max_ecl = 0
def function_sample_from_ECMF(SFR, delta_t, I_ecl, M_U, M_L, beta):
global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i, M_max_ecl, resolution_cluster_relative
    M_tot = SFR * delta_t * 10**6 # SFR in Msun/yr, delta_t in Myr, M_tot in Msun
if beta == 2:
M_max_ecl = 0
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, 10**8, 10**7, -1) # equation 44
k = I_ecl/(1/M_max_ecl-1/M_U) # equation 41
list_m_ecl_i = [M_max_ecl]
list_n_ecl_i = []
function_m_i_ecl(M_max_ecl, M_L, k, beta, 1) # equation 48
list_M_ecl_i = []
length_n = len(list_n_ecl_i)
function_M_i_2(k, 0, length_n) # equation 50
else:
M_max_ecl = 0
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, 10**8, 10**7, -1) # equation 40
k = I_ecl * (1 - beta) / (M_U ** (1 - beta) - M_max_ecl ** (1 - beta)) # equation 37
list_m_ecl_i = [M_max_ecl]
list_n_ecl_i = []
function_m_i_ecl(M_max_ecl, M_L, k, beta, 1) # equation 48
list_M_ecl_i = []
length_n = len(list_n_ecl_i)
function_M_i_not_2(k, beta, 0, length_n) # equation 49
return
def function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1, step, pm): # equation 44
m_1 = round(m_1, 10) # round makes the code only valid when SFR > 3 * 10^(-10) solar / year
M_x = I_ecl * (math.log(m_1) - math.log(M_L)) / (1 / m_1 - 1 / M_U)
if M_tot * (1. + 10 ** (-5)) > M_x > M_tot * (1- 10 ** (-5)):
global M_max_ecl
M_max_ecl = m_1
elif m_1 - step < M_L or m_1 + step > M_U:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1, step/10, pm)
elif M_x > M_tot and pm == -1:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 - step, step, -1)
elif M_x > M_tot and pm == 1:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 - step/10, step/10, -1)
elif M_x < M_tot and pm == 1:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 + step, step, 1)
elif M_x < M_tot and pm == -1:
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, m_1 + step/10, step/10, 1)
def function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1, step, pm): # equation 40
m_1 = round(m_1, 10) # round makes the code only valid when SFR > 3 * 10^(-10) solar / year
M_x = I_ecl * (1 - beta) / (2 - beta) * (m_1 ** (2 - beta) - M_L ** (2 - beta)) / (
M_U ** (1 - beta) - m_1 ** (1 - beta))
if M_tot * (1.+10**(-5)) > M_x > M_tot * (1-10**(-5)):
global M_max_ecl
M_max_ecl = m_1
elif m_1 - step <= M_L or m_1 + step >= M_U:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1, step/2, pm)
elif M_x > M_tot and pm == -1:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 - step, step, -1)
elif M_x > M_tot and pm == 1:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 - step/2, step/2, -1)
elif M_x < M_tot and pm == 1:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 + step, step, 1)
elif M_x < M_tot and pm == -1:
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, m_1 + step/2, step/2, 1)
def function_m_i_ecl(m_i, M_L, k, beta, n_i): # equation 48
while m_i > M_L:
global list_m_ecl_i, list_n_ecl_i, resolution_cluster_relative
m_i_plus_n = (m_i**(1-beta) - n_i * (1-beta) / k)**(1/(1-beta))
if m_i_plus_n < M_L:
list_m_ecl_i += [M_L]
n_L = int((m_i**(1-beta) - M_L**(1-beta)) * k / (1-beta))
if n_L == 0:
return
else:
list_n_ecl_i += [n_L]
return
elif m_i - m_i_plus_n > resolution_cluster_relative * m_i:
list_m_ecl_i += [m_i_plus_n]
list_n_ecl_i += [n_i]
(m_i, n_i) = (m_i_plus_n, n_i)
else:
(m_i_plus_n_new, n_new) = function_get_n_new_ecl(m_i, k, beta, m_i_plus_n, n_i)
list_m_ecl_i += [m_i_plus_n_new]
list_n_ecl_i += [n_new]
(m_i, n_i) = (m_i_plus_n_new, n_new)
return
def function_get_n_new_ecl(m_i, k, beta, m_i_plus_n, n_i):
while m_i - m_i_plus_n < resolution_cluster_relative * m_i:
n_new = round(n_i * 1.05 + 1)
m_i_plus_n_new = (m_i ** (1 - beta) - n_new * (1 - beta) / k) ** (1 / (1 - beta))
(m_i_plus_n, n_i) = (m_i_plus_n_new, n_new)
return m_i_plus_n, n_i
def function_M_i_2(k, i, length_n): # equation 50
while i < length_n:
global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i
M_i = k * (math.log(list_m_ecl_i[i]) - math.log(list_m_ecl_i[i+1])) / list_n_ecl_i[i]
list_M_ecl_i += [M_i]
(i) = (i+1)
return
def function_M_i_not_2(k, beta, i, length_n): # equation 49
while i < length_n:
global list_m_ecl_i, list_n_ecl_i, list_M_ecl_i
M_i = k / (2-beta) * (list_m_ecl_i[i]**(2-beta)-list_m_ecl_i[i+1]**(2-beta)) / list_n_ecl_i[i]
list_M_ecl_i += [M_i]
(i) = (i+1)
return
################### draw ECMF without sampling #####################
def k_ecl(M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta):
global M_max_ecl
    M_tot = SFR * delta_t * 10 ** 6 # SFR in Msun/yr, delta_t in Myr, M_tot in Msun
if beta == 2:
M_max_ecl = 0
function_M_max_ecl_2(M_tot, I_ecl, M_U, M_L, 10**8, 10**7, -1) # equation 44
k = I_ecl/(1/M_max_ecl-1/M_U) # equation 41
else:
M_max_ecl = 0
function_M_max_ecl_not_2(M_tot, I_ecl, M_U, M_L, beta, M_U/10, M_U/100, -1) # equation 40
k = I_ecl * (1 - beta) / (M_U ** (1 - beta) - M_max_ecl ** (1 - beta)) # equation 37
return k
x_ECMF = []
y_ECMF = []
def function_draw_xi_ecl(M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta):
global x_ECMF, y_ECMF
k = k_ecl(M_ecl, SFR, delta_t, I_ecl, M_U, M_L, beta)
function_draw_xi_ecl_loop(M_ecl, k, M_U, beta)
x_ECMF = [x_ECMF[0]] + x_ECMF
x_ECMF += [x_ECMF[-1]]
y_ECMF = [0.000000001] + y_ECMF
y_ECMF += [0.000000001]
return
def function_draw_xi_ecl_loop(M_ecl, k, M_U, beta):
while M_ecl < M_max_ecl:
global x_ECMF, y_ECMF
x_ECMF += [M_ecl]
xi = k * M_ecl ** (-beta)
y_ECMF += [xi]
(M_ecl) = (1.01 * M_ecl)
return
########## beta ###########
def Function_beta_change(beta_model, SFR, M_over_H):
if (beta_model == 0):
default_beta = 2.00000001
return default_beta
elif (beta_model == 1):
beta_change = -0.106 * math.log(SFR, 10) + 2.000001 #+ 0.5*M_over_H
if beta_change < 1.5:
beta_change = 1.5
elif beta_change > 2.5:
beta_change = 2.5
# print("ECMF-beta =", beta_change)
return beta_change
elif (beta_model == 2):
if SFR > 1:
beta_change = -0.106 * math.log(SFR, 10) + 2.00000001
else:
beta_change = 2.0000001
return beta_change
else:
return beta_model
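# --- Illustrative check (not part of the original module) ---
# For beta_model 1 the ECMF slope flattens with increasing SFR (SFR assumed in solar
# mass / yr): e.g. SFR = 100 gives beta ~ -0.106*2 + 2 ~ 1.79, clipped to [1.5, 2.5].
def _example_beta_model_1(SFR):
    beta = -0.106 * math.log(SFR, 10) + 2.000001
    return min(max(beta, 1.5), 2.5)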
| 49,314 | 40.934524 | 208 | py |
galIMF | galIMF-master/read_yield_table.py | import time
import numpy as np
import math
import matplotlib.pyplot as plt
def function_read_file(yield_table_name):
####################
### read in file ###
####################
if yield_table_name == "portinari98":
file_yield = open(
'yield_tables/agb_and_massive_stars_portinari98_marigo01_gce_totalyields.txt', 'r')
# Use net yields of Portinari and Marigo
        # Net yields for masses up to 7 Msun are from Marigo; above that, those of Portinari are taken.
        # Only isotopes that are available in both yield sets and go up to Fe are selected.
# Initial masses go from the lowest mass available up to 100Msun. (100!!!)
# Yield set ID M01P98 in Ritter et al. 2017.
# References: Marigo et al. 2001, http://ukads.nottingham.ac.uk/abs/2001A%26A...370..194M
# Portinari et al. 1998, http://ukads.nottingham.ac.uk/abs/1998A%26A...334..505P
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "Kobayashi06":
file_yield = open(
'yield_tables/agb_and_massive_stars_Kobayashi06_marigo01_gce_totalyields.txt', 'r')
# Use net yields of Kobayashi C., Umeda H., Nomoto K., Tominaga N., Ohkubo T., 2006, ApJ, 653, 1145
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "WW95":
file_yield = open(
'yield_tables/massive_stars_WW95_totalyields.txt', 'r')
# Use net yields of Woosley S. E., Weaver T. A., 1995, ApJS, 101, 181 (WW95)
# Use WW95 model B which has the highest [Mg/Fe].
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "marigo01":
file_yield = open(
'yield_tables/agb_marigo01_totalyields.txt', 'r')
data = file_yield.readlines()
file_yield.close()
###########################
### extract information ###
###########################
#
H_relative_line_number = function_get_element_line_number(data, 'H-1')
He_relative_line_number = function_get_element_line_number(data, 'He-4')
C_relative_line_number = function_get_element_line_number(data, 'C-12')
N_relative_line_number = function_get_element_line_number(data, 'N-14')
O_relative_line_number = function_get_element_line_number(data, 'O-16')
Ne_relative_line_number = function_get_element_line_number(data, 'Ne-20')
Mg_relative_line_number = function_get_element_line_number(data, 'Mg-24')
Si_relative_line_number = function_get_element_line_number(data, 'Si-28')
S_relative_line_number = function_get_element_line_number(data, 'S-32')
Ca_relative_line_number = function_get_element_line_number(data, 'Ca-40')
Fe_relative_line_number = function_get_element_line_number(data, 'Fe-56')
#
global M_list, Z_list, eject_mass_list, H_eject_mass_list, He_eject_mass_list, C_eject_mass_list, \
N_eject_mass_list, O_eject_mass_list, Ne_eject_mass_list, Mg_eject_mass_list, Si_eject_mass_list, \
S_eject_mass_list, Ca_eject_mass_list, Fe_eject_mass_list, Metal_eject_mass_list
global O_over_Mg_list, Mg_over_Fe_list, Mg_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_H_list, O_over_Fe_list
#
i = 0
while i < len(data):
line_i = str.split(data[i])
if line_i[1] == 'Table:': # Here select the lines being: ['H', 'Table:', '(M=xxx,Z=xxx)']
line_H = str.split(data[i + H_relative_line_number])
line_He = str.split(data[i + He_relative_line_number])
line_C = str.split(data[i + C_relative_line_number])
line_N = str.split(data[i + N_relative_line_number])
line_O = str.split(data[i + O_relative_line_number])
line_Ne = str.split(data[i + Ne_relative_line_number])
line_Mg = str.split(data[i + Mg_relative_line_number])
line_Si = str.split(data[i + Si_relative_line_number])
line_S = str.split(data[i + S_relative_line_number])
line_Ca = str.split(data[i + Ca_relative_line_number])
line_Fe = str.split(data[i + Fe_relative_line_number])
line_Lifetime = str.split(data[i + 1])
Lifetime = function_get_Mfinal_and_Lifetime(line_Lifetime[2])
line_Mfinal = str.split(data[i + 2])
Mfinal = function_get_Mfinal_and_Lifetime(line_Mfinal[2])
(Z_ini, M_ini) = function_get_Z_M(line_i[2]) # get the initial mass and metallicity of the star
ejecta_mass = round((M_ini - Mfinal), 5) ####################
H_mass = function_get_element_mass(line_H[1])
He_mass = function_get_element_mass(line_He[1])
C_mass = function_get_element_mass(line_C[1])
N_mass = function_get_element_mass(line_N[1])
O_mass = function_get_element_mass(line_O[1])
Ne_mass = function_get_element_mass(line_Ne[1])
Mg_mass = function_get_element_mass(line_Mg[1])
Si_mass = function_get_element_mass(line_Si[1])
S_mass = function_get_element_mass(line_S[1])
Ca_mass = function_get_element_mass(line_Ca[1])
Fe_mass = function_get_element_mass(line_Fe[1])
H_num = H_mass/1.0079
O_num = O_mass/15.9994
Mg_num = Mg_mass/24.305
Fe_num = Fe_mass/55.845
O_over_Mg = math.log(O_num/Mg_num, 10) - 8.69 + 7.60
Mg_over_H = math.log(Mg_num/H_num, 10) - 7.60 + 12
Fe_over_H = math.log(Fe_num/H_num, 10) - 7.50 + 12
O_over_H = math.log(O_num/H_num, 10) - 8.69 + 12
Mg_over_Fe = math.log(Mg_num/Fe_num, 10) - 7.60 + 7.50
O_over_Fe = math.log(O_num/Fe_num, 10) - 8.69 + 7.50
Metal_mass = round((ejecta_mass - H_mass - He_mass), 5) ####################
if Metal_mass<0:
print("Warning: Metal_mass=", Metal_mass, "<0")
print("check stellar yield table with metallicity and mass being:", Z_ini, "&", M_ini)
Metal_mass = 0
Z_over_H = math.log(Metal_mass / H_mass, 10) - math.log(0.0134 / 0.7381, 10)
if len(Z_list) == 0:
Z_list.append(Z_ini)
Z_n = 0
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
Z_over_H_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Mg_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
O_over_Fe_list.append([])
if Z_ini != Z_list[-1]:
Z_list.append(Z_ini)
Z_n += 1
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Mg_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
Z_over_H_list.append([])
O_over_Fe_list.append([])
M_list[Z_n].append(M_ini)
eject_mass_list[Z_n].append(ejecta_mass)
H_eject_mass_list[Z_n].append(H_mass)
He_eject_mass_list[Z_n].append(He_mass)
C_eject_mass_list[Z_n].append(C_mass)
N_eject_mass_list[Z_n].append(N_mass)
O_eject_mass_list[Z_n].append(O_mass)
Ne_eject_mass_list[Z_n].append(Ne_mass)
Mg_eject_mass_list[Z_n].append(Mg_mass)
Si_eject_mass_list[Z_n].append(Si_mass)
S_eject_mass_list[Z_n].append(S_mass)
Ca_eject_mass_list[Z_n].append(Ca_mass)
Fe_eject_mass_list[Z_n].append(Fe_mass)
Metal_eject_mass_list[Z_n].append(Metal_mass)
O_over_Mg_list[Z_n].append(O_over_Mg)
Mg_over_Fe_list[Z_n].append(Mg_over_Fe)
Mg_over_H_list[Z_n].append(Mg_over_H)
O_over_H_list[Z_n].append(O_over_H)
Z_over_H_list[Z_n].append(Z_over_H)
Fe_over_H_list[Z_n].append(Fe_over_H)
O_over_Fe_list[Z_n].append(O_over_Fe)
(i) = (i + 1)
# print(Z_list)
# print(M_list)
# print(eject_mass_list)
# print(H_eject_mass_list)
# print(Fe_eject_mass_list)
###########################
### write data to files ###
###########################
write_data()
return
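# --- Illustrative note (not part of the original file) ---
# The abundance ratios computed above follow the bracket convention
# [X/Y] = log10(N_X/N_Y) - log10(N_X/N_Y)_sun; the constants 8.69, 7.60, 7.50 and 12 used
# in function_read_file are the adopted solar logarithmic abundances of O, Mg, Fe and H.
# A generic sketch of the same relation (all names are assumptions of this example):
def _example_bracket_ratio(number_x, number_y, solar_log_eps_x, solar_log_eps_y):
    # e.g. [O/Fe] = log10(O_num/Fe_num) - (8.69 - 7.50)
    return math.log(number_x / number_y, 10) - (solar_log_eps_x - solar_log_eps_y)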
def lindexsplit(List,*lindex):
index = list(lindex)
index.sort()
templist1 = []
templist2 = []
templist3 = []
breakcounter = 0
itemcounter = 0
finalcounter = 0
numberofbreaks = len(index)
totalitems = len(List)
lastindexval = index[(len(index)-1)]
finalcounttrigger = (totalitems-(lastindexval+1))
for item in List:
itemcounter += 1
indexofitem = itemcounter - 1
nextbreakindex = index[breakcounter]
#Less than the last cut
if breakcounter <= numberofbreaks:
if indexofitem < nextbreakindex:
templist1.append(item)
elif breakcounter < (numberofbreaks - 1):
templist1.append(item)
templist2.append(templist1)
templist1 = []
breakcounter +=1
else:
if indexofitem <= lastindexval and indexofitem <= totalitems:
templist1.append(item)
templist2.append(templist1)
templist1 = []
else:
if indexofitem >= lastindexval and indexofitem < totalitems + 1:
finalcounter += 1
templist3.append(item)
if finalcounter == finalcounttrigger:
templist2.append(templist3)
return templist2
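# --- Illustrative example (not part of the original file) ---
# lindexsplit splits a flat list into chunks at the given index, keeping the element at
# that index in the first chunk; the call below returns [['a', 'b', 'c'], ['d', 'e']].
# write_data (below) uses it to separate the low-mass part of the mass grid, which gets
# zero yields for the massive-star-only WW95 table, from the interpolated high-mass part.
def _example_lindexsplit_usage():
    return lindexsplit(['a', 'b', 'c', 'd', 'e'], 2)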
def function_get_mass_grid(): # read in a grid from 0.08 to 150 Msun
file_lifetime = open(
'yield_tables/rearranged___/setllar_lifetime_from_portinari98/portinari98_Z=0.0004.txt', 'r')
data = file_lifetime.readlines()
mass = data[3]
file_lifetime.close()
mass_grid = [float(x) for x in mass.split()]
return mass_grid
def write_data():
global M_list, Z_list, eject_mass_list, H_eject_mass_list, He_eject_mass_list, C_eject_mass_list, \
N_eject_mass_list, O_eject_mass_list, Ne_eject_mass_list, Mg_eject_mass_list, Si_eject_mass_list, \
S_eject_mass_list, Ca_eject_mass_list, Fe_eject_mass_list, Metal_eject_mass_list
mass_grid = function_get_mass_grid()
splitted_mass_grid = lindexsplit(mass_grid, 153)
for Z in range(len(Z_list)):
metallicity = Z_list[Z]
mass = M_list[Z]
eject_mass = eject_mass_list[Z]
H_eject_mass = H_eject_mass_list[Z]
He_eject_mass = He_eject_mass_list[Z]
C_eject_mass = C_eject_mass_list[Z]
N_eject_mass = N_eject_mass_list[Z]
O_eject_mass = O_eject_mass_list[Z]
Ne_eject_mass = Ne_eject_mass_list[Z]
Mg_eject_mass = Mg_eject_mass_list[Z]
Si_eject_mass = Si_eject_mass_list[Z]
S_eject_mass = S_eject_mass_list[Z]
Ca_eject_mass = Ca_eject_mass_list[Z]
Fe_eject_mass = Fe_eject_mass_list[Z]
Metal_eject_mass = Metal_eject_mass_list[Z]
### Interpolate the metal yield ###
# # portinari98 or marigo01:
# eject_mass = np.interp(mass_grid, mass, eject_mass).tolist()
# H_eject_mass = np.interp(mass_grid, mass, H_eject_mass).tolist()
# He_eject_mass = np.interp(mass_grid, mass, He_eject_mass).tolist()
# C_eject_mass = np.interp(mass_grid, mass, C_eject_mass).tolist()
# N_eject_mass = np.interp(mass_grid, mass, N_eject_mass).tolist()
# O_eject_mass = np.interp(mass_grid, mass, O_eject_mass).tolist()
# Ne_eject_mass = np.interp(mass_grid, mass, Ne_eject_mass).tolist()
# Mg_eject_mass = np.interp(mass_grid, mass, Mg_eject_mass).tolist()
# Si_eject_mass = np.interp(mass_grid, mass, Si_eject_mass).tolist()
# S_eject_mass = np.interp(mass_grid, mass, S_eject_mass).tolist()
# Ca_eject_mass = np.interp(mass_grid, mass, Ca_eject_mass).tolist()
# Metal_eject_mass = np.interp(mass_grid, mass, Metal_eject_mass).tolist()
# Fe_eject_mass = np.interp(mass_grid, mass, Fe_eject_mass).tolist()
# # WW95
eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
H_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
He_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
C_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
N_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
O_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
Ne_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
Mg_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
Si_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
S_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
Ca_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
Metal_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
Fe_eject_mass_low = np.interp(splitted_mass_grid[0], [1], [0]).tolist()
eject_mass_high = np.interp(splitted_mass_grid[1], mass, eject_mass).tolist()
H_eject_mass_high = np.interp(splitted_mass_grid[1], mass, H_eject_mass).tolist()
He_eject_mass_high = np.interp(splitted_mass_grid[1], mass, He_eject_mass).tolist()
C_eject_mass_high = np.interp(splitted_mass_grid[1], mass, C_eject_mass).tolist()
N_eject_mass_high = np.interp(splitted_mass_grid[1], mass, N_eject_mass).tolist()
O_eject_mass_high = np.interp(splitted_mass_grid[1], mass, O_eject_mass).tolist()
Ne_eject_mass_high = np.interp(splitted_mass_grid[1], mass, Ne_eject_mass).tolist()
Mg_eject_mass_high = np.interp(splitted_mass_grid[1], mass, Mg_eject_mass).tolist()
Si_eject_mass_high = np.interp(splitted_mass_grid[1], mass, Si_eject_mass).tolist()
S_eject_mass_high = np.interp(splitted_mass_grid[1], mass, S_eject_mass).tolist()
Ca_eject_mass_high = np.interp(splitted_mass_grid[1], mass,Ca_eject_mass).tolist()
Metal_eject_mass_high = np.interp(splitted_mass_grid[1], mass, Metal_eject_mass).tolist()
Fe_eject_mass_high = np.interp(splitted_mass_grid[1], mass, Fe_eject_mass).tolist()
eject_mass = eject_mass_low + eject_mass_high
H_eject_mass = H_eject_mass_low + H_eject_mass_high
He_eject_mass = He_eject_mass_low + He_eject_mass_high
C_eject_mass = C_eject_mass_low + C_eject_mass_high
N_eject_mass = N_eject_mass_low + N_eject_mass_high
O_eject_mass = O_eject_mass_low + O_eject_mass_high
Ne_eject_mass = Ne_eject_mass_low + Ne_eject_mass_high
Mg_eject_mass = Mg_eject_mass_low + Mg_eject_mass_high
Si_eject_mass = Si_eject_mass_low + Si_eject_mass_high
S_eject_mass = S_eject_mass_low + S_eject_mass_high
Ca_eject_mass = Ca_eject_mass_low + Ca_eject_mass_high
Metal_eject_mass = Metal_eject_mass_low + Metal_eject_mass_high
Fe_eject_mass = Fe_eject_mass_low + Fe_eject_mass_high
# write file eject_mass
out_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_eject_mass += '{} '.format(mass_grid[n])
out_eject_mass += '\n# eject_mass\n'
for n in range(len(eject_mass)):
out_eject_mass += '{} '.format(eject_mass[n])
name_eject_mass = 'yield_tables/rearranged___/setllar_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_eject_mass = open(name_eject_mass, 'w')
file_eject_mass.write(out_eject_mass)
file_eject_mass.close()
# write file H_eject_mass
out_H_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_H_eject_mass += '{} '.format(mass_grid[n])
out_H_eject_mass += '\n# H_eject_mass\n'
for n in range(len(H_eject_mass)):
out_H_eject_mass += '{} '.format(H_eject_mass[n])
name_H_eject_mass = 'yield_tables/rearranged___/setllar_H_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_H_eject_mass = open(name_H_eject_mass, 'w')
file_H_eject_mass.write(out_H_eject_mass)
file_H_eject_mass.close()
# write file He_eject_mass
out_He_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_He_eject_mass += '{} '.format(mass_grid[n])
out_He_eject_mass += '\n# He_eject_mass\n'
for n in range(len(He_eject_mass)):
out_He_eject_mass += '{} '.format(He_eject_mass[n])
name_He_eject_mass = 'yield_tables/rearranged___/setllar_He_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_He_eject_mass = open(name_He_eject_mass, 'w')
file_He_eject_mass.write(out_He_eject_mass)
file_He_eject_mass.close()
# write file C_eject_mass
out_C_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_C_eject_mass += '{} '.format(mass_grid[n])
out_C_eject_mass += '\n# C_eject_mass\n'
for n in range(len(C_eject_mass)):
out_C_eject_mass += '{} '.format(C_eject_mass[n])
name_C_eject_mass = 'yield_tables/rearranged___/setllar_C_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_C_eject_mass = open(name_C_eject_mass, 'w')
file_C_eject_mass.write(out_C_eject_mass)
file_C_eject_mass.close()
# write file N_eject_mass
out_N_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_N_eject_mass += '{} '.format(mass_grid[n])
out_N_eject_mass += '\n# N_eject_mass\n'
for n in range(len(N_eject_mass)):
out_N_eject_mass += '{} '.format(N_eject_mass[n])
name_N_eject_mass = 'yield_tables/rearranged___/setllar_N_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_N_eject_mass = open(name_N_eject_mass, 'w')
file_N_eject_mass.write(out_N_eject_mass)
file_N_eject_mass.close()
# write file O_eject_mass
out_O_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_O_eject_mass += '{} '.format(mass_grid[n])
out_O_eject_mass += '\n# O_eject_mass\n'
for n in range(len(O_eject_mass)):
out_O_eject_mass += '{} '.format(O_eject_mass[n])
name_O_eject_mass = 'yield_tables/rearranged___/setllar_O_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_O_eject_mass = open(name_O_eject_mass, 'w')
file_O_eject_mass.write(out_O_eject_mass)
file_O_eject_mass.close()
# write file Ne_eject_mass
out_Ne_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_Ne_eject_mass += '{} '.format(mass_grid[n])
out_Ne_eject_mass += '\n# Ne_eject_mass\n'
for n in range(len(Ne_eject_mass)):
out_Ne_eject_mass += '{} '.format(Ne_eject_mass[n])
name_Ne_eject_mass = 'yield_tables/rearranged___/setllar_Ne_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_Ne_eject_mass = open(name_Ne_eject_mass, 'w')
file_Ne_eject_mass.write(out_Ne_eject_mass)
file_Ne_eject_mass.close()
# write file Mg_eject_mass
out_Mg_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_Mg_eject_mass += '{} '.format(mass_grid[n])
out_Mg_eject_mass += '\n# Mg_eject_mass\n'
for n in range(len(Mg_eject_mass)):
out_Mg_eject_mass += '{} '.format(Mg_eject_mass[n])
name_Mg_eject_mass = 'yield_tables/rearranged___/setllar_Mg_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_Mg_eject_mass = open(name_Mg_eject_mass, 'w')
file_Mg_eject_mass.write(out_Mg_eject_mass)
file_Mg_eject_mass.close()
# write file Si_eject_mass
out_Si_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_Si_eject_mass += '{} '.format(mass_grid[n])
out_Si_eject_mass += '\n# Si_eject_mass\n'
for n in range(len(Si_eject_mass)):
out_Si_eject_mass += '{} '.format(Si_eject_mass[n])
name_Si_eject_mass = 'yield_tables/rearranged___/setllar_Si_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_Si_eject_mass = open(name_Si_eject_mass, 'w')
file_Si_eject_mass.write(out_Si_eject_mass)
file_Si_eject_mass.close()
# write file S_eject_mass
out_S_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_S_eject_mass += '{} '.format(mass_grid[n])
out_S_eject_mass += '\n# S_eject_mass\n'
for n in range(len(S_eject_mass)):
out_S_eject_mass += '{} '.format(S_eject_mass[n])
name_S_eject_mass = 'yield_tables/rearranged___/setllar_S_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_S_eject_mass = open(name_S_eject_mass, 'w')
file_S_eject_mass.write(out_S_eject_mass)
file_S_eject_mass.close()
# write file Ca_eject_mass
out_Ca_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_Ca_eject_mass += '{} '.format(mass_grid[n])
out_Ca_eject_mass += '\n# Ca_eject_mass\n'
for n in range(len(Ca_eject_mass)):
out_Ca_eject_mass += '{} '.format(Ca_eject_mass[n])
name_Ca_eject_mass = 'yield_tables/rearranged___/setllar_Ca_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_Ca_eject_mass = open(name_Ca_eject_mass, 'w')
file_Ca_eject_mass.write(out_Ca_eject_mass)
file_Ca_eject_mass.close()
# write file Fe_eject_mass
out_Fe_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_Fe_eject_mass += '{} '.format(mass_grid[n])
out_Fe_eject_mass += '\n# Fe_eject_mass\n'
for n in range(len(Fe_eject_mass)):
out_Fe_eject_mass += '{} '.format(Fe_eject_mass[n])
name_Fe_eject_mass = 'yield_tables/rearranged___/setllar_Fe_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_Fe_eject_mass = open(name_Fe_eject_mass, 'w')
file_Fe_eject_mass.write(out_Fe_eject_mass)
file_Fe_eject_mass.close()
# write file Metal_eject_mass
out_Metal_eject_mass = '# metallicity\n{}\n# mass\n'.format(metallicity)
for n in range(len(mass_grid)):
out_Metal_eject_mass += '{} '.format(mass_grid[n])
out_Metal_eject_mass += '\n# Metal_eject_mass\n'
for n in range(len(Metal_eject_mass)):
out_Metal_eject_mass += '{} '.format(Metal_eject_mass[n])
name_Metal_eject_mass = 'yield_tables/rearranged___/setllar_Metal_eject_mass_from_{}/{}_Z={}.txt'.format(yield_table_name, yield_table_name, metallicity)
file_Metal_eject_mass = open(name_Metal_eject_mass, 'w')
file_Metal_eject_mass.write(out_Metal_eject_mass)
file_Metal_eject_mass.close()
print('yield_tables/rearranged___/setllar_...eject_mass_from_{}/{}_Z=....txt saved'.format(yield_table_name, yield_table_name))
return
def function_get_Mfinal_and_Lifetime(input_string):
i_end = len(input_string)
i = 0
in_str = ''
while i < i_end:
in_str += input_string[i]
(i) = (i + 1)
output = float(in_str)
return output
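# The character-by-character copy above makes this call equivalent to float(input_string), e.g.
# function_get_Mfinal_and_Lifetime('4.25E+00') returns 4.25.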
def function_get_element_mass(element_mass_string):
i_end = len(element_mass_string)
i = 1
mass_str = ''
while i < i_end:
mass_str += element_mass_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
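# Skips the single leading marker character of a table entry (e.g. the '&' prefix used in the
# yield-table rows) and converts the remainder to a float, e.g.
# function_get_element_mass('&1.23E-02') returns 0.0123.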
def function_get_element_line_number(data, element):
i = 0
while i < len(data):
line_i = str.split(data[i])
if line_i[1] == 'Table:':
start = i
j = 0
while j < 100:
line_j = str.split(data[j])
if line_j[0] == '&'+element:
end = j
element_relative_line_number = j - i
break
(j) = (j+1)
break
(i) = (i + 1)
return element_relative_line_number
def function_get_Z_M(M_Z_string):
i = 0
i_M_start = 0
i_M_end = 0
i_Z_start = 0
i_Z_end = 0
while i < len(M_Z_string):
if M_Z_string[i] == 'M':
i_M_start = i+2
if M_Z_string[i] == ',':
i_M_end = i
i_Z_start = i+3
if M_Z_string[i] == ')':
i_Z_end = i
(i) = (i+1)
i = i_Z_start
Z_str = ''
while i < i_Z_end:
Z_str += M_Z_string[i]
(i) = (i + 1)
Z = float(Z_str)
i = i_M_start
M_str = ''
while i < i_M_end:
M_str += M_Z_string[i]
(i) = (i + 1)
M = float(M_str)
return (Z, M)
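# Parses a header of the assumed form '(M=13.0,Z=0.004)' (no space between the comma and 'Z')
# and returns (metallicity, initial mass) as floats, e.g.
#   function_get_Z_M('(M=13.0,Z=0.004)')  -->  (0.004, 13.0)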
def funtion_plot_yields():
global O_over_Mg_list, Mg_over_Fe_list, Mg_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_H_list, O_over_Fe_list, M_list, Z_list
j = 0
while j < len(M_list):
i = 0
while i < len(M_list[j]):
M_list[j][i] = math.log(M_list[j][i], 10)
(i) = (i+1)
(j) = (j+1)
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(1, figsize=(6, 5.25))
ax = fig.add_subplot(1, 1, 1)
plt.xlim(-0.5, 2.2)
plt.ylim(0, 2)
i = 0
while i < len(M_list):
plt.plot(M_list[i], O_over_Mg_list[i], label='Z={}'.format(Z_list[i]))
(i) = (i+1)
O_mass_eject_SNIa = 0.148 # TNH93 0.148 i99CDD1 0.09, i99CDD2 0.06, i99W7 0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13
Mg_mass_eject_SNIa = 0.009 # TNH93 0.009 i99CDD1 0.0077, i99CDD2 0.0042, i99W7 0.0085, ivo12/13 0.015-0.029, t03 0.013, t86 0.016
O_num = O_mass_eject_SNIa / 15.9994
Mg_num = Mg_mass_eject_SNIa / 24.305
O_over_Mg_SNIa = math.log(O_num / Mg_num, 10) - 8.69 + 7.60
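    # Bracket notation: [O/Mg] = log10(N_O/N_Mg) - log10(N_O/N_Mg)_sun, and the solar term is
    # A(O) - A(Mg) = 8.69 - 7.60 on the Asplund et al. (2009) scale, hence the "- 8.69 + 7.60" above.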
plt.plot([-0.3, 0.9], [O_over_Mg_SNIa, O_over_Mg_SNIa], ls="--", lw=2, label="SNIa")
plt.legend(prop={'size': 10}, loc='best')
plt.xlabel(r'log stellar mass [M$_\odot$]')
plt.ylabel(r'[O/Mg]=log($N_{O}/N_{Mg}$)-[O/Mg]$_\odot$')
plt.tight_layout()
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(2, figsize=(6, 5.25))
ax = fig.add_subplot(1, 1, 1)
plt.xlim(-0.5, 2.2)
plt.ylim(-2, 7)
i = 0
while i < len(M_list):
plt.plot(M_list[i], Mg_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
(i) = (i+1)
    Mg_mass_eject_SNIa = 0.009 # TNH93 0.009 i99CDD1 0.0077, i99CDD2 0.0042, i99W7 0.0085, ivo12/13 0.015-0.029, t03 0.013, t86 0.016
    Fe_mass_eject_SNIa = 0.372 #0.63 # Recchi2009 halved to 0.372 # TNH93 0.744 i99CDD1 0.56, i99CDD2 0.76, i99W7 0.63, ivo12/13 0.62-0.67, t03 0.74, t86 0.63
Mg_num = Mg_mass_eject_SNIa / 24.305
Fe_num = Fe_mass_eject_SNIa / 55.845
Mg_over_Fe_SNIa = math.log(Mg_num / Fe_num, 10) - 7.60 + 7.50
plt.plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
plt.plot([-2, 3], [0, 0], lw=0.1)
plt.legend(prop={'size': 10}, loc='best')
plt.xlabel(r'log stellar mass [M$_\odot$]')
plt.ylabel(r'[Mg/Fe]=log($N_{Mg}/N_{Fe}$)-[Mg/Fe]$_\odot$')
plt.tight_layout()
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(3, figsize=(6, 5.25))
ax = fig.add_subplot(1, 1, 1)
plt.xlim(-0.5, 2.2)
plt.ylim(-2, 7)
i = 0
while i < len(M_list):
plt.plot(M_list[i], O_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
(i) = (i+1)
O_over_Fe_SNIa = math.log(O_num / Fe_num, 10) - 7.60 + 7.50
plt.plot([-0.3, 0.9], [O_over_Fe_SNIa, O_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
plt.legend(prop={'size': 10}, loc='best')
plt.xlabel(r'log stellar mass [M$_\odot$]')
plt.ylabel(r'[O/Fe]=log($N_{O}/N_{Fe}$)-[O/Fe]$_\odot$')
plt.tight_layout()
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(4, figsize=(6, 5.25))
ax = fig.add_subplot(1, 1, 1)
plt.xlim(-0.5, 2.2)
plt.ylim(-2, 2)
i = 0
while i < len(M_list):
plt.plot(M_list[i], Mg_over_H_list[i], label='Z={}'.format(Z_list[i]))
(i) = (i + 1)
plt.plot([-2, 3], [0, 0], lw=0.1)
plt.legend(prop={'size': 10}, loc='best')
plt.xlabel(r'log stellar mass [M$_\odot$]')
plt.ylabel(r'[Mg/H]=log($N_{Mg}/N_{H}$)-log(same)$_\odot$')
plt.tight_layout()
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(5, figsize=(6, 5.25))
ax = fig.add_subplot(1, 1, 1)
plt.xlim(-0.5, 2.2)
plt.ylim(-2, 2)
i = 0
while i < len(M_list):
plt.plot(M_list[i], O_over_H_list[i], label='Z={}'.format(Z_list[i]))
(i) = (i + 1)
plt.plot([-2, 3], [0, 0], lw=0.1)
plt.legend(prop={'size': 10}, loc='best')
plt.xlabel(r'log stellar mass [M$_\odot$]')
plt.ylabel(r'[O/H]')
plt.tight_layout()
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(6, figsize=(6, 5.25))
ax = fig.add_subplot(1, 1, 1)
plt.xlim(-0.5, 2.2)
plt.ylim(-2, 2)
i = 0
while i < len(M_list):
plt.plot(M_list[i], Fe_over_H_list[i], label='Z={}'.format(Z_list[i]))
(i) = (i + 1)
plt.plot([-2, 3], [0, 0], lw=0.1)
plt.legend(prop={'size': 10}, loc='best')
plt.xlabel(r'log stellar mass [M$_\odot$]')
plt.ylabel(r'[Fe/H]')
plt.tight_layout()
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(7, figsize=(6, 5.25))
ax = fig.add_subplot(1, 1, 1)
plt.xlim(-0.5, 2.2)
plt.ylim(-2, 2)
i = 0
while i < len(M_list):
plt.plot(M_list[i], Z_over_H_list[i], label='Z={}'.format(Z_list[i]))
(i) = (i + 1)
plt.plot([-2, 3], [0, 0], lw=0.1)
plt.legend(prop={'size': 10}, loc='best')
plt.xlabel(r'log stellar mass [M$_\odot$]')
plt.ylabel(r'[Z/H]')
plt.tight_layout()
plt.show()
return
if __name__ == '__main__':
start_time = time.time()
Z_list = []
M_list = []
eject_mass_list = []
H_eject_mass_list = []
He_eject_mass_list = []
C_eject_mass_list = []
N_eject_mass_list = []
O_eject_mass_list = []
Ne_eject_mass_list = []
Mg_eject_mass_list = []
Si_eject_mass_list = []
S_eject_mass_list = []
Ca_eject_mass_list = []
Fe_eject_mass_list = []
Metal_eject_mass_list = []
O_over_Mg_list = []
Mg_over_H_list = []
Fe_over_H_list = []
O_over_H_list = []
Z_over_H_list = []
Mg_over_Fe_list = []
O_over_Fe_list = []
    yield_table_name = "Kobayashi06" # can be "WW95", "portinari98", "marigo01", or "Kobayashi06"
function_read_file(yield_table_name)
# funtion_plot_yields()
print(" - Run time: %s -" % round((time.time() - start_time), 2)) | 33,360 | 44.450954 | 161 | py |
galIMF | galIMF-master/element_abundances_solar.py | # This function returns the customary astronomical scale for logarithmic abundances of the sun,
# that is, log(N_X/N_H)+12
# reference:
# Asplund, Martin; Grevesse, Nicolas; Sauval, A. Jacques; Scott, Pat (2009). ARAA 47 (1): 481–522.
# Anders, E., & Grevesse, N. 1989, Geochim. Cosmochim. Acta, 53, 197 (applied in WW95)
def function_solar_element_abundances(reference_name, element_name):
if reference_name == 'Anders1989':
if element_name == "H":
solar_element_abundances = 12
elif element_name == "He":
solar_element_abundances = 10.99
elif element_name == "C":
solar_element_abundances = 8.56
elif element_name == "N":
solar_element_abundances = 8.05
elif element_name == "O":
solar_element_abundances = 8.93
elif element_name == "Ne":
solar_element_abundances = 8.09
elif element_name == "Mg":
solar_element_abundances = 7.58
elif element_name == "Si":
solar_element_abundances = 7.55
elif element_name == "S":
solar_element_abundances = 7.21
elif element_name == "Ca":
solar_element_abundances = 6.36
elif element_name == "Fe":
solar_element_abundances = 7.67
else:
print("Wrong/unknown element name for function_solar_element_abundances; Anders1989")
solar_element_abundances = None
elif reference_name == 'Asplund2009':
if element_name == "H":
solar_element_abundances = 12
elif element_name == "He":
solar_element_abundances = 10.93
elif element_name == "C":
solar_element_abundances = 8.43
elif element_name == "N":
solar_element_abundances = 7.83
elif element_name == "O":
solar_element_abundances = 8.69
elif element_name == "Ne":
solar_element_abundances = 7.93
elif element_name == "Mg":
solar_element_abundances = 7.60
elif element_name == "Si":
solar_element_abundances = 7.51
elif element_name == "S":
solar_element_abundances = 7.12
elif element_name == "Ca":
solar_element_abundances = 6.34
elif element_name == "Fe":
solar_element_abundances = 7.50
else:
print("Wrong/unknown element name for function_solar_element_abundances; Asplund2009")
solar_element_abundances = None
elif reference_name == 'Anders1989_mass':
if element_name == "H":
solar_element_abundances = 0.70683
elif element_name == "He":
solar_element_abundances = 0.27431
elif element_name == "Metal":
solar_element_abundances = 0.01886
else:
print("Wrong/unknown element name for function_solar_element_abundances; Anders1989_mass")
solar_element_abundances = None
elif reference_name == 'Anders1989_mass_according_to_Asplund2009':
if element_name == "H":
solar_element_abundances = 0.7096
elif element_name == "He":
solar_element_abundances = 0.2691
elif element_name == "Metal":
solar_element_abundances = 0.0213
else:
print("Wrong/unknown element name for function_solar_element_abundances; Anders1989_mass_according_to_Asplund2009")
solar_element_abundances = None
elif reference_name == 'Asplund2009_mass':
if element_name == "H":
solar_element_abundances = 0.7154
elif element_name == "He":
solar_element_abundances = 0.2703
elif element_name == "Metal":
solar_element_abundances = 0.0142
else:
print("Wrong/unknown element name for function_solar_element_abundances; Asplund2009_mass")
solar_element_abundances = None
else:
print('Wrong input reference_name for element_abundances_solar.function_solar_element_abundances.')
solar_element_abundances = None
return solar_element_abundances
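# A minimal usage sketch (illustrative; the observed value below is hypothetical): the returned
# scale is A(X) = log10(N_X/N_H) + 12, so a bracket abundance follows by subtracting the solar value.
if __name__ == '__main__':
    A_Fe_sun = function_solar_element_abundances('Asplund2009', 'Fe') # 7.50
    A_Fe_obs = 7.00 # hypothetical observed 12 + log10(N_Fe/N_H)
    print('[Fe/H] =', A_Fe_obs - A_Fe_sun) # -0.5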
| 4,110 | 41.822917 | 127 | py |
galIMF | galIMF-master/SFT__galaxy_mass_26.py | import galevo
import math
import element_abundances_solar
import multiprocessing as mp
from time import time
def simulate(imf, Log_SFR, SFEN, STF):
Z_0 = 0.0000000142
solar_mass_component = "Asplund2009_mass"
Z_solar = element_abundances_solar.function_solar_element_abundances(solar_mass_component, 'Metal')
galevo.galaxy_evol(
imf=imf,
        STF=STF, # unrealistic results if more stars are forming at a time step than the instantaneous gas mass
SFEN=SFEN,
Z_0=Z_0,
solar_mass_component=solar_mass_component,
str_yield_table='Kobayashi06',
IMF_name='Kroupa',
steller_mass_upper_bound=150,
time_resolution_in_Myr=1,
mass_boundary_observe_low=1.5,
mass_boundary_observe_up=8,
SFH_model='provided',
SFE=0.013, # This parameter is not applied when SFH_model='provided'.
SNIa_ON=True,
SNIa_yield_table='Iwamoto1999',
solar_abu_table='Asplund2009',
high_time_resolution=None,
plot_show=None,
plot_save=None,
outflow=None,
check_igimf=None)
end_time = time()
log_Z_0 = round(math.log(Z_0 / Z_solar, 10), 2)
file = open(
'simulation_results_from_galaxy_evol/imf{}STF{}log_SFR{}SFEN{}Z_0{}/chemical_and_SN_evolution.txt'.format(imf, STF, Log_SFR,
SFEN, log_Z_0), 'r')
data = file.readlines()
file.close()
Alive_stellar_mass = [float(x) for x in data[7].split()]
dynamical_mass = [float(x) for x in data[11].split()]
gas_Mg_over_Fe = [float(x) for x in data[23].split()]
Mass_weighted_stellar_Mg_over_Fe = [float(x) for x in data[25].split()]
luminosity_weighted_stellar_Mg_over_Fe = [float(x) for x in data[63].split()]
gas_Z_over_X = [float(x) for x in data[39].split()]
Mass_weighted_stellar_Z_over_X = [float(x) for x in data[41].split()]
luminosit_weighted_stellar_Z_over_X = [float(x) for x in data[61].split()]
gas_Fe_over_H = [float(x) for x in data[19].split()]
Mass_weighted_stellar_Fe_over_H = [float(x) for x in data[21].split()]
# luminosit_weighted_stellar_Fe_over_H = [float(x) for x in data[??].split()]
if imf == 'igimf':
file_name = 'Metal_mass_relation_IGIMFZ'
elif imf == 'Kroupa':
file_name = 'Metal_mass_relation_KroupaIMF'
file = open('simulation_results_from_galaxy_evol/{}.txt'.format(file_name), 'r')
old_lines = file.read()
file.close()
file = open('simulation_results_from_galaxy_evol/{}.txt'.format(file_name), 'w')
if imf == 'Kroupa':
imf__ = 0
elif imf == 'igimf':
imf__ = 1
else:
imf__ = imf
new_line = old_lines + "{} {} {} {} {} {} {} {} {} {} {} {} {} {}\n".format(imf__, Log_SFR, SFEN, STF,
Alive_stellar_mass[0], dynamical_mass[0],
Mass_weighted_stellar_Mg_over_Fe[-1], Mass_weighted_stellar_Z_over_X[-1],
gas_Mg_over_Fe[-1], gas_Z_over_X[-1],
luminosity_weighted_stellar_Mg_over_Fe[-1], luminosit_weighted_stellar_Z_over_X[-1],
gas_Fe_over_H[-1], Mass_weighted_stellar_Fe_over_H[-1])
file.write(new_line)
file.close()
return
# Parallelizing using Pool.map()
def a_pipeline(parameter):
STF = parameter
print("\n Start simulation for: SFEN={} STF={} Log_SFR={} imf={}".format(SFEN, STF, Log_SFR, imf))
simulate(imf, Log_SFR, SFEN, STF)
return
def a_pipeline_pair(parameters):
imf = parameters[0]
STF = parameters[1]
print("\n Start simulation for: SFEN={} STF={} Log_SFR={} imf={}".format(SFEN, STF, Log_SFR, imf))
simulate(imf, Log_SFR, SFEN, STF)
return
if __name__ == '__main__':
start = time()
    # Parallelizing only works for the same SFEN since the SFH.txt file is the same!
SFH_shape = 'flat'
location = 0
skewness = 10
sfr_tail = 0
imf = 'Kroupa'
# SFEN_list = [100]
# for SFEN in SFEN_list:
# Log_SFR_list = [5.0]
# for Log_SFR in Log_SFR_list:
# galevo.generate_SFH(SFH_shape, Log_SFR, SFEN, sfr_tail, skewness, location)
# STF_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5]
# pool = mp.Pool(mp.cpu_count())
# pool.map(a_pipeline, [STF for STF in STF_list])
# pool.close()
SFEN_list = [2, 5, 10, 20, 50, 100, 150, 200, 250, 300, 350, 400]
for SFEN in SFEN_list:
Log_SFR_list = [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0, 3.0, 3.5, 4.0]
for Log_SFR in Log_SFR_list:
galevo.generate_SFH(SFH_shape, Log_SFR, SFEN, sfr_tail, skewness, location)
STF_list = [0.1, 0.35, 0.6, 0.85, 1.1]
pool = mp.Pool(mp.cpu_count())
pool.map(a_pipeline, [STF for STF in STF_list])
pool.close()
# SFEN_list = [400]
# for SFEN in SFEN_list:
# Log_SFR_list = [-2.0, 4.0]
# for Log_SFR in Log_SFR_list:
# galevo.generate_SFH(SFH_shape, Log_SFR, SFEN, sfr_tail, skewness, location)
# STF_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5]
# pool = mp.Pool(mp.cpu_count())
# pool.map(a_pipeline, [STF for STF in STF_list])
# pool.close()
end = time()
print("Run time:", end - start)
| 5,476 | 38.688406 | 132 | py |
galIMF | galIMF-master/example_star_cluster_IMF.py | # Python3 code, last update Wed 20 Dec 2018
# Example file for sampling the stellar masses of every star in the star cluster.
# Made by: Yan Zhiqiang & Tereza Jerabkova
# The outputs of this example are:
# - a comparison plot of generated variable IMF and canonical IMF ('star_cluster_IMF_plot.pdf');
# - a .txt file containing the stellar masses ('Stellar_masses_for_a_star_cluster.txt').
# --------------------------------------------------------------------------------------------------------------------------------
# Import modules and libraries
# --------------------------------------------------------------------------------------------------------------------------------
import galimf # Main part of the GalIMF code for generating and sampling Galaxy-wide stellar Initial Mass Function.
from pylab import *
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import quad
import csv # csv and izip/zip are used to create output files
try:
from itertools import izip as zip
except ImportError: # will be python 3.x series
pass
# -----------------------------------------------------------------------
# figure output settings:
fig0 = plt.figure(figsize=(4, 3)) # size for one column plot
gs1 = GridSpec(1, 1)
ax0 = plt.subplot(gs1[0])
# input parameters:
StarClusterMass = float(input("\n ================================\n"
" === example_star_cluster_IMF ===\n"
" ================================\n\n"
" This code generate the stellar masses of one star-cluster given the total "
"star-cluster mass applying optimal sampling.\n\n"
" Please type in the cluster mass in solar mass unit then hit return:"))
M_over_H = float(input("\n The code assumes an empirical relation between the IMF slopes for low-mass stars and metallicity.\n"
" Canonical IMF is recovered with solar metallicity, i.e., [M/H]=0.\n"
" Please type in the initial metallicity of the cluster, [M/H], then hit return to sample stellar masses:"))
age = float(input("\n    The code calculates the mass of the most massive star at a given age according to the PARSEC v1.2 stellar evolution model.\n"
" Currently, the age resolution is 10 Myr.\n"
" Please type in the age of the star cluster in [yr], then hit return to sample stellar masses (input 0 if age is not a concern):"))
# Calculate lifetime according to (mass, metallicity)
Z_list_value = [0.0001, 0.004, 0.02, 0.04]
Z_list_index = np.argmin(np.abs(np.array(Z_list_value) - 0.02*10**M_over_H))
stellar_Z_extrapolated = Z_list_value[Z_list_index]
if stellar_Z_extrapolated == 0.0001:
data_AGB = np.loadtxt('Mass_lifetime_relation/PARSEC/Mass_lifetime_relation_Z_0.0001.txt')
elif stellar_Z_extrapolated == 0.004:
data_AGB = np.loadtxt('Mass_lifetime_relation/PARSEC/Mass_lifetime_relation_Z_0.004.txt')
elif stellar_Z_extrapolated == 0.02:
data_AGB = np.loadtxt('Mass_lifetime_relation/PARSEC/Mass_lifetime_relation_Z_0.02.txt')
elif stellar_Z_extrapolated == 0.04:
data_AGB = np.loadtxt('Mass_lifetime_relation/PARSEC/Mass_lifetime_relation_Z_0.04.txt')
def function_mass_boundary(this_time, data_AGB):
logAge_mass_boundary = np.round(data_AGB[:, 0], 5)
logAge_value = np.log10(this_time)
logAge_list_value = np.round(sorted(set(logAge_mass_boundary)), 5)
logAge_list_index = np.argmin(np.abs(np.array(logAge_list_value) - np.round(logAge_value, 5)))
logAge_value_extrapolated = logAge_list_value[logAge_list_index]
index = np.where((logAge_mass_boundary == np.round(logAge_value_extrapolated, 5)))
index = index[0]
AGB_mass_boundary = 10**data_AGB[index, 2]
star_mass_boundary = 10**data_AGB[index, 3]
return AGB_mass_boundary, star_mass_boundary
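# function_mass_boundary picks the PARSEC row whose log(age) is closest to log10(this_time) and
# returns (10**column2, 10**column3), used here as the AGB mass boundary and the mass of the most
# massive star still alive at that age (both in solar masses).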
(AGB_mass_boundary, star_mass_boundary) = function_mass_boundary(age, data_AGB)
print(" The most massive star alive at the given age has {} solar mass.".format(star_mass_boundary))
# setup alpha values:
alpha_2 = 2.3
alpha_1 = 1.3
alpha3_model = 2
alpha2_model = 'Z' # or 1 for our publications before 2020
alpha1_model = 'Z' # or 1 for our publications before 2020
alpha3_change = galimf.function_alpha_3_change(alpha3_model, StarClusterMass, M_over_H)
alpha2_change = galimf.function_alpha_2_change(alpha_2, alpha2_model, M_over_H)
alpha1_change = galimf.function_alpha_1_change(alpha_1, alpha1_model, M_over_H)
print("\n - Sampling the star cluster with {} solar mass and [M/H] = {} -".format(StarClusterMass, M_over_H))
# apply galIMF to optimally sample stars from IMF:
galimf.function_sample_from_imf(StarClusterMass, 1, 0.08, alpha1_change, 0.5, alpha2_change, 1, alpha3_change, 150)
# apply galIMF to draw IMF analytically:
galimf.function_draw_xi_str(0.08, StarClusterMass, 1, 0.08, alpha1_change, 0.5, alpha2_change, 1, alpha3_change, 150)
List_M_str_for_xi_str = galimf.x_IMF
List_xi_str = galimf.y_IMF
print("\n - Sampling completed -\n")
# followings are all sampled results:
# most massive stellar mass in the cluster:
print(" The most massive star formed in this star cluster has {} solar mass.".format(round(galimf.list_M_str_i[0], 2)))
# All of the sampled stellar masses in solar mass unit are (from massive to less massive):
list_stellar_masses = np.array(galimf.list_M_str_i)
# # The bolometric luminosity is estimated according to Yan et al. 2019, 2022:
# L_bol_tot = 0
# for mass in list_stellar_masses:
# log_mass = math.log(mass, 10)
# if log_mass < -0.37571790416: # < log0.421
# log_L_bol = 2.3 * log_mass -0.63827216398
# elif log_mass < 0.29225607135:
# log_L_bol = 4 * log_mass
# elif log_mass < 1.74358815016:
# log_L_bol = 3.5 * log_mass + 0.14612803567
# else:
# log_L_bol = log_mass + 4.50514997832
# L_bol_tot += 10**log_L_bol
# print("    The total (ZAMS) bolometric luminosity of all the optimally-sampled stars is estimated to be: {} L_sun.".format(round(L_bol_tot, 2)))
# NOTE! Multiple stars can be represented by the same stellar mass if they have similar masses.
# The number of stars represented by each of the stellar masses above is:
list_stellar_numbers = galimf.list_n_str_i
if list_stellar_numbers[-1] == 0:
del list_stellar_numbers[-1]
n_stars = np.array(list_stellar_numbers)
# save the sampled stellar mass in a txt file:
with open('Stellar_masses_for_a_star_cluster.txt', 'w') as file:
writer = csv.writer(file, delimiter=' ')
file.write(
"# Output file of the generated stellar masses for a star cluster with given mass and metallicity.\n"
"# The columns are:\n# Mass in solar mass unit; "
"Number of stars in this star cluster have mass close to this value\n\n")
writer.writerows(
zip(list_stellar_masses, list_stellar_numbers))
print("\n Stellar masses of every star in the star cluster is saved in the file: "
"Stellar_masses_for_a_star_cluster.txt")
# formatting a figure output to compare the optimally sampled result (label: OS) with canonical IMF (label: IMF):
# binning the sampled star number:
bins = np.logspace(np.log10(0.08), np.log10(150), 20, base=10)
vals0 = np.zeros(len(bins))
for i, b in enumerate(bins):
if i == len(bins)-1:
break
else:
star_array = (list_stellar_masses[np.logical_and(list_stellar_masses >= b, list_stellar_masses < bins[i+1])])
n_array = (n_stars[np.logical_and(list_stellar_masses >= b, list_stellar_masses < bins[i+1])])
len_array = 0
for j, n in enumerate(n_array):
len_array = len_array+n
vals0[i] = len_array/(bins[i+1]-bins[i])
ax0.step(np.log10(bins), np.log10(vals0+1.e-3), color='blue', where='post', zorder=1, lw=1.5, label="Optimally sampled stellar masses")
# constructing the canonical IMF:
N = 100
can_imf = np.zeros(N)
masses = np.logspace(np.log10(0.08), np.log10(150), N, base=10)
for i, m in enumerate(masses):
if m <= 0.5:
can_imf[i] = m ** (-1.3)
else:
can_imf[i] = 0.5*m ** (-2.3)
def imf(mass, k, alpha):
return k*mass*mass**(-alpha)
Norm = quad(imf, 0.08, 0.5, args=(1, 1.3))[0] + quad(imf, 0.5, 150, args=(0.5, 2.3))[0]
can_imf = np.array(can_imf)*StarClusterMass/Norm
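# Normalisation sketch: Norm = int_{0.08}^{0.5} m * m^(-1.3) dm + int_{0.5}^{150} 0.5 * m * m^(-2.3) dm
# is the total stellar mass of a canonical IMF that is continuous at 0.5 Msun, so scaling by
# StarClusterMass/Norm gives a canonical IMF containing the same total mass as the sampled cluster.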
ax0.plot(np.log10(masses), np.log10(can_imf), color='black', lw=1.5, label='Canonical IMF', zorder=0, ls='dotted')
# plot analytical IMF:
ax0.plot(np.log10(List_M_str_for_xi_str), np.log10(List_xi_str), color='red', label='analytical IMF', zorder=0, ls='dashed')
# plot settings:
ax0.set_ylabel(r'$\log_{\rm 10}(\xi, [\#_{\star}/M_\odot])$')
ax0.set_xlabel(r'$\log_{\rm 10}(m, [M_\odot])$')
plt.legend()
plt.tight_layout()
# save the plot:
plt.savefig('star_cluster_IMF_plot.pdf', dpi=300)
# end of the example:
print(" The plot is saved in the file: star_cluster_IMF_plot.pdf\n\n"
" ============================\n")
# show the plot
plt.show()
### Plot the mmax--Mecl relation:
#
# alpha_2 = 2.3
# alpha_1 = 1.3
# alpha3_model = 2
# alpha2_model = 1
# alpha1_model = 1
# M_over_H_list = [-3, -2, -1, 0, 1]
# for j in range(5):
# M_over_H = M_over_H_list[j]
# # The lowest possible star cluster mass has a limit when M_ecl=m_max, depending on the assumed IMF.
# if M_over_H < 0.1:
# lower_cluster_mass_limit = 0.15 - M_over_H / 11
# else:
# lower_cluster_mass_limit = 0.15 - M_over_H / 28
# alpha2_change = galimf.function_alpha_2_change(alpha_2, alpha2_model, M_over_H)
# alpha1_change = galimf.function_alpha_1_change(alpha_1, alpha1_model, M_over_H)
# StarClusterMass_list = np.arange(lower_cluster_mass_limit, 10, 0.1).tolist()
# for i in range(49):
# StarClusterMass_list += [10 ** ((i + 10) / 10)]
#
# M_max_list = []
#
# for i in range(len(StarClusterMass_list)):
# StarClusterMass = StarClusterMass_list[i]
# alpha3_change = galimf.function_alpha_3_change(alpha3_model, StarClusterMass, M_over_H)
# galimf.function_sample_from_imf(StarClusterMass, 1, 0.08, alpha1_change, 0.5, alpha2_change, 1, alpha3_change,
# 150)
# galimf.function_draw_xi_str(0.08, StarClusterMass, 1, 0.08, alpha1_change, 0.5, alpha2_change, 1, alpha3_change,
# 150)
# List_M_str_for_xi_str = galimf.x_IMF
# List_xi_str = galimf.y_IMF
# M_max_list.append(galimf.list_M_str_i[0])
# plt.loglog(StarClusterMass_list, M_max_list, label="[Z]={}".format(M_over_H))
#
# plt.loglog([5, 5], [0.1, 10], lw=0.5, label=r'cluster mass = 5 [M$_\odot$]')
# plt.loglog([0.1, 10], [1, 1], ls='dotted', c='0.5')
#
# plt.xlabel(r"Star cluster mass [M$_\odot$]")
# plt.ylabel(r"Most massive star mass [M$_\odot$]")
# plt.legend()
# plt.tight_layout()
# plt.show()
| 10,907 | 43.161943 | 153 | py |
galIMF | galIMF-master/element_abundances_primordial.py | import element_weight_table, element_abundances_solar
H_weight = element_weight_table.function_element_weight("H")
primary_He_mass_fraction = 0.247
primary_H_mass_fraction_roughly = 1 - primary_He_mass_fraction # Corrected in below
primary_D_mass_fraction = primary_H_mass_fraction_roughly * 2.58 * 10**-5
primary_He3_mass_fraction = primary_H_mass_fraction_roughly * 10**-4
primary_L_mass_fraction = primary_H_mass_fraction_roughly * 5 * 10**-10
# Reference: Cyburt+ 2016, Big bang nucleosynthesis: Present status, DOI: 10.1103/RevModPhys.88.015004
Z_0 = 10**-6
primary_H_mass_fraction = 1 - primary_He_mass_fraction - primary_D_mass_fraction - primary_He3_mass_fraction\
- primary_L_mass_fraction - Z_0
def function_element_mass_primary_fraction(solar_abu_reference_name, element_name, Z_0, Z_solar):
if element_name == "H":
element_mass_fraction = primary_H_mass_fraction
elif element_name == "He":
element_mass_fraction = primary_He_mass_fraction
elif element_name == "C":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "C") - 12) \
* element_weight_table.function_element_weight("C") * Z_0 / Z_solar
elif element_name == "N":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "N") - 12) \
* element_weight_table.function_element_weight("N") * Z_0 / Z_solar
elif element_name == "O":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "O") - 12) \
* element_weight_table.function_element_weight("O") * Z_0 / Z_solar
elif element_name == "Ne":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "Ne") - 12) \
* element_weight_table.function_element_weight("Ne") * Z_0 / Z_solar
elif element_name == "Mg":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "Mg") - 12) \
* element_weight_table.function_element_weight("Mg") * Z_0 / Z_solar
elif element_name == "Si":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "Si") - 12) \
* element_weight_table.function_element_weight("Si") * Z_0 / Z_solar
elif element_name == "S":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "S") - 12) \
* element_weight_table.function_element_weight("S") * Z_0 / Z_solar
elif element_name == "Ca":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "Ca") - 12) \
* element_weight_table.function_element_weight("Ca") * Z_0 / Z_solar
elif element_name == "Fe":
element_mass_fraction = primary_H_mass_fraction / H_weight\
* 10**(element_abundances_solar.function_solar_element_abundances(solar_abu_reference_name, "Fe") - 12) \
* element_weight_table.function_element_weight("Fe") * Z_0 / Z_solar
else:
print("Wrong element name for function_element_mass_primary_fraction")
element_mass_fraction = None
return element_mass_fraction
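# A minimal sketch (illustrative; the Z_solar value below is the Asplund2009_mass metal fraction):
# the primordial mass fractions defined at the top sum to unity by construction, and every metal
# follows the solar abundance pattern diluted to the total metallicity Z_0.
if __name__ == '__main__':
    total = (primary_H_mass_fraction + primary_He_mass_fraction + primary_D_mass_fraction
             + primary_He3_mass_fraction + primary_L_mass_fraction + Z_0)
    print('sum of primordial mass fractions =', total)
    print('primordial Fe mass fraction =',
          function_element_mass_primary_fraction('Asplund2009', 'Fe', Z_0, 0.0142))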
| 4,340 | 69.016129 | 145 | py |
galIMF | galIMF-master/IMFs/diet_Salpeter_IMF.py | def custom_imf(mass, time): # there is no time dependence for Salpeter IMF
# Bell & de Jong (2001). Salpeter IMF x = 1.35 with a flat x = 0 slope below 0.35
# integrate this function's output xi result in the number of stars in mass limits.
if mass < 0.35:
xi = mass ** (-1)
elif mass < 150:
xi = mass ** (-2.35) * 0.35**(-1) / 0.35**(-2.35)
else:
xi = 0
return xi | 413 | 40.4 | 87 | py |
galIMF | galIMF-master/IMFs/given_IMF.py | def custom_imf(mass, time):
change_time = 10*10**7
change_limit = 1
alpha_change = (change_time - time)/change_time
if alpha_change < 0 - change_limit:
alpha_change = 0 - change_limit
if alpha_change > change_limit:
alpha_change = change_limit
if mass < 0.08:
return 0
elif mass < 150:
xi = mass ** (-2.35 + alpha_change)
return xi
else:
return 0
| 428 | 24.235294 | 51 | py |
galIMF | galIMF-master/IMFs/Salpeter_IMF.py | from scipy.integrate import quad
def custom_imf_unnormalized(mass): # there is no time dependence for Salpeter IMF
if mass < 0.1:
return 0
elif mass < 100:
return mass ** (-2.35)
else:
return 0
def mass_function(mass):
return custom_imf_unnormalized(mass) * mass
integrated_mass = quad(mass_function, 0.08, 150, limit=50)[0]
def custom_imf(mass, time=0): # normalized to a population with mass = 1 Msun
if mass < 0.1:
return 0
elif mass < 100:
return mass ** (-2.35)/integrated_mass
else:
return 0
| 584 | 20.666667 | 82 | py |
galIMF | galIMF-master/IMFs/Kroupa_IMF.py | from scipy.integrate import quad
alpha3 = 2.3
def custom_imf_unnormalized(mass): # there is no time dependence for Kroupa IMF
if mass < 0.08:
return 0
elif mass < 0.5:
return 2*mass**(-1.3)
elif mass < 1:
return mass**(-2.3)
elif mass < 150:
return mass**(-alpha3)
else:
return 0
def mass_function(mass):
return custom_imf_unnormalized(mass) * mass
integrated_mass = quad(mass_function, 0.08, 150, limit=50)[0]
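# Normalisation: integrated_mass = int_{0.08}^{150} m * xi(m) dm for the unnormalised IMF above, so
# custom_imf below returns xi(m) per 1 Msun of stars formed (i.e. int m * custom_imf(m) dm = 1).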
def custom_imf(mass, time=0): # normalized to a population with mass = 1 Msun
if mass < 0.08:
return 0
elif mass < 0.5:
return 2*mass**(-1.3)/integrated_mass
elif mass < 1:
return mass**(-2.3)/integrated_mass
elif mass < 150:
return mass**(-alpha3)/integrated_mass
else:
return 0
| 825 | 21.944444 | 80 | py |
galIMF | galIMF-master/yield_tables/SNIa_yield.py | # This function returns the element mass ejected for a type Ia supernova event
def function_mass_ejected(yield_reference_name, element_name):
mass_ejected = 0
if yield_reference_name == 'Thielemann1993':
# Reference: Thielemann et al. (1993)
# Values adopted from
# Gibson, B. K., Loewenstein, M., & Mushotzky, R. F. 1997, MNRAS, 290, 623, their TNH93 dataset
if element_name == "O":
mass_ejected = 0.148 #
elif element_name == "Ne":
mass_ejected = 0.005 #
elif element_name == "Mg":
mass_ejected = 0.009 #
elif element_name == "Si":
mass_ejected = 0.158 #
elif element_name == "S":
mass_ejected = 0.086 #
elif element_name == "Fe":
mass_ejected = 0.744 #
else:
mass_ejected = 0
elif yield_reference_name == 'Seitenzahl2013':
# Reference: Seitenzahl et al. 2013, MNRAS, 429, 1156
# Below adopt the mean value of all the model results in their table 2
if element_name == "C":
mass_ejected = 0.0073 # +-0.0047
elif element_name == "O":
mass_ejected = 0.11 # +-0.06
elif element_name == "Ne":
mass_ejected = 0.0057 # +-0.004
elif element_name == "Mg":
mass_ejected = 0.019 # +-0.01
elif element_name == "Si":
mass_ejected = 0.248 # +-0.092
elif element_name == "S":
mass_ejected = 0.0935 # +-0.032
elif element_name == "Ar":
mass_ejected = 0.0148 # +-0.005
elif element_name == "Ca":
mass_ejected = 0.012 # +-0.004
elif element_name == "Cr":
mass_ejected = 0.0072 # +-0.0024
elif element_name == "Mn":
mass_ejected = 0.0106 # +-0.0025
elif element_name == "Fe":
mass_ejected = 0.69 # +-0.21
elif element_name == "Ni":
mass_ejected = 0.065 # +-0.010
else:
mass_ejected = 0
elif yield_reference_name == 'Iwamoto1999':
# Reference: https://ui.adsabs.harvard.edu/abs/1999ApJS..125..439I/abstract
# Below adopt the main isotope of W70 model # the mean value of all models (W, WDD, CDD) in their table 3
if element_name == "C":
mass_ejected = 0.0508 #
elif element_name == "O":
mass_ejected = 0.133 #
elif element_name == "Ne":
mass_ejected = 0.00229 #
elif element_name == "Mg":
mass_ejected = 0.0158 # 0.00727 # (8.5+15.8+7.55+4.47+2.62+7.72+4.2)/7
elif element_name == "Si":
mass_ejected = 0.142 # 0.201 # (1.54+1.42+2.72+2.06+1.58+2.77+1.98)/7
elif element_name == "S":
mass_ejected = 0.0914 #
elif element_name == "Ar":
mass_ejected = 0.0191 #
elif element_name == "Ca":
mass_ejected = 0.0181 # 0.0228 # (1.19+1.81+3.1+2.43+1.88+3.18+2.38)/7
elif element_name == "Cr":
mass_ejected = 0.00773 #
elif element_name == "Mn":
mass_ejected = 0.00666 #
elif element_name == "Fe":
mass_ejected = 0.68 # 0.675 # (6.26+6.8+5.87+7.13+7.95+5.65+7.57)/7
elif element_name == "Ni":
mass_ejected = 0.0834 #
else:
mass_ejected = 0
else:
print('input wrong yield_reference_name')
return mass_ejected
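# A minimal usage sketch (illustrative): per-event SN Ia ejected masses (in solar masses) from the
# Iwamoto et al. (1999) entries above; elements without an entry return 0.
if __name__ == '__main__':
    for elem in ('Fe', 'Mg', 'O', 'H'):
        print(elem, function_mass_ejected('Iwamoto1999', elem)) # 0.68, 0.0158, 0.133, 0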
# # Other yield tables:
# # t86: Thielemann et al. 1986; ivo13: Seitenzahl et al. 201
# Fe_mass_eject = 0.744 # Nomoto 1984 0.613, TNH93 0.744, i99CDD1/CDD2/W7 0.56 /0.76 /0.63, ivo12/13 0.62-0.67, t03 0.74, t86 0.63
# Si_mass_eject = 0.158
# O_mass_eject = 0.148 # Nomoto 1984 0.140, TNH93 0.148, i99CDD1/CDD2/W7 0.09 /0.06, /0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13
# S_mass_eject = 0.086
# Mg_mass_eject = 0.009 # Nomoto 1984 0.023, TNH93 0.009, i99CDD1/CDD2/W7 0.0077 /0.0042 /0.0085, ivo12/13 0.015-0.029, t03 0.013, t86 0.016
# Ne_mass_eject = 0.005
# # O/Mg_mass = # Nomoto 1984 6.0869, TNH93 16.44, i99CDD1/CDD2/W7 11.688 /14.28 /16.47, ivo12/13 6-3.448, t03 10.77, t86 8.125
| 4,190 | 42.65625 | 142 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/benchpress.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BenchPress: A directed compiler benchmark generator powered by active learning.
The core operations of BenchPress are:
1. Preprocess and encode a corpus of human-written programs.
2. Define and train a machine learning model on the corpus.
3. Sample the trained model to generate new programs.
This program automates the execution of all three stages of the pipeline.
The pipeline can be interrupted and resumed at any time. Results are cached
across runs. Please note that many of the steps in the pipeline are extremely
compute intensive and highly parallelized. If configured with CUDA support,
any NVIDIA GPUs will be used to improve performance where possible.
"""
import contextlib
import cProfile
import os
import pathlib
import time
import sys
import typing
import datetime
from absl import app, flags
from deeplearning.benchpress.samplers import sample_observers as sample_observers_lib
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import memory
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.dashboard import dashboard
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.reinforcement_learning import reinforcement_models
from deeplearning.benchpress.proto import benchpress_pb2
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.proto import sampler_pb2
from deeplearning.benchpress.github import miner
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import proxy_bash
from eupy.hermes import client
FLAGS = flags.FLAGS
flags.DEFINE_string(
"notify_me",
None,
"Set receiver mail address to notify for program failures or termination."
)
flags.DEFINE_integer(
"notify_me_level",
5,
"Define logging level of mail client"
)
flags.DEFINE_boolean(
"color", True, "Colorize or not, logging messages"
)
flags.DEFINE_boolean(
"step", False, "Enable step execution on debug logs (debug level must be selected)"
)
flags.DEFINE_string(
"config", "/benchpress/config.pbtxt", "Path to a benchpress.Instance proto file."
)
flags.DEFINE_string(
"workspace_dir",
"/tmp/benchpress",
"Root path of the working space directory. Corpus, dataset, model and all meta files"
"will be stored here. Default value is /tmp folder.",
)
flags.DEFINE_integer(
"min_samples",
0,
"The minimum number of samples to make. If <= 0, sampling continues "
"indefinitely and never terminates.",
)
flags.DEFINE_boolean(
"print_samples", True, "If set, print the generated samples."
)
flags.DEFINE_boolean(
"store_samples_db", True, "If set, store generated samples to database."
)
flags.DEFINE_boolean(
"cache_samples", False, "If set, cache the generated sample protobufs."
)
flags.DEFINE_string(
"sample_text_dir", None, "A directory to write plain text samples to."
)
flags.DEFINE_string(
"stop_after",
None,
  'Stop BenchPress early. Valid options are: "corpus", "pre_train", or "train".',
)
flags.DEFINE_boolean(
"only_sample",
False,
"Select to deploy sampling without training."
)
flags.DEFINE_string(
"print_cache_path",
None,
'Print the directory of a cache and exit. Valid options are: "pre_train_corpus", "corpus", '
'"model", or "sampler".',
)
flags.DEFINE_boolean(
"debug",
False,
"Enable a debugging mode of BenchPress python runtime. When enabled, errors "
"which may otherwise be caught lead to program crashes and stack traces.",
)
flags.DEFINE_boolean(
"profiling",
False,
"Enable BenchPress self profiling. Profiling results be logged.",
)
flags.DEFINE_boolean(
"monitor_mem_usage",
False,
"Plot application RAM and GPU memory usage."
)
flags.DEFINE_boolean(
"dashboard_only", False, "If true, launch dashboard only."
)
flags.DEFINE_boolean(
"proxy_bash",
False,
"Set True to start a proxy bash thread."
"Commands are provided from BenchPress's"
"running terminal and standard's input format"
"must be: `>> CMD'."
)
class Instance(object):
"""A BenchPress instance encapsulates a github_miner, model, sampler, and working directory."""
def __init__(self, config: benchpress_pb2.Instance):
"""Instantiate an instance.
Args:
config: An Instance proto.
"""
self.working_dir = None
self.github = None
self.model = None
self.sampler = None
self.config = config
if config.HasField("github_miner"):
self.github = miner.GithubMiner.FromConfig(config.github_miner)
if config.HasField("working_dir"):
self.working_dir: pathlib.Path = pathlib.Path(
os.path.join(FLAGS.workspace_dir, config.working_dir)
).expanduser().resolve()
# Enter a session so that the cache paths are set relative to any requested
# working directory.
with self.Session():
# Initialize pytorch to make distributed barrier accessible.
pytorch.initPytorch()
if config.HasField("language_model"):
self.model: language_models.Model = language_models.Model(config.language_model)
elif config.HasField("rl_model"):
self.model: reinforcement_models.RLModel = reinforcement_models.RLModel(config.rl_model)
## Specialize 'locks' folder.
if environment.WORLD_SIZE > 1:
lock_cache = pathlib.Path(self.model.cache.path / "locks")
if environment.WORLD_RANK == 0:
lock_cache.mkdir(exist_ok = True)
else:
while not lock_cache.exists():
time.sleep(0.5)
distrib.init(lock_cache)
if config.HasField("sampler"):
self.sampler: samplers.Sampler = samplers.Sampler(
config.sampler,
model_hash = self.model.hash,
)
if environment.WORLD_RANK == 0:
self.dashboard = dashboard.Launch()
@contextlib.contextmanager
def Session(self) -> "Instance":
"""Scoped $BENCHPRESS_CACHE value."""
old_working_dir = os.environ.get("BENCHPRESS_CACHE", "")
if self.working_dir:
os.environ["BENCHPRESS_CACHE"] = str(self.working_dir)
yield self
if self.working_dir:
os.environ["BENCHPRESS_CACHE"] = old_working_dir
def Create(self) -> None:
with self.Session():
self.model.Create()
def PreTrain(self, *args, **kwargs) -> None:
if self.model.pre_train_corpus:
with self.Session():
test_sampler = None
if not self.sampler.is_active:
test_sampler_config = sampler_pb2.Sampler()
test_sampler_config.CopyFrom(self.sampler.config)
# Make all test samples the same sequence_length length.
del test_sampler_config.termination_criteria[:]
test_sampler_config.termination_criteria.extend(
[
sampler_pb2.SampleTerminationCriterion(
maxlen=sampler_pb2.MaxTokenLength(maximum_tokens_in_sample=self.sampler.sequence_length)
),
]
)
test_sampler = samplers.Sampler(test_sampler_config, sample_db_name = "pre_epoch_samples.db")
# We inject the `test_sampler` argument so that we can create samples
# during training.
self.model.PreTrain(*args, test_sampler = test_sampler, **kwargs)
def Train(self, *args, **kwargs) -> None:
with self.Session():
test_sampler = None
if not self.sampler.is_active:
test_sampler_config = sampler_pb2.Sampler()
test_sampler_config.CopyFrom(self.sampler.config)
# Make all test samples the same sequence_length length.
del test_sampler_config.termination_criteria[:]
test_sampler_config.termination_criteria.extend(
[
sampler_pb2.SampleTerminationCriterion(
maxlen=sampler_pb2.MaxTokenLength(maximum_tokens_in_sample=self.sampler.sequence_length)
),
]
)
test_sampler = samplers.Sampler(test_sampler_config, sample_db_name = "epoch_samples.db")
# We inject the `test_sampler` argument so that we can create samples
# during training.
self.model.Train(*args, test_sampler = test_sampler, **kwargs)
def Sample(self, *args, **kwargs) -> typing.List[model_pb2.Sample]:
self.PreTrain()
self.Train()
with self.Session():
self.model.Sample(self.sampler, *args, **kwargs)
def ToProto(self) -> benchpress_pb2.Instance:
"""Get the proto config for the instance."""
config = benchpress_pb2.Instance()
config.working_dir = str(self.working_dir)
if config.HasField("language_model"):
config.language_model.CopyFrom(self.model.config)
elif config.HasField("rl_model"):
config.rl_model.CopyFrom(self.model.config)
config.sampler.CopyFrom(self.sampler.config)
return config
@classmethod
def FromFile(cls, path: pathlib.Path) -> "Instance":
return cls(pbutil.FromFile(path, benchpress_pb2.Instance()))
def ConfigFromFlags() -> benchpress_pb2.Instance:
config_path = pathlib.Path(FLAGS.config)
if not config_path.is_file():
raise FileNotFoundError (f"BenchPress --config file not found: '{config_path}'")
config = pbutil.FromFile(config_path, benchpress_pb2.Instance())
os.environ["PWD"] = str(config_path.parent)
return config
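# A typical invocation (illustrative; paths and flag values are placeholders) would be:
#   python benchpress.py --config=/path/to/config.pbtxt --workspace_dir=/tmp/benchpress --min_samples=10
# where the config file is a benchpress_pb2.Instance protobuf in text format.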
def SampleObserversFromFlags(instance: Instance) -> typing.List[
sample_observers_lib.SampleObserver
]:
"""Instantiate sample observers from flag values."""
if instance.sampler is None:
return []
sample_observers = []
if FLAGS.min_samples <= 0:
l.logger().warning(
"Entering an infinite sample loop, this process will never end!"
)
else:
sample_observers.append(
sample_observers_lib.MaxSampleCountObserver(FLAGS.min_samples * instance.sampler.batch_size)
)
if FLAGS.print_samples:
sample_observers.append(sample_observers_lib.PrintSampleObserver())
if FLAGS.store_samples_db:
if environment.WORLD_RANK == 0:
(instance.model.cache.path / "samples" / instance.sampler.hash).mkdir(exist_ok = True)
sample_observers.append(sample_observers_lib.SamplesDatabaseObserver(
instance.model.cache.path / "samples" / instance.sampler.hash / instance.sampler.sample_db_name,
plot_sample_status = True
)
)
instance.sampler.symlinkModelDB(
instance.model.cache.path / "samples" / instance.sampler.hash,
instance.model.hash
)
if FLAGS.cache_samples:
sample_observers.append(sample_observers_lib.LegacySampleCacheObserver())
if FLAGS.sample_text_dir:
sample_observers.append(
sample_observers_lib.SaveSampleTextObserver(
pathlib.Path(FLAGS.sample_text_dir)
)
)
return sample_observers
def DoFlagsAction(
instance: Instance,
sample_observers: typing.List[sample_observers_lib.SampleObserver],
) -> None:
"""Do the action requested by the command line flags.
By default, this method trains and samples the instance using the given
sample observers. Flags which affect this behaviour are:
--print_cache_path={corpus,model,sampler}: Prints the path and returns.
--stop_after={corpus,train}: Stops after corpus creation or training,
respectively
--export_model=<path>: Train the model and export it to the requested path.
Args:
instance: The BenchPress instance to act on.
sample_observer: A list of sample observers. Unused if no sampling occurs.
"""
if instance.github:
instance.github.fetch()
if instance.model:
with instance.Session():
if FLAGS.print_cache_path == "pre_train_corpus":
print(instance.model.pre_train_corpus.cache.path)
return
elif FLAGS.print_cache_path == "corpus":
print(instance.model.corpus.cache.path)
return
elif FLAGS.print_cache_path == "model":
print(instance.model.cache.path)
return
elif FLAGS.print_cache_path == "sampler":
if instance.sampler:
print(instance.model.SamplerCache(instance.sampler))
else:
raise ValueError("Sampler config has not been specified.")
return
elif FLAGS.print_cache_path:
raise ValueError(f"Invalid --print_cache_path argument: '{FLAGS.print_cache_path}'")
# The default action is to sample the model.
if FLAGS.stop_after == "corpus":
instance.model.corpus.Create()
if instance.model.pre_train_corpus:
instance.model.pre_train_corpus.Create(tokenizer = instance.model.corpus.tokenizer)
elif FLAGS.stop_after == "pre_train":
instance.PreTrain()
l.logger().info("Model: {}".format(instance.model.cache.path))
elif FLAGS.stop_after == "train":
instance.Train()
l.logger().info("Model: {}".format(instance.model.cache.path))
elif FLAGS.stop_after:
raise ValueError(
f"Invalid --stop_after argument: '{FLAGS.stop_after}'"
)
else:
if instance.sampler:
instance.Sample(sample_observers)
instance.sampler.symlinkModelDB(
instance.model.cache.path / "samples" / instance.sampler.hash,
instance.model.hash
)
else:
l.logger().warn("Sampler has not been provided. Use --stop_after to create corpus or train.")
else:
if FLAGS.stop_after in {"corpus", "train"}:
l.logger().warn("FLAGS.stop_after {} will be ignored without model config.".format(FLAGS.stop_after))
if FLAGS.print_cache_path in {"pre_train_corpus", "corpus", "model", "sampler"}:
raise ValueError("{} config has not been specified.".format(FLAGS.print_cache_path))
elif FLAGS.print_cache_path:
raise ValueError(f"Invalid --print_cache_path argument: '{FLAGS.print_cache_path}'")
return
def main():
"""Main entry point."""
if FLAGS.dashboard_only:
if environment.WORLD_RANK == 0:
dash = dashboard.Launch(debug = {"debug": True})
else:
instance = Instance(ConfigFromFlags())
sample_observers = SampleObserversFromFlags(instance)
DoFlagsAction(instance, sample_observers)
return
def initMain(*args, **kwargs):
"""
Pre-initialization for the main function of the program
Args:
*args: Arguments to be passed to the function.
**kwargs: Arguments to be passed to the function.
"""
mail = None
if FLAGS.notify_me:
mail = client.initClient(FLAGS.notify_me)
l.initLogger(name = "benchpress", mail = mail, rank = environment.WORLD_RANK)
if FLAGS.local_filesystem:
pathlib.Path(FLAGS.local_filesystem).resolve().mkdir(exist_ok = True, parents = True)
if FLAGS.monitor_mem_usage:
mem_monitor_threads = memory.init_mem_monitors(
pathlib.Path(FLAGS.workspace_dir).resolve()
)
if FLAGS.proxy_bash:
proxy_bash.start()
if FLAGS.debug:
# Enable verbose stack traces. See: https://pymotw.com/2/cgitb/
import cgitb
cgitb.enable(format="text")
main()
return
try:
if FLAGS.profiling:
cProfile.runctx("main()", None, None, sort="tottime")
else:
main()
except KeyboardInterrupt:
return
except Exception as e:
l.logger().error(e)
if mail:
if FLAGS.config is not None:
job = pathlib.Path(FLAGS.config)
else:
job = ""
mail.send_message("benchpress:{}".format(str(job.stem)), e)
raise
if mail:
if FLAGS.config is not None:
job = pathlib.Path(FLAGS.config)
else:
job = ""
mail.send_message("benchpress: {}".format(str(job.stem)), "Program terminated successfully at {}.".format(datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S")))
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0)
| 16,345 | 33.340336 | 169 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/reinforcement_models.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RL Environment for the task of targeted benchmark generation.
"""
import pathlib
import os
import time
import typing
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.proto import reinforcement_learning_pb2
from deeplearning.benchpress.proto import internal_pb2
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import commit
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.reinforcement_learning import env
from deeplearning.benchpress.reinforcement_learning import agent
from deeplearning.benchpress.reinforcement_learning import memory
from deeplearning.benchpress.models.torch_bert import model as bert_model
from absl import flags
FLAGS = flags.FLAGS
from deeplearning.benchpress.util import cache
def AssertConfigIsValid(config: reinforcement_learning_pb2.RLModel) -> reinforcement_learning_pb2.RLModel:
"""
Check validity of RL Model config.
"""
## Just check if language_model exists, later the language_models class will check the pbtxt.
pbutil.AssertFieldIsSet(config, "language_model")
## Now check the specialized agent attributes.
pbutil.AssertFieldIsSet(config, "target_features")
pbutil.AssertFieldIsSet(config, "agent")
## Parse FeatureTokenizer fields.
pbutil.AssertFieldIsSet(config.agent, "feature_tokenizer")
pbutil.AssertFieldIsSet(config.agent, "batch_size")
pbutil.AssertFieldIsSet(config.agent, "action_temperature_micros")
pbutil.AssertFieldIsSet(config.agent, "token_temperature_micros")
pbutil.AssertFieldIsSet(config.agent, "num_epochs")
pbutil.AssertFieldIsSet(config.agent, "num_episodes")
pbutil.AssertFieldIsSet(config.agent, "steps_per_episode")
pbutil.AssertFieldIsSet(config.agent, "num_updates")
pbutil.AssertFieldIsSet(config.agent, "gamma")
pbutil.AssertFieldIsSet(config.agent, "lam")
pbutil.AssertFieldIsSet(config.agent, "epsilon")
pbutil.AssertFieldIsSet(config.agent, "learning_rate_micros")
pbutil.AssertFieldIsSet(config.agent, "value_loss_coefficient")
pbutil.AssertFieldIsSet(config.agent, "entropy_coefficient")
pbutil.AssertFieldIsSet(config.agent.feature_tokenizer, "feature_max_value_token")
pbutil.AssertFieldIsSet(config.agent.feature_tokenizer, "feature_singular_token_thr")
pbutil.AssertFieldIsSet(config.agent.feature_tokenizer, "feature_token_range")
pbutil.AssertFieldIsSet(config.agent.feature_tokenizer, "feature_sequence_length")
return config
class RLModel(object):
"""
Manager class of Reinforcement Learning pipeline for benchmark generation.
"""
@property
def tokenizer(self) -> tokenizers.TokenizerBase:
return self.language_model.tokenizer
@property
def corpus(self) -> corpuses.Corpus:
return self.language_model.corpus
@property
def pre_train_corpus(self) -> corpuses.Corpus:
return self.language_model.pre_train_corpus
@staticmethod
def _ComputeHash(language_model: language_models.Model, config: reinforcement_learning_pb2.RLModel) -> str:
"""
Compute unique hash of model specifications.
"""
lm_hash = language_model.hash
config_to_hash = reinforcement_learning_pb2.RLModel()
config_to_hash.CopyFrom(config)
config_to_hash.ClearField("language_model")
return crypto.sha1_list([lm_hash, config_to_hash.SerializeToString()])
def __init__(self, config: reinforcement_learning_pb2.RLModel):
"""
A Reinforcement Learning model, wrapping a Language Model backend.
"""
# Error early, so that a cache isn't created.
if not isinstance(config, reinforcement_learning_pb2.RLModel):
t = type(config).__name__
raise TypeError(f"Config must be an RLModel proto. Received: '{t}'")
self.config = reinforcement_learning_pb2.RLModel()
self.config.CopyFrom(AssertConfigIsValid(config))
# Initialize the LM-backend for token sampling.
self.language_model = language_models.Model(self.config.language_model)
self.hash = self._ComputeHash(self.language_model, self.config)
self._created = False
if environment.WORLD_RANK == 0:
self.cache = cache.mkcache("rl_model", self.hash)
self.cache.path.mkdir(exist_ok = True, parents = True)
else:
while not cache.cachepath("rl_model", self.hash).exists():
time.sleep(0.5)
self.cache = cache.mkcache("rl_model", self.hash)
if environment.WORLD_RANK == 0:
# Create the necessary cache directories.
(self.cache.path / "feature_sampler").mkdir(exist_ok = True)
(self.cache.path / "samples").mkdir(exist_ok = True)
# Create symlink to language model.
symlink = self.cache.path / "language_model"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
pathlib.Path(self.language_model.cache.path),
self.cache.path
),
symlink
)
# Setup META.pbtxt
if self.cache.get("META.pbtxt"):
cached_meta = pbutil.FromFile(
pathlib.Path(self.cache["META.pbtxt"]), internal_pb2.RLModelMeta()
)
# Exclude num_epochs and corpus location from metadata comparison.
config_to_compare = reinforcement_learning_pb2.RLModel()
config_to_compare.CopyFrom(self.config)
config_to_compare.language_model.corpus.ClearField("contentfiles")
if config_to_compare.language_model.HasField("pre_train_corpus"):
config_to_compare.language_model.pre_train_corpus.ClearField("contentfiles")
config_to_compare.language_model.training.ClearField("num_epochs")
config_to_compare.language_model.training.ClearField("num_train_steps")
if config_to_compare.language_model.HasField("pre_train_corpus"):
config_to_compare.language_model.training.ClearField("num_pretrain_steps")
config_to_compare.language_model.training.ClearField("batch_size")
if config_to_compare.language_model.training.HasField("data_generator"):
config_to_compare.language_model.training.data_generator.ClearField("steps_per_epoch")
config_to_compare.language_model.training.data_generator.ClearField("validation_set")
# These fields should have already been cleared, but we'll do it again
# so that metadata comparisons don't fail when the cached meta schema
# is updated.
cached_to_compare = reinforcement_learning_pb2.RLModel()
cached_to_compare.CopyFrom(cached_meta.config)
cached_to_compare.language_model.corpus.ClearField("contentfiles")
if cached_to_compare.language_model.HasField("pre_train_corpus"):
cached_to_compare.language_model.pre_train_corpus.ClearField("contentfiles")
cached_to_compare.language_model.training.ClearField("num_epochs")
cached_to_compare.language_model.training.ClearField("num_train_steps")
if cached_to_compare.language_model.HasField("pre_train_corpus"):
cached_to_compare.language_model.training.ClearField("num_pretrain_steps")
cached_to_compare.language_model.training.ClearField("batch_size")
if cached_to_compare.language_model.training.HasField("data_generator"):
cached_to_compare.language_model.training.data_generator.ClearField("steps_per_epoch")
cached_to_compare.language_model.training.data_generator.ClearField("validation_set")
if cached_to_compare.language_model.training.sequence_length != config_to_compare.language_model.training.sequence_length:
l.logger().warning("Mismatch between pre-trained and current config sequence_length!\
This can only be intended in BERT model!")
cached_to_compare.language_model.training.ClearField("sequence_length")
config_to_compare.language_model.training.ClearField("sequence_length")
if config_to_compare != cached_to_compare:
raise SystemError("Metadata mismatch: {} \n\n {}".format(config_to_compare, cached_to_compare))
self.meta = cached_meta
else:
self.meta = internal_pb2.RLModelMeta()
self.meta.config.CopyFrom(self.config)
self._WriteMetafile()
## Store current commit
commit.saveCommit(self.cache.path)
l.logger().info("Initialized RL Pipeline in {}".format(self.cache.path))
"""
How do you target features during training ?
1) Active learner - downstream task <- Sampler
2) Random feasible vectors (collected from OpenCL corpus ?) <- Sampler ?
3) Got from benchmark suites ? <- Sampler
"""
return
def Create(self, **kwargs) -> bool:
"""
Create the LM and RL environment.
"""
_ = self.language_model.Create()
if self.language_model.pre_train_corpus:
self.language_model.PreTrain(**kwargs)
self.language_model.Train(**kwargs)
self.feature_tokenizer = tokenizers.FeatureTokenizer.FromArgs(
self.config.agent.feature_tokenizer.feature_singular_token_thr,
self.config.agent.feature_tokenizer.feature_max_value_token,
self.config.agent.feature_tokenizer.feature_token_range
)
if self._created:
return False
FLAGS.sample_indices_limit = 1 # Force BERT-LM on one prediction per hole.
self._created = True
self.env = env.Environment(
self.config,
self.language_model.backend.config.architecture.max_position_embeddings,
self.language_model.corpus,
self.tokenizer,
self.feature_tokenizer,
self.cache.path,
)
self.agent = agent.Agent(
self.config, self.language_model, self.tokenizer, self.feature_tokenizer, self.cache.path
)
self.memory = memory.Memory(self.cache.path)
return True
def PreTrain(self, **kwargs) -> 'RLModel':
"""
Pre-train wrapper for Language model.
    The RL model itself has no pre-training step; this simply creates the pipeline (which trains the wrapped language model).
"""
self.Create(**kwargs)
return self
def Train(self, **kwargs) -> None:
"""
Train the RL-Agent.
"""
self.Create(**kwargs)
## First, train the Language model backend.
num_epochs = self.config.agent.num_epochs
num_episodes = self.config.agent.num_episodes
steps_per_episode = self.config.agent.steps_per_episode
num_updates = self.config.agent.num_updates
gamma = self.config.agent.gamma
lam = self.config.agent.lam
epsilon = self.config.agent.epsilon
lr = self.config.agent.learning_rate_micros / 10e6
value_loss_coeff = self.config.agent.value_loss_coefficient
entropy_coeff = self.config.agent.entropy_coefficient
self.agent.Train(
env = self.env,
num_epochs = num_epochs,
num_episodes = num_episodes,
steps_per_episode = steps_per_episode,
num_updates = num_updates,
gamma = gamma,
lr = lr,
lam = lam,
epsilon = epsilon,
value_loss_coeff = value_loss_coeff,
entropy_coeff = entropy_coeff,
)
return
def Sample(self, sampler: samplers.Sampler) -> None:
"""
Instead of calling Model's sample, this sample will be called, acting as a backend (BERT) wrapper.
"""
raise NotImplementedError("Here you must sample your RL-Model.")
return
def SamplerCache(self, sampler: samplers.Sampler) -> pathlib.Path:
"""Get the path to a sampler cache.
Args:
sampler: A Sampler instance.
Returns:
A path to a directory. Note that this directory may not exist - it is
created only after a call to Sample().
"""
return self.cache.path / "samples" / sampler.hash
def _WriteMetafile(self) -> None:
pbutil.ToFile(self.meta, pathlib.Path(self.cache.keypath("META.pbtxt")))
def saveCheckpoint(self) -> None:
"""
Save current state of RL pipeline.
"""
self.feature_loader.saveCheckpoint()
self.env.saveCheckpoint()
self.agent.saveCheckpoint()
self.memory.saveCheckpoint()
return
def loadCheckpoint(self) -> None:
"""
Load RL pipeline checkpoint.
"""
self.feature_loader.loadCheckpoint()
self.env.loadCheckpoint()
self.agent.loadCheckpoint()
self.memory.loadCheckpoint()
return
| 13,200 | 40.382445 | 130 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/hooks.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Hook class to monitor reinforcement learning agent's learning process.
"""
import json
import numpy as np
import pathlib
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import logging as l
class tensorMonitorHook(object):
def __init__(self,
cache_path : pathlib.Path,
current_step : int,
step_freq : int,
flush_freq : int = None,
average : bool = True,
):
self.cache_path = cache_path
self.current_step = current_step
self.step_freq = step_freq
self.flush_freq = flush_freq
self.average = average
self.jsonfile = cache_path / "training.json"
self.tensors = []
self.plot_tensors = {}
self.epoch_tensors = {}
self.epch_loss = []
self.delay_checkpoint = True if current_step != 0 else False
self._initTensors()
self.monitor_func = [
self._tensor2JSON,
self._tensor2plot,
]
return
@property
def epoch_loss(self):
return sum(self.epch_loss) / len(self.epch_loss)
def step(self, **tensors):
for key, value in tensors.items():
if value is None:
continue
      if key in self.epoch_tensors and "num_" not in key and "val_" not in key:
        # "num_" tensors register an accumulated number and are not averaged.
        # For those we just keep the last registered value.
self.epoch_tensors[key] += value
else:
self.epoch_tensors[key] = value
self.current_step += 1
if self._step_triggered():
self._logTensors()
self.epoch_tensors = {}
return
def end_epoch(self, **tensors):
for key, value in tensors.items():
if value is None:
continue
self.epoch_tensors[key] = value
# if self._step_triggered():
self._logTensors()
self.epoch_tensors = {}
self.epch_loss = []
return
def _initTensors(self):
if self.current_step > 0:
if self.jsonfile.exists():
with open(self.jsonfile, 'r') as js:
loaded_tensors = json.load(js)
if loaded_tensors[-1]['step'] > self.current_step:
# If previous sessions have written beyond current step, overwrite them.
back_index = -2
while loaded_tensors[back_index]['step'] > self.current_step:
back_index -= 1
self.tensors = loaded_tensors[:back_index + 1]
else:
self.tensors = loaded_tensors
for ch in self.tensors:
for k, v in ch.items():
if k == 'step':
continue
if k not in self.plot_tensors:
self.plot_tensors[k] = {'value': [], 'step': []}
self.plot_tensors[k]['value'].append(v)
self.plot_tensors[k]['step'].append(ch['step'])
else:
l.logger().error("Training json log-file not found. Will keep track from this point on.")
return
def _step_triggered(self):
if self.delay_checkpoint:
self.delay_checkpoint = False
return False
if (self.current_step) % self.step_freq == 0 or self.current_step - 1 == 0:
return True
return False
def _logTensors(self):
effective_step = self.current_step if self.current_step - 1 != 0 else 0
if self.average is True:
epoch_tensors = (self.epoch_tensors if effective_step == 0
else {k: (v / self.step_freq if not "num_" in k and not "val_" in k else v) for k, v in self.epoch_tensors.items()})
else:
epoch_tensors = (self.epoch_tensors if effective_step == 0
else {k: v for k, v in self.epoch_tensors.items()})
self.tensors.append(epoch_tensors)
self.tensors[-1]['step'] = effective_step
if 'total_loss' in epoch_tensors:
self.epch_loss.append(epoch_tensors['total_loss'])
for key, value in epoch_tensors.items():
if key == 'step':
continue
if key not in self.plot_tensors:
self.plot_tensors[key] = {'value': [], 'step': []}
self.plot_tensors[key]['value'].append(value)
self.plot_tensors[key]['step'].append(effective_step)
for func in self.monitor_func:
func()
return
def _tensor2JSON(self):
with open(self.jsonfile, 'w') as js:
json.dump(self.tensors, js, indent = 2, sort_keys = True)
return
def _tensor2plot(self):
for (key, value) in self.plot_tensors.items():
if key != "step":
plotter.SingleScatterLine(
x = value['step'],
y = value['value'],
title = key,
x_name = "Training Step",
y_name = key,
plot_name = key,
path = self.cache_path,
)
return
| 5,385 | 31.642424 | 137 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/memory.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Memory replay buffer for reinforcement learning training.
"""
import pathlib
import typing
import pickle
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
class Memory(object):
"""
Replay buffer of previous states and actions.
"""
def __init__(self, cache_path: pathlib.Path):
self.cache_path = cache_path / "memory"
if environment.WORLD_RANK == 0:
self.cache_path.mkdir(exist_ok = True, parents = True)
self.action_buffer = []
self.state_buffer = []
self.reward_buffer = []
self.done_buffer = []
self.info_buffer = []
self.loadCheckpoint()
return
def add(self,
action : interactions.Action,
state : interactions.State,
reward : interactions.Reward,
done : bool,
info : str,
) -> None:
"""Add single step to memory buffers."""
self.action_buffer.append(action)
self.state_buffer.append(state)
self.reward_buffer.append(reward)
self.done_buffer.append(done)
self.info_buffer.append(info)
return
def sample(self) -> typing.Dict[str, torch.Tensor]:
"""
Sample memories to update the RL agent.
"""
return
def loadCheckpoint(self) -> None:
"""Fetch memory's latest state."""
if (self.cache_path / "memory.pkl").exists():
distrib.lock()
with open(self.cache_path / "memory.pkl", 'rb') as inf:
checkpoint = pickle.load(inf)
distrib.unlock()
      self.action_buffer = checkpoint['action_buffer']
      self.state_buffer = checkpoint['state_buffer']
      self.reward_buffer = checkpoint['reward_buffer']
return
def saveCheckpoint(self) -> None:
"""Save Checkpoint state."""
if environment.WORLD_RANK == 0:
checkpoint = {
'action_buffer' : self.action_buffer,
'reward_buffer' : self.reward_buffer,
'state_buffer' : self.state_buffer,
}
with open(self.cache_path / "memory.pkl", 'wb') as outf:
pickle.dump(checkpoint, outf)
distrib.barrier()
return
| 2,825 | 29.387097 | 74 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Memory replay buffer for reinforcement learning training.
"""
import typing
import numpy as np
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.proto import reinforcement_learning_pb2
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
def from_config(config : reinforcement_learning_pb2.RLModel,
feature_tokenizer : tokenizers.FeatureTokenizer,
corpus : corpuses.Corpus,
) -> torch.utils.data.Dataset:
"""
Return the right torch dataloader based on configuration.
"""
if config.HasField("train_set"):
return CorpusFeatureLoader(config, corpus, feature_tokenizer)
elif config.HasField("random"):
return RandomFeatureLoader(config, feature_tokenizer)
return
def StateToActionTensor(state : interactions.State,
padToken : int,
feat_padToken : int,
batch_size : int,
) -> typing.Dict[str, torch.Tensor]:
"""
Pre-process state to tensor inputs for Action Deep QValues.
"""
seq_len = len(state.encoded_code)
feat_seq_len = len(state.encoded_features)
src_ids = torch.LongTensor(state.encoded_code).unsqueeze(0).repeat(batch_size, 1)
src_mask = src_ids != padToken
src_pos = torch.arange(seq_len, dtype = torch.int64).unsqueeze(0).repeat(batch_size, 1)
feat_ids = torch.LongTensor(state.encoded_features).unsqueeze(0).repeat(batch_size, 1)
feat_mask = feat_ids != feat_padToken
feat_pos = torch.arange(feat_seq_len, dtype = torch.int64).unsqueeze(0).repeat(batch_size, 1)
return {
'encoder_feature_ids' : feat_ids,
'encoder_feature_mask' : feat_mask,
'encoder_position_ids' : feat_pos,
'decoder_input_ids' : src_ids,
'decoder_input_mask' : src_mask,
'decoder_position_ids' : src_pos,
}
def StateToTokenTensor(state : interactions.State,
mask_idx : int,
maskToken : int,
padToken : int,
feat_padToken : int,
batch_size : int,
replace_token : bool = False,
) -> typing.Dict[str, torch.Tensor]:
"""
  Pre-process state to tensor inputs for the token prediction language model.
"""
seq_len = len(state.encoded_code)
feat_seq_len = len(state.encoded_features)
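  # Two masking modes: either overwrite the token at mask_idx with the mask token,
  # or insert a mask token right after mask_idx and truncate back to the original length.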
if replace_token:
masked_code = state.encoded_code
masked_code[mask_idx] = maskToken
else:
masked_code = np.concatenate((state.encoded_code[:mask_idx+1], [maskToken], state.encoded_code[mask_idx+1:]))
masked_code = torch.LongTensor(masked_code[:seq_len]).unsqueeze(0).repeat(batch_size, 1)
enc_features = torch.LongTensor(state.encoded_features).unsqueeze(0).repeat(batch_size, 1)
return {
'encoder_feature_ids' : enc_features,
'encoder_feature_mask' : enc_features != feat_padToken,
'encoder_position_ids' : torch.arange(feat_seq_len, dtype = torch.int64).unsqueeze(0).repeat(batch_size, 1),
'decoder_input_ids' : masked_code,
'decoder_input_mask' : masked_code != padToken,
'decoder_position_ids' : torch.arange(seq_len, dtype = torch.int64).unsqueeze(0).repeat(batch_size, 1),
}
class CorpusFeatureLoader(torch.utils.data.Dataset):
"""
Dataloading from language model's training corpus.
"""
def __init__(self,
config: reinforcement_learning_pb2.RLModel,
corpus: corpuses.Corpus,
feature_tokenizer: tokenizers.FeatureTokenizer
):
self.config = config
self.data = corpus.GetTrainingFeatures()
self.feature_tokenizer = feature_tokenizer
self.setup_dataset()
return
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> typing.Dict[str, torch.Tensor]:
return
def setup_dataset(self) -> typing.List[typing.Dict[str, torch.Tensor]]:
"""Process raw feature vectors to processed dataset."""
self.dataset = []
for dp in self.data:
for k, v in dp.items():
if v:
fvec = self.feature_tokenizer.TokenizeFeatureVector(v, k, self.config.agent.action_qv.feature_sequence_length)
self.dataset.append(
{
'input_features': torch.LongTensor(fvec),
'input_features_key_padding_mask': torch.LongTensor(fvec != self.feature_tokenizer.padToken),
}
)
return
class RandomFeatureLoader(torch.utils.data.Dataset):
"""
Torch-based dataloading class for target feature vectors.
"""
def __init__(self,
config : reinforcement_learning_pb2.RLModel,
feature_tokenizer : tokenizers.FeatureTokenizer,
):
self.config = config
self.feature_tokenizer = feature_tokenizer
return
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> typing.Dict[str, torch.Tensor]:
return
| 5,794 | 36.62987 | 120 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/model.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modeling for reinforcement learning program synthesis.
"""
import pathlib
import typing
import math
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.reinforcement_learning import data_generator
from deeplearning.benchpress.reinforcement_learning import config
from deeplearning.benchpress.models.torch_bert import model
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import logging as l
torch = pytorch.torch
class PredictionHeadTransform(torch.nn.Module):
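  """Dense projection + activation + LayerNorm applied to hidden states before a prediction head."""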
def __init__(self,
config : config.QValuesConfig,
dense_size : int
):
super().__init__()
self.dense = torch.nn.Linear(dense_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = model.ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class ActionHead(torch.nn.Module):
"""Classification head for action prediction."""
def __init__(self, config, output_dim: int = None):
super().__init__()
if output_dim is None:
output_dim = len(interactions.ACTION_TYPE_SPACE) * config.max_position_embeddings
self.transform = PredictionHeadTransform(config, dense_size = config.hidden_size)
self.decoder = torch.nn.Linear(config.hidden_size * config.max_position_embeddings, output_dim, bias = False)
self.bias = torch.nn.Parameter(torch.zeros(output_dim))
self.decoder.bias = self.bias
return
def forward(self, decoder_out: torch.FloatTensor) -> torch.FloatTensor:
transformed = self.transform(decoder_out)
flat = transformed.reshape((transformed.shape[0], -1))
action_logits = self.decoder(flat)
return action_logits
class TokenHead(torch.nn.Module):
"""Classification head for token prediction."""
def __init__(self, config, output_dim: int):
super().__init__()
self.transform = PredictionHeadTransform(config, dense_size = config.hidden_size)
self.decoder = torch.nn.Linear(config.hidden_size, output_dim, bias = False)
self.bias = torch.nn.Parameter(torch.zeros(output_dim))
self.decoder.bias = self.bias
return
def forward(self, decoder_out: torch.FloatTensor) -> torch.FloatTensor:
hidden_states = self.transform(decoder_out)
token_logits = self.decoder(hidden_states)
return token_logits
class ActionQV(torch.nn.Module):
"""Deep Q-Values for Action type prediction."""
def __init__(self,
language_model : language_models.Model,
config : config.QValuesConfig,
is_critic : bool = False
):
super().__init__()
## Pre-trained Encoder LM.
self.feature_encoder = language_model.backend.GetEncoderModule(
vocab_size = config.feature_vocab_size,
hidden_size = config.hidden_size,
num_hidden_layers = config.num_hidden_layers,
num_attention_heads = config.num_attention_heads,
intermediate_size = config.intermediate_size,
hidden_act = config.hidden_act,
hidden_dropout_prob = config.hidden_dropout_prob,
attention_probs_dropout_prob = config.attention_probs_dropout_prob,
max_position_embeddings = config.feature_sequence_length,
type_vocab_size = config.type_vocab_size,
initializer_range = config.initializer_range,
layer_norm_eps = config.layer_norm_eps,
pad_token_id = config.feature_pad_idx,
with_checkpoint = False,
)
## Decoder for token prediction, given features and source code encoded memory.
self.source_decoder = language_model.backend.GetDecoderModule(
with_checkpoint = True,
without_label_head = True,
)
output_dim = None
if is_critic:
output_dim = 1
self.action_head = ActionHead(config, output_dim = output_dim)
self.softmax = torch.nn.Softmax(dim = -1)
return
def forward(self,
encoder_feature_ids : torch.LongTensor,
encoder_feature_mask : torch.LongTensor,
encoder_position_ids : torch.LongTensor,
decoder_input_ids : torch.LongTensor,
decoder_input_mask : torch.LongTensor,
decoder_position_ids : torch.LongTensor,
# actor_action_logits : torch.LongTensor = None,
) -> typing.Dict[str, torch.Tensor]:
"""Action type forward function."""
## Run BERT-Encoder in target feature vector.
encoder_out = self.feature_encoder(
input_ids = encoder_feature_ids,
input_mask = encoder_feature_mask,
position_ids = encoder_position_ids,
input_features = None,
)
encoder_memory = encoder_out['hidden_states']
## Run source code over pre-trained BERT decoder.
decoder_out = self.source_decoder(
input_ids = decoder_input_ids,
input_mask = decoder_input_mask,
position_ids = decoder_position_ids,
encoder_hidden_states = encoder_memory,
input_features = None,
)
decoded_source = decoder_out['hidden_states']
## Predict action type logits.
action_logits = self.action_head(decoded_source)
action_probs = self.softmax(action_logits)
return {
'action_logits' : action_logits,
'action_probs' : action_probs,
}
class ActionLanguageModelQV(torch.nn.Module):
"""Deep Q-Values for Token type prediction."""
def __init__(self,
language_model : language_models.Model,
config : config.QValuesConfig,
is_critic : bool = False,
):
super(ActionLanguageModelQV, self).__init__()
## Feature-Encoder.
self.encoder = language_model.backend.GetEncoderModule(
vocab_size = config.feature_vocab_size,
hidden_size = config.hidden_size,
num_hidden_layers = config.num_hidden_layers,
num_attention_heads = config.num_attention_heads,
intermediate_size = config.intermediate_size,
hidden_act = config.hidden_act,
hidden_dropout_prob = config.hidden_dropout_prob,
attention_probs_dropout_prob = config.attention_probs_dropout_prob,
max_position_embeddings = config.feature_sequence_length,
type_vocab_size = config.type_vocab_size,
initializer_range = config.initializer_range,
layer_norm_eps = config.layer_norm_eps,
pad_token_id = config.feature_pad_idx,
with_checkpoint = False,
)
## Decoder for token prediction, given features memory and source code.
if is_critic:
output_dim = 1
self.language_model = language_model.backend.GetDecoderModule(
with_checkpoint = True,
without_label_head = True,
)
self.decoder = TokenHead(config, output_dim)
else:
output_dim = config.vocab_size
self.language_model = language_model.backend.GetDecoderModule(
with_checkpoint = True,
)
self.softmax = torch.nn.Softmax(dim = -1)
self.is_critic = is_critic
return
def forward(self,
encoder_feature_ids : torch.LongTensor,
encoder_feature_mask : torch.LongTensor,
encoder_position_ids : torch.LongTensor,
decoder_input_ids : torch.LongTensor,
decoder_input_mask : torch.LongTensor,
decoder_position_ids : torch.LongTensor,
encoder_input_features = None,
):
encoder_out = self.encoder(
input_ids = encoder_feature_ids,
input_mask = encoder_feature_mask,
position_ids = encoder_position_ids,
input_features = encoder_input_features,
)
encoder_memory = encoder_out['hidden_states']
decoder_out = self.language_model(
input_ids = decoder_input_ids,
input_mask = decoder_input_mask,
position_ids = decoder_position_ids,
encoder_hidden_states = encoder_memory,
)
if self.is_critic:
decoded_source = decoder_out['hidden_states']
token_logits = self.decoder(decoded_source)
else:
token_logits = decoder_out['prediction_logits']
token_probs = self.softmax(token_logits)
return {
'token_logits' : token_logits,
'token_probs' : token_probs,
}
class QValuesModel(object):
"""
  Handler of deep Q-value networks for program synthesis.
"""
@property
def action_parameters(self) -> torch.Tensor:
"""
    Return all gradient parameters of the model involved in the action decision.
"""
if self.model:
if isinstance(self.model.action, torch.nn.DataParallel):
module = self.model.action.module
else:
module = self.model.action
return (
[x for x in module.feature_encoder.parameters()] +
[x for x in module.source_decoder.parameters()] +
[x for x in module.action_head.parameters()]
)
else:
return None
@property
def index_parameters(self) -> torch.Tensor:
"""
    Return all gradient parameters of the model involved in the index decision.
"""
if self.model:
if isinstance(self.model.action, torch.nn.DataParallel):
module = self.model.action.module
else:
module = self.model.action
return (
[x for x in module.feature_encoder.parameters()] +
[x for x in module.source_decoder.parameters()] +
[x for x in module.index_head.parameters()]
)
else:
return None
@property
def token_parameters(self) -> torch.Tensor:
"""
    Return all gradient parameters of the model involved in token prediction.
"""
if self.model:
if isinstance(self.model.token, torch.nn.DataParallel):
module = self.model.token.module
else:
module = self.model.token
return (
[x for x in module.encoder.parameters()] +
[x for x in module.language_model.parameters()]
)
else:
return None
| 11,311 | 38.141869 | 115 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/interactions.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A module containing all possible interactions between
the environment and an agent.
"""
import typing
import numpy as np
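# Discrete action types the agent can pick at each step of an episode.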
ACTION_TYPE_SPACE = {
'ADD' : 0,
'REM' : 1,
'COMP' : 2,
'REPLACE' : 3,
}
ACTION_TYPE_MAP = {
v: k for k, v in ACTION_TYPE_SPACE.items()
}
class Action(typing.NamedTuple):
"""
Agent action representation.
"""
action : int # Selected action
index : int # At selected index
  indexed_action : int # Combined action/index id in the action head's flattened output.
action_logits : np.array # ACTION_SPACE * SEQ_LEN logits array.
action_probs : np.array # ACTION_SPACE * SEQ_LEN probability array.
token : int # Your policy function picks the best token.
token_logits : np.array # Distribution logits over possible tokens.
token_probs : np.array # Distribution probs over possible tokens.
comment : str # Add action description.
class State(typing.NamedTuple):
"""
Environment's state representation.
"""
target_features : typing.Dict[str, float]
feature_space : str
encoded_features : np.array
code : str
encoded_code : np.array
comment : str
class Reward(typing.NamedTuple):
"""
Reward provided to agent as feedback.
"""
action : Action
value : float
distance : float
comment : str
class Memory(typing.NamedTuple):
"""
A memory representation used for agent training.
"""
state : State # Input state for memory.
action : Action # Action taken by agent.
reward : Reward # Isolated reward of that action.
rtg : float # Reward-to-go from trajectory.
length : int # Current index within the trajectory.
| 2,333 | 29.710526 | 82 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/config.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modeling configuration for Deep_Q Network"""
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.proto import reinforcement_learning_pb2
class QValuesConfig(object):
@classmethod
def from_config(cls,
config : reinforcement_learning_pb2.RLModel,
max_position_embeddings : int,
tokenizer : tokenizers.TokenizerBase,
feature_tokenizer : tokenizers.FeatureTokenizer,
language_model : language_models.Model,
) -> 'QValuesConfig':
dict = {
'vocab_size' : tokenizer.vocab_size,
'feature_vocab_size' : feature_tokenizer.vocab_size,
'feature_pad_idx' : feature_tokenizer.padToken,
'pad_token_id' : tokenizer.padToken,
'max_position_embeddings' : max_position_embeddings,
'feature_sequence_length' : config.agent.feature_tokenizer.feature_sequence_length,
'hidden_dropout_prob' : language_model.backend.bert_config.hidden_dropout_prob,
'feature_dropout_prob' : language_model.backend.bert_config.hidden_dropout_prob,
'hidden_size' : language_model.backend.bert_config.hidden_size,
'feature_embedding_size' : language_model.backend.bert_config.hidden_size,
'num_attention_heads' : language_model.backend.bert_config.num_attention_heads,
'intermediate_size' : language_model.backend.bert_config.intermediate_size,
'num_hidden_layers' : language_model.backend.bert_config.num_hidden_layers,
'layer_norm_eps' : language_model.backend.bert_config.layer_norm_eps,
'hidden_act' : language_model.backend.bert_config.hidden_act,
'attention_probs_dropout_prob' : language_model.backend.bert_config.attention_probs_dropout_prob,
'type_vocab_size' : language_model.backend.bert_config.type_vocab_size,
'initializer_range' : language_model.backend.bert_config.initializer_range,
'action_temperature' : config.agent.action_temperature_micros / 10e6,
'token_temperature' : config.agent.token_temperature_micros / 10e6,
'feature_encoder' : False,
'batch_size' : config.agent.batch_size
}
return QValuesConfig(**dict)
def __init__(self, **attrs):
self.__dict__.update(attrs)
return
| 3,206 | 54.293103 | 103 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/agent.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Agents module for reinforcement learning.
"""
from cmath import inf
from code import interact
import pathlib
import typing
import tqdm
import numpy as np
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.reinforcement_learning import model
from deeplearning.benchpress.reinforcement_learning import env
from deeplearning.benchpress.reinforcement_learning import hooks
from deeplearning.benchpress.reinforcement_learning.config import QValuesConfig
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.proto import reinforcement_learning_pb2
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
torch = pytorch.torch
class Policy(object):
"""
The policy selected over Q-Values
"""
def __init__(self, action_temp: float, token_temp: float):
self.action_temperature = action_temp
self.token_temperature = token_temp
return
def SampleActions(self,
action_logits : torch.FloatTensor,
actual_lengths : typing.Tuple[torch.LongTensor, torch.LongTensor],
) -> typing.Tuple[int, int]:
"""
Get the Q-Values for action and apply policy on it.
"""
actions = torch.zeros((action_logits.shape[0]), dtype = torch.long)
batch_idxs, seq_idxs = actual_lengths
for bidx, sidx, seq_logits in zip(batch_idxs, seq_idxs, action_logits):
try:
ct = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
temperature = self.action_temperature if self.action_temperature is not None else 1.0,
logits = seq_logits[:(sidx * len(interactions.ACTION_TYPE_SPACE))],
validate_args = False if "1.9." in torch.__version__ else None,
).sample()
action = torch.argmax(ct, dim = -1)
actions[bidx] = action
except Exception as e:
l.logger().error(seq_logits[:(sidx * len(interactions.ACTION_TYPE_SPACE))])
raise e
return actions
def SampleTokens(self, token_logits: torch.FloatTensor) -> int:
"""
Get logit predictions for token and apply policy on it.
"""
ct = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
temperature = self.token_temperature if self.token_temperature is not None else 1.0,
logits = token_logits,
validate_args = False if "1.9." in torch.__version__ else None,
).sample()
tokens = torch.argmax(ct, dim = -1)
return tokens
class Agent(object):
"""
Benchmark generation RL-Agent.
"""
def __init__(self,
config : reinforcement_learning_pb2.RLModel,
language_model : language_models.Model,
tokenizer : tokenizers.TokenizerBase,
feature_tokenizer : tokenizers.FeatureTokenizer,
cache_path : pathlib.Path
):
self.cache_path = cache_path / "agent"
self.ckpt_path = self.cache_path / "checkpoint"
self.log_path = self.cache_path / "logs"
if environment.WORLD_RANK == 0:
self.cache_path.mkdir(exist_ok = True, parents = True)
self.ckpt_path.mkdir(exist_ok = True, parents = True)
self.log_path.mkdir(exist_ok = True, parents = True)
self.config = config
self.language_model = language_model
self.tokenizer = tokenizer
self.feature_tokenizer = feature_tokenizer
self.qv_config = QValuesConfig.from_config(
self.config,
self.language_model.backend.config.architecture.max_position_embeddings,
self.tokenizer,
self.feature_tokenizer,
self.language_model,
)
self.policy = Policy(
action_temp = self.qv_config.action_temperature,
token_temp = self.qv_config.token_temperature,
)
return
def _ConfigModelParams(self, learning_rate: float) -> None:
"""
Initialize torch models and send them to device.
"""
self.action_actor = model.ActionQV(self.language_model, self.qv_config).to(pytorch.device)
self.action_critic = model.ActionQV(self.language_model, self.qv_config, is_critic = True).to(pytorch.device)
self.token_actor = model.ActionLanguageModelQV(self.language_model, self.qv_config).to(pytorch.device)
self.token_critic = model.ActionLanguageModelQV(self.language_model, self.qv_config, is_critic = True).to(pytorch.device)
if pytorch.num_nodes > 1:
self.action_actor = torch.nn.DistributedDataParallel(
self.action_actor,
device_ids = [pytorch.offset_device],
output_device = pytorch.offset_device,
find_unused_parameters = True,
)
self.action_critic = torch.nn.DistributedDataParallel(
self.action_critic,
device_ids = [pytorch.offset_device],
output_device = pytorch.offset_device,
find_unused_parameters = True,
)
self.token_actor = torch.nn.DistributedDataParallel(
self.token_actor,
device_ids = [pytorch.offset_device],
output_device = pytorch.offset_device,
find_unused_parameters = True,
)
self.token_critic = torch.nn.DistributedDataParallel(
self.token_critic,
device_ids = [pytorch.offset_device],
output_device = pytorch.offset_device,
find_unused_parameters = True,
)
elif pytorch.num_gpus > 1:
self.action_actor = torch.nn.DataParallel(self.action_actor)
self.action_critic = torch.nn.DataParallel(self.action_critic)
self.token_actor = torch.nn.DataParallel(self.token_actor)
self.token_critic = torch.nn.DataParallel(self.token_critic)
self.action_optim = torch.optim.Adam(
list(self.action_actor.parameters()) + list(self.action_critic.parameters()),
lr = learning_rate
)
self.token_optim = torch.optim.Adam(
list(self.token_actor.parameters()) + list(self.token_critic.parameters()),
lr = learning_rate
)
return
def Train(self,
env : env.Environment,
num_epochs : int,
num_episodes : int, # Equivalent to batch size
steps_per_episode : int, # Depth length of single trajectory.
num_updates : int,
gamma : float,
lr : float,
lam : float,
epsilon : float,
value_loss_coeff : float,
entropy_coeff : float,
) -> None:
"""
Run PPO over policy and train the agent.
"""
self._ConfigModelParams(learning_rate = lr)
self.ckpt_step = max(0, self.loadCheckpoint())
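    # Training loop: each epoch rolls out a batch of episodes, estimates advantages
    # with GAE, then runs several PPO update passes over the collected trajectories.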
########### DOES LM WORK ALONE ?
code = "[START][HOLE]kernel[END]"
encoded = list(self.tokenizer.TokenizeString(code))
encoded = encoded + [self.tokenizer.padToken] * (self.language_model.backend.config.architecture.max_position_embeddings - len(encoded))
inputs = {
'input_ids' : torch.LongTensor(encoded).unsqueeze(0).to(pytorch.device),
'input_mask' : (torch.LongTensor(encoded) != self.tokenizer.padToken).unsqueeze(0).to(pytorch.device),
'position_ids' : torch.arange(self.language_model.backend.config.architecture.max_position_embeddings).unsqueeze(0).to(pytorch.device),
'mask_labels' : None,
'input_features': None,
}
out = self.language_model.backend.model_step(
self.language_model.backend.GetEncoderModule(with_checkpoint = True, without_label_head = False).to(pytorch.device),
inputs,
)
preds = torch.argmax(out['prediction_logits'], dim = -1)
l.logger().info(self.tokenizer.tokensToString([int(x) for x in preds.squeeze(0)[:10].cpu()]))
########### DOES LM WORK ALONE ?
if self.is_world_process_zero():
rollout_hook = hooks.tensorMonitorHook(
self.log_path,
self.ckpt_step,
1, 1,
average = False,
)
# train_hook = hooks.tensorMonitorHook(
# self.logfile_path,
# self.current_step,
# min(self.steps_per_epoch, FLAGS.monitor_frequency)
# )
action_type_distrib = {
k: ([], []) for k in interactions.ACTION_TYPE_SPACE.keys()
}
index_type_distrib = {
k: ([], []) for k in range(self.qv_config.max_position_embeddings)
}
for ep in range(num_epochs):
# Run a batch of episodes.
input_ids, final_state, masked_input_ids, feature_ids,\
action_values, action_predictions, action_policy_probs,\
token_values, token_predictions, token_policy_probs,\
use_lm, rewards, discounted_rewards, done = self.rollout(
env, num_episodes, steps_per_episode, gamma,
)
action_advantages, token_advantages = self.gae(
rewards,
action_values,
token_values,
use_lm,
done,
gamma,
lam
)
# Compute reward-to-gos.
action_reward_to_go = action_advantages + action_values.squeeze(-1)
token_reward_to_go = token_advantages + token_values.squeeze(-1)
      # Normalize advantages.
action_advantages = (action_advantages - action_advantages.mean()) / (action_advantages.std() + 1e-5)
token_advantages = (token_advantages - token_advantages.mean()) / (token_advantages.std() + 1e-5)
# Set the batch size.
batch_size = int(input_ids.shape[0])
num_batches = int(input_ids.shape[1])
# Reshape to 2 dimensions.
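      # The (episode, step) leading dimensions are flattened so that every visited
      # state becomes an independent sample for the PPO updates below.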
action_advantages = torch.reshape(action_advantages, (-1, ) + action_advantages.shape[2:])
token_advantages = torch.reshape(token_advantages, (-1, ) + token_advantages.shape[2:])
action_reward_to_go = torch.reshape(action_reward_to_go, (-1, ) + action_reward_to_go.shape[2:])
token_reward_to_go = torch.reshape(token_reward_to_go, (-1, ) + token_reward_to_go.shape[2:])
action_values = torch.reshape(action_values, (-1, ) + action_values.shape[2:])
token_values = torch.reshape(token_values, (-1, ) + token_values.shape[2:])
action_predictions = torch.reshape(action_predictions, (-1, ) + action_predictions.shape[2:])
token_predictions = torch.reshape(token_predictions, (-1, ) + token_predictions.shape[2:])
use_lm = torch.reshape(use_lm, (-1, ) + use_lm.shape[2:])
input_ids = torch.reshape(input_ids, (-1, ) + input_ids.shape[2:])
masked_input_ids = torch.reshape(masked_input_ids, (-1, ) + masked_input_ids.shape[2:])
feature_ids = torch.reshape(feature_ids, (-1, ) + feature_ids.shape[2:])
action_policy_probs = torch.reshape(action_policy_probs, (-1, ) + action_policy_probs.shape[2:])
token_policy_probs = torch.reshape(token_policy_probs, (-1, ) + token_policy_probs.shape[2:])
if environment.WORLD_SIZE > 1:
raise NotImplementedError("Gather all the tensors here ?")
for k in action_type_distrib.keys():
action_type_distrib[k][0].append(ep)
action_type_distrib[k][1].append(0)
for k in index_type_distrib.keys():
index_type_distrib[k][0].append(ep)
index_type_distrib[k][1].append(0)
for act in action_predictions:
act_type = int(act) % len(interactions.ACTION_TYPE_SPACE)
act_index = int(act) // len(interactions.ACTION_TYPE_SPACE)
try:
action_type_distrib[interactions.ACTION_TYPE_MAP[act_type]][1][ep] += 1
index_type_distrib[act_index][1][ep] += 1
except IndexError as e:
l.logger().error(act_type)
l.logger().error(act_index)
l.logger().info(act)
l.logger().warn(action_type_distrib)
l.logger().info(index_type_distrib)
raise e
from deeplearning.benchpress.util import plotter as plt
plt.GrouppedBars(
groups = action_type_distrib,
plot_name = "Acts_per_rollout_step",
path = self.log_path,
)
plt.GrouppedBars(
groups = index_type_distrib,
plot_name = "pos_index_per_rollout_step",
path = self.log_path,
)
## Print the full trajectory with the best reward.
best_full_traj = torch.argmax(discounted_rewards[:,-1], dim = -1)
l.logger().info("Best full-trajectory sample:")
print(self.tokenizer.tokensToString([int(x) for x in final_state[int(best_full_traj)]], ignore_token=self.tokenizer.padToken))
# Split the data into batches in the num_workers dimension
for epoch in tqdm.tqdm(range(num_updates), total = num_updates, desc = "Epoch"):
for batch in tqdm.tqdm(range(num_batches), total = num_batches, desc = "Batch", leave = False):
start = batch * batch_size
end = (batch + 1) * batch_size
# Step batch
mean_action_loss, mean_token_loss = self.ppo_train_step(
epsilon,
value_loss_coeff,
entropy_coeff,
action_advantages [start:end].to(pytorch.device),
token_advantages [start:end].to(pytorch.device),
action_reward_to_go [start:end].to(pytorch.device),
token_reward_to_go [start:end].to(pytorch.device),
action_values [start:end].to(pytorch.device),
token_values [start:end].to(pytorch.device),
action_predictions [start:end],
token_predictions [start:end],
use_lm [start:end],
input_ids [start:end],
masked_input_ids [start:end],
feature_ids [start:end],
action_policy_probs [start:end].to(pytorch.device),
token_policy_probs [start:end].to(pytorch.device),
)
# Probably here save the necessary checkpoints.
# Also log the following stuff:
# Rewards, advantages (?), size of code ?, rtg ? Distribution of actions selected ?
# self.saveCheckpoint()
if self.is_world_process_zero():
rollout_hook.step(
mean_action_loss = float(mean_action_loss),
mean_token_loss = float(mean_token_loss),
mean_final_reward = float(torch.mean(discounted_rewards[:,-1])),
)
self.ckpt_step += 1
## distribution of actions per
return
def ppo_train_step(self,
epsilon : float,
value_loss_coeff : float,
entropy_coeff : float,
action_advantages : torch.FloatTensor,
token_advantages : torch.FloatTensor,
action_reward_to_go : torch.FloatTensor,
token_reward_to_go : torch.FloatTensor,
action_values : torch.FloatTensor,
token_values : torch.FloatTensor,
action_predictions : torch.LongTensor,
token_predictions : torch.LongTensor,
use_lm : torch.BoolTensor,
input_ids : torch.LongTensor,
masked_input_ids : torch.LongTensor,
feature_ids : torch.LongTensor,
action_policy_probs : torch.FloatTensor,
token_policy_probs : torch.FloatTensor,
) -> typing.Tuple[float, float]:
"""
Run a batch through PPO training.
Inputs:
action_optim:
Adam optimizer that handles action actor and critic.
token_optim:
Adam optimizer that handles token actor and critic.
action_advantages:
Calculated advantages for action model.
token_advantages:
Calculated advantages for token model.
action_reward_to_go:
Aggregated rewards for actions trajectory.
token_reward_to_go:
Aggregated rewards for tokens trajectory.
action_values:
Predicted values by action critic.
token_values:
Predicted values by token critic.
action_predictions:
Predicted action labels by action actor.
token_predictions:
Predicted token labels by token actor.
use_lm:
Indices of states that used the language model.
input_ids:
Input code for the action model.
masked_input_ids:
Masked input code for the token model. Contains masked code where use_lm==True, zeros otherwise.
feature_ids:
Tokenized vector of target state features.
action_policy_probs:
Predicted action label's probability.
token_policy_probs:
Predicted token label's probability.
"""
    # Enable training mode for the actor and critic models.
self.action_actor.train()
self.action_critic.train()
self.token_actor.train()
self.token_critic.train()
self.action_optim.zero_grad()
self.token_optim.zero_grad()
seq_len, feat_seq_len, batch_size = input_ids.shape[-1], feature_ids.shape[-1], input_ids.shape[0]
mean_action_loss, action_backwards = 0.0, 0
mean_token_loss, token_backwards = 0.0, 0
# Prepare model inputs.
feature_mask = feature_ids != self.feature_tokenizer.padToken
feature_pos = torch.arange(feat_seq_len, dtype = torch.long).repeat(batch_size, 1)
    input_mask   = input_ids != self.tokenizer.padToken
input_pos = torch.arange(seq_len, dtype = torch.long).repeat(batch_size, 1)
# Run the batch again in actor/critic.
# Actor model returns logits of action.
action_actor_out = self.action_actor(
encoder_feature_ids = feature_ids.to(pytorch.device),
encoder_feature_mask = feature_mask.to(pytorch.device),
encoder_position_ids = feature_pos.to(pytorch.device),
decoder_input_ids = input_ids.to(pytorch.device),
decoder_input_mask = input_mask.to(pytorch.device),
decoder_position_ids = input_pos.to(pytorch.device),
)
new_action_logits, new_action_probs = action_actor_out['action_logits'], action_actor_out['action_probs']
# Critic model returns value logit.
action_critic_out = self.action_critic(
encoder_feature_ids = feature_ids.to(pytorch.device),
encoder_feature_mask = feature_mask.to(pytorch.device),
encoder_position_ids = feature_pos.to(pytorch.device),
decoder_input_ids = input_ids.to(pytorch.device),
decoder_input_mask = input_mask.to(pytorch.device),
decoder_position_ids = input_pos.to(pytorch.device),
)
new_action_values, new_action_values_probs = action_critic_out['action_logits'], action_critic_out['action_probs']
# Sample the most likely action.
actual_lengths = torch.where(input_ids == self.tokenizer.endToken)
step_actions = self.policy.SampleActions(new_action_logits, actual_lengths)
# Collect the probability of said selected action, per episode.
new_action_probs = new_action_probs[(torch.arange(new_action_probs.shape[0]), step_actions)]
# Compute entropy of actions
new_action_entropy = torch.distributions.categorical.Categorical(logits = new_action_logits).entropy()
# Flatten the critic values.
new_action_values = new_action_values.flatten()
# Compute the PPO loss
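    # Clipped surrogate objective: weight the advantages by the new/old probability
    # ratio and clamp that ratio to [1 - epsilon, 1 + epsilon] so a single update
    # cannot push the policy too far from the one that generated the rollout.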
action_prob_ratio = torch.exp(new_action_probs) / torch.exp(action_policy_probs)
a = action_prob_ratio * action_advantages
b = torch.clamp(action_prob_ratio, 1 - epsilon, 1 + epsilon) * action_advantages
action_ppo_loss = -1 * torch.mean(torch.min(a, b))
# Compute the value function loss
# Clipped loss - same idea as PPO loss, don't allow value to move too
# far from where it was previously
value_pred_clipped = action_values + (new_action_values - action_values).clamp(-epsilon, epsilon)
value_losses = (new_action_values - action_reward_to_go) ** 2
value_losses_clipped = (value_pred_clipped - action_reward_to_go) ** 2
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped)
action_value_loss = value_loss.mean()
action_entropy_loss = torch.mean(new_action_entropy)
# Compute the final loss and backward.
action_loss = action_ppo_loss + value_loss_coeff * action_value_loss - entropy_coeff * action_entropy_loss
action_loss.backward()
mean_action_loss += action_loss.item()
action_backwards += 1
torch.nn.utils.clip_grad_norm_(self.action_actor.parameters(), .5)
torch.nn.utils.clip_grad_norm_(self.action_critic.parameters(), .5)
self.action_optim.step()
if torch.any(use_lm):
# Get the indices where use_lm is True.
lm_indices = torch.where(use_lm == True)[0]
# Prepare token model inputs.
lm_feature_ids = torch.index_select(feature_ids, 0, lm_indices)
lm_feature_mask = lm_feature_ids != self.feature_tokenizer.padToken
lm_feat_pos_id = torch.arange(feat_seq_len, dtype = torch.long).repeat(lm_feature_ids.shape[0], 1)
lm_input_ids = torch.index_select(masked_input_ids, 0, lm_indices)
lm_input_mask = lm_input_ids != self.tokenizer.padToken
lm_pos_id = torch.arange(seq_len, dtype = torch.long).repeat(lm_input_ids.shape[0], 1)
# Keep track of where [HOLE] reside.
ep_idx, seq_idx = torch.where(lm_input_ids == self.tokenizer.holeToken)
# Run the batch in actor/critic.
# The input indices are based on those the rollout action actor decided to use the LM.
# We directly use masked_input_ids for this reason.
# Actor model returns logits of the token predictions.
token_actor_out = self.token_actor(
encoder_feature_ids = lm_feature_ids.to(pytorch.device),
encoder_feature_mask = lm_feature_mask.to(pytorch.device),
encoder_position_ids = lm_feat_pos_id.to(pytorch.device),
decoder_input_ids = lm_input_ids.to(pytorch.device),
decoder_input_mask = lm_input_mask.to(pytorch.device),
decoder_position_ids = lm_pos_id.to(pytorch.device),
)
t, new_token_probs = token_actor_out['token_logits'], token_actor_out['token_probs']
# Collect the logits but only for the hole indices.
new_token_logits = t[(ep_idx, seq_idx)]
new_token_probs = new_token_probs[(ep_idx, seq_idx)]
# Critic model returns value logit.
token_critic_out = self.token_critic(
encoder_feature_ids = lm_feature_ids.to(pytorch.device),
encoder_feature_mask = lm_feature_mask.to(pytorch.device),
encoder_position_ids = lm_feat_pos_id.to(pytorch.device),
decoder_input_ids = lm_input_ids.to(pytorch.device),
decoder_input_mask = lm_input_mask.to(pytorch.device),
decoder_position_ids = lm_pos_id.to(pytorch.device),
)
new_token_values, new_token_values_probs = token_critic_out['token_logits'], token_critic_out['token_probs']
# Collect the critic's value for this hole index.
new_token_values = new_token_values[(ep_idx, seq_idx)]
new_token_values_probs = new_token_values_probs[(ep_idx, seq_idx)]
# According to policy, select the best token.
new_tokens = self.policy.SampleTokens(new_token_logits)
# Get probability of said token, per sequence.
new_token_probs = new_token_probs[(torch.arange(new_token_probs.shape[0]), new_tokens)]
# Calculate the entropy of new token logits.
new_token_entropy = torch.distributions.categorical.Categorical(logits = new_token_logits).entropy()
# Flatten critic values.
new_token_values = new_token_values.flatten()
# Keep only the advantages and policy probs for the indices where the LM was used.
lm_indices = lm_indices.to(pytorch.device)
token_advantages = torch.index_select(token_advantages, 0, lm_indices)
token_reward_to_go = torch.index_select(token_reward_to_go, 0, lm_indices)
token_policy_probs = torch.index_select(token_policy_probs, 0, lm_indices)
token_values = torch.index_select(token_values, 0, lm_indices)
# Compute the PPO loss
token_prob_ratio = torch.exp(new_token_probs) / torch.exp(token_policy_probs.to(pytorch.device))
a = token_prob_ratio * token_advantages.to(pytorch.device)
b = torch.clamp(token_prob_ratio, 1 - epsilon, 1 + epsilon) * token_advantages
token_ppo_loss = -1 * torch.mean(torch.min(a, b))
# Compute the value function loss
# Clipped loss - same idea as PPO loss, don't allow value to move too
# far from where it was previously
value_pred_clipped = token_values + (new_token_values - token_values).clamp(-epsilon, epsilon)
value_losses = (new_token_values - token_reward_to_go) ** 2
value_losses_clipped = (value_pred_clipped - token_reward_to_go) ** 2
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped)
token_value_loss = value_loss.mean()
token_entropy_loss = torch.mean(new_token_entropy)
# Compute the final loss and backward.
token_loss = token_ppo_loss + value_loss_coeff * token_value_loss - entropy_coeff * token_entropy_loss
token_loss.backward()
mean_token_loss += token_loss.item()
token_backwards += 1
torch.nn.utils.clip_grad_norm_(self.token_actor.parameters(), .5)
torch.nn.utils.clip_grad_norm_(self.token_critic.parameters(), .5)
self.token_optim.step()
try:
mean_action_loss = mean_action_loss / action_backwards
except ZeroDivisionError:
mean_action_loss = 0.0
try:
mean_token_loss = mean_token_loss / token_backwards
except ZeroDivisionError:
mean_token_loss = 0.0
return mean_action_loss, mean_token_loss
def rollout(self,
env : env.Environment,
num_episodes : int,
steps_per_episode : int,
gamma : float,
) -> typing.Tuple[torch.Tensor]:
"""
1. Initialize all tensors [(num_episodes x batch_size?) x steps_per_episode x state_tensor_size]
2. for step in steps_per_episode:
a) slice state tensor
b) slice action tensor
c) Pass through model
d) env.step and assign new state to state tensor.
e) Compute rewards and rtgs.
"""
## Reset the environment.
state = env.reset()
self.action_actor.eval()
self.action_critic.eval()
self.token_actor.eval()
self.token_critic.eval()
seq_len, feat_seq_len = len(state.encoded_code), len(state.encoded_features)
## Create state and action tensors.
# State workload inputs.
batch_feature_ids = torch.LongTensor(state.encoded_features).unsqueeze(0).unsqueeze(0).repeat(num_episodes, steps_per_episode, 1) # Input features for workload
batch_input_ids = torch.zeros((num_episodes, steps_per_episode, seq_len), dtype = torch.long) # Input code for workload
batch_input_ids[:, 0] = torch.LongTensor(state.encoded_code) # Initialization of empty code for all episode's starting point of trajectory.
batch_masked_input_ids = torch.zeros((num_episodes, steps_per_episode, seq_len), dtype = torch.long) # Initialization of masked input ids tensor for token model.
final_state = torch.zeros((num_episodes, seq_len), dtype = torch.long) # The final state of all trajectories.
# Action, token predictions and probs, critic values.
action_predictions = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.long) # All action predictions per episode, per state.
action_policy_probs = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.float32) # Probs of all actions predicted.
action_values = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.float32) # Values from critic for actions.
token_predictions = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.long) # All token predictions per episode, per state.
token_policy_probs = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.float32) # Probs of all tokens predicted.
token_values = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.float32) # Values from critic for tokens.
use_lm = torch.zeros((num_episodes, steps_per_episode), dtype = torch.bool) # Indices where LM was indeed used (action was 'add' or 'replace')
## Reward placeholders.
rewards = torch.zeros((num_episodes, steps_per_episode), dtype = torch.float32) # Rewards per episode, per action.
discounted_rewards = torch.zeros((num_episodes, steps_per_episode), dtype = torch.float32) # The aggregated-discounted rewards as the trajectories proceed.
traj_disc_rewards = torch.zeros((num_episodes), dtype = torch.float32) # The latest aggregated discounted reward computed.
feature_dists = torch.full((num_episodes,), -1, dtype = torch.float32) # A tensor with the last updated euclidean distance from feature target.
done = torch.zeros((num_episodes, steps_per_episode), dtype = torch.bool) # Done boolean tensor.
## Run execution loop.
for step in tqdm.tqdm(range(steps_per_episode), total = steps_per_episode, desc = "Rollout {} episodes".format(num_episodes)):
## This loop unfolds all batch_size trajectories.
# Input tensors
feature_ids = batch_feature_ids[:, step]
feature_mask = feature_ids != self.feature_tokenizer.padToken
feature_pos = torch.arange(feat_seq_len, dtype = torch.long).repeat(feature_ids.shape[0], 1)
input_ids = batch_input_ids[:, step]
input_mask = input_ids != self.tokenizer.padToken
input_pos = torch.arange(seq_len, dtype = torch.long).repeat(input_ids.shape[0], 1)
# Actor model returns logits of action.
step_action_actor_out = self.action_actor(
encoder_feature_ids = feature_ids.to(pytorch.device),
encoder_feature_mask = feature_mask.to(pytorch.device),
encoder_position_ids = feature_pos.to(pytorch.device),
decoder_input_ids = input_ids.to(pytorch.device),
decoder_input_mask = input_mask.to(pytorch.device),
decoder_position_ids = input_pos.to(pytorch.device),
)
step_action_logits, step_action_probs = step_action_actor_out['action_logits'], step_action_actor_out['action_probs']
# Critic model returns value logit.
step_action_critic_out = self.action_critic(
encoder_feature_ids = feature_ids.to(pytorch.device),
encoder_feature_mask = feature_mask.to(pytorch.device),
encoder_position_ids = feature_pos.to(pytorch.device),
decoder_input_ids = input_ids.to(pytorch.device),
decoder_input_mask = input_mask.to(pytorch.device),
decoder_position_ids = input_pos.to(pytorch.device),
)
step_action_values, step_action_values_probs = step_action_critic_out['action_logits'], step_action_critic_out['action_probs']
# Sample the most likely action.
actual_lengths = torch.where(input_ids == self.tokenizer.endToken)
step_actions = self.policy.SampleActions(step_action_logits, actual_lengths)
# Collect the probability of said selected action, per episode.
step_action_probs = step_action_probs[(torch.arange(step_action_probs.shape[0]), step_actions)]
# Declare here the augmented token vectors.
augmented_step_token_values = torch.zeros((num_episodes, 1), dtype = torch.float32)
augmented_step_tokens = torch.zeros((num_episodes, 1), dtype = torch.long)
augmented_step_token_probs = torch.zeros((num_episodes, 1), dtype = torch.float32)
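      # These stay zero for episodes whose action does not invoke the language model;
      # only the positions in lm_indices are overwritten below.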
## Find which sequences need to sample a token.
step_use_lm, masked_input_ids = env.intermediate_step(input_ids, step_actions)
if torch.any(step_use_lm):
## If the language model needs to be invoked ('add' or 'replace')
## Fix the necessary batch of elements here.
# Indices of starting tensors that need the LM.
lm_indices = torch.where(step_use_lm == True)[0]
# Input tensors.
lm_feature_ids = torch.index_select(feature_ids, 0, lm_indices)
lm_feature_mask = lm_feature_ids != self.feature_tokenizer.padToken
lm_feat_pos_ids = torch.arange(feat_seq_len, dtype = torch.long).repeat(lm_feature_ids.shape[0], 1)
lm_input_ids = torch.index_select(masked_input_ids, 0, lm_indices)
lm_input_mask = lm_input_ids != self.tokenizer.padToken
lm_input_pos_ids = torch.arange(seq_len, dtype = torch.long).repeat(lm_input_ids.shape[0], 1)
# Keep the hole indices to dereference the prediction logits.
ep_idx, seq_idx = torch.where(lm_input_ids == self.tokenizer.holeToken)
# Run the token actor, get token logits.
step_token_actor_out = self.token_actor(
encoder_feature_ids = lm_feature_ids.to(pytorch.device),
encoder_feature_mask = lm_feature_mask.to(pytorch.device),
encoder_position_ids = lm_feat_pos_ids.to(pytorch.device),
decoder_input_ids = lm_input_ids.to(pytorch.device),
decoder_input_mask = lm_input_mask.to(pytorch.device),
decoder_position_ids = lm_input_pos_ids.to(pytorch.device),
)
step_token_logits, step_token_probs = step_token_actor_out['token_logits'], step_token_actor_out['token_probs']
# Keep the prediction scores only for the masked token.
step_token_logits = step_token_logits[(ep_idx, seq_idx)]
step_token_probs = step_token_probs[(ep_idx, seq_idx)]
# Collect value logit from critic.
step_token_critic_out = self.token_critic(
encoder_feature_ids = lm_feature_ids.to(pytorch.device),
encoder_feature_mask = lm_feature_mask.to(pytorch.device),
encoder_position_ids = lm_feat_pos_ids.to(pytorch.device),
decoder_input_ids = lm_input_ids.to(pytorch.device),
decoder_input_mask = lm_input_mask.to(pytorch.device),
decoder_position_ids = lm_input_pos_ids.to(pytorch.device),
)
step_token_values, step_token_values_probs = step_token_critic_out['token_logits'], step_token_critic_out['token_probs']
# Get the critic's value only for masked index.
step_token_values = step_token_values[(ep_idx, seq_idx)]
step_token_values_probs = step_token_values_probs[(ep_idx, seq_idx)]
# According to policy, select the best token.
step_tokens = self.policy.SampleTokens(step_token_logits)
        # Debug-only inspection of the LM inputs and the sampled tokens. Kept disabled:
        # the blocking input() call would stall the rollout at every step.
        # for inp in lm_input_ids:
        #   l.logger().info(self.tokenizer.tokensToString([int(x) for x in inp], ignore_token = self.tokenizer.padToken))
        # for preds in step_tokens:
        #   l.logger().info(self.tokenizer.tokensToString([int(preds)], ignore_token = self.tokenizer.padToken))
        # input()
# Get probability of said token, per episode.
step_token_probs = step_token_probs[(torch.arange(step_token_probs.shape[0]), step_tokens)]
# First extend to original dimensions.
# Store the modified - with token LM - codes to the original tensors.
for nidx, lm_idx in zip(range(step_tokens.shape[0]), lm_indices):
augmented_step_token_values[lm_idx] = step_token_values[nidx]
augmented_step_tokens[lm_idx] = step_tokens[nidx]
augmented_step_token_probs[lm_idx] = step_token_probs[nidx]
# Here is the appropriate storing back.
batch_masked_input_ids[:, step] = masked_input_ids
token_values [:, step] = augmented_step_token_values.detach().cpu()
token_predictions [:, step] = augmented_step_tokens.detach().cpu()
token_policy_probs [:, step] = augmented_step_token_probs.detach().cpu()
## Step environment and compute rewards.
input_ids, reward, discounted_reward, d, step_use_lm = env.new_step(
input_ids,
step_actions,
augmented_step_tokens,
traj_disc_rewards,
feature_dists,
step_use_lm,
gamma
)
## Save data to rollout buffers.
if step < steps_per_episode - 1:
batch_input_ids [:, step+1] = input_ids
else:
final_state = input_ids
action_values [:, step] = step_action_values.detach().cpu()
action_predictions [:, step] = step_actions.unsqueeze(0).reshape((-1, 1)).detach().cpu()
action_policy_probs[:, step] = step_action_probs.unsqueeze(0).reshape((-1, 1)).detach().cpu()
use_lm [:, step] = step_use_lm
rewards [:, step] = reward
traj_disc_rewards = discounted_reward
discounted_rewards [:, step] = traj_disc_rewards
done [:, step] = d
return (
batch_input_ids, # source code states.
final_state, # The state of the trajectory after the last applied action.
batch_masked_input_ids, # Masked source code for the language model.
batch_feature_ids, # Target feature vector state.
action_values, # Critic action logits.
action_predictions, # Actor sampled label actions.
action_policy_probs, # Actor probabilities of sampled actions.
token_values, # Critic token values.
token_predictions, # Actor sampled label tokens.
token_policy_probs, # Actor probabilities of sampled tokens.
use_lm, # Indices of actions that required language model.
rewards, # Rewards of each step.
discounted_rewards, # Discounted rewards of each step.
done, # Whether this step concludes the episode.
)
def gae(self, rewards, action_values, token_values, use_lm, episode_ends, gamma, lam):
"""
Compute generalized advantage estimate.
rewards: a list of rewards at each step.
values: the value estimate of the state at each step.
episode_ends: an array of the same shape as rewards, with a 1 if the
episode ended at that step and a 0 otherwise.
gamma: the discount factor.
lam: the GAE lambda parameter.
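    A rough, shape-level sketch of the expected call (tensor names follow rollout()):
      action_adv, token_adv = self.gae(rewards, action_values, token_values, use_lm, done, gamma, lam)
      # rewards / done : (num_episodes, steps_per_episode)
      # *_values : (num_episodes, steps_per_episode, 1), squeezed below
      # action_adv / token_adv : (num_episodes, steps_per_episode) advantage estimates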
"""
# Invert episode_ends to have 0 if the episode ended and 1 otherwise
episode_ends = (episode_ends * -1) + 1
action_values = action_values.squeeze(-1)
token_values = token_values.squeeze(-1)
N = rewards.shape[0]
T = rewards.shape[1]
action_gae_step = torch.zeros((N, ))
token_gae_step = torch.zeros((N, ))
action_advantages = torch.zeros((N, T))
token_advantages = torch.zeros((N, T))
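    # Walk the trajectory backwards so every step can reuse the GAE value already
    # accumulated for the step that follows it.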
for t in reversed(range(T - 1)):
# First compute delta, which is the one-step TD error
action_delta = rewards[:, t] + gamma * action_values[:, t + 1] * episode_ends[:, t] - action_values[:, t]
token_delta = rewards[:, t] + gamma * token_values[:, t + 1] * episode_ends[:, t] - token_values[:, t]
# Then compute the current step's GAE by discounting the previous step
# of GAE, resetting it to zero if the episode ended, and adding this
# step's delta
# And store it
action_gae_step = action_delta + gamma * lam * episode_ends[:, t] * action_gae_step
token_gae_step = token_delta + gamma * lam * episode_ends[:, t] * token_gae_step
      action_advantages[:, t] = action_gae_step
      token_advantages[:, t] = token_gae_step
return action_advantages, token_advantages
def saveCheckpoint(self) -> None:
"""
Save agent state.
"""
if self.is_world_process_zero():
ckpt_comp = lambda prefix, x: self.ckpt_path / "{}{}_model-{}.pt".format(prefix, x, self.ckpt_step)
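      # Checkpoint files are named "<prefix><model>_model-<step>.pt" inside ckpt_path,
      # e.g. "actoraction_model-0.pt" for the action actor at step 0.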
if pytorch.torch_tpu_available:
if pytorch.torch_xla_model.rendezvous("saving_checkpoint"):
pytorch.torch_xla_model.save(self.action_actor, ckpt_comp("actor", "action"))
pytorch.torch_xla_model.save(self.action_critic, ckpt_comp("critic", "action"))
pytorch.torch_xla_model.save(self.action_optim, ckpt_comp("action", "optimizer"))
pytorch.torch_xla_model.save(self.token_optim, ckpt_comp("token", "optimizer"))
pytorch.torch_xla.rendezvous("saving_optimizer_states")
else:
if isinstance(self.action_actor, torch.nn.DataParallel):
torch.save(self.action_actor.module.state_dict(), ckpt_comp("actor", "action"))
else:
          torch.save(self.action_actor.state_dict(), ckpt_comp("actor", "action"))
if isinstance(self.action_critic, torch.nn.DataParallel):
torch.save(self.action_critic.module.state_dict(), ckpt_comp("critic", "action"))
else:
torch.save(self.action_critic.state_dict(), ckpt_comp("critic", "action"))
if isinstance(self.token_actor, torch.nn.DataParallel):
torch.save(self.token_actor.module.state_dict(), ckpt_comp("actor", "token"))
else:
          torch.save(self.token_actor.state_dict(), ckpt_comp("actor", "token"))
if isinstance(self.token_critic, torch.nn.DataParallel):
torch.save(self.token_critic.module.state_dict(), ckpt_comp("critic", "token"))
else:
torch.save(self.token_critic.state_dict(), ckpt_comp("critic", "token"))
torch.save(self.action_optim.state_dict(), ckpt_comp("action", "optimizer"))
torch.save(self.token_optim.state_dict(), ckpt_comp("token", "optimizer"))
with open(self.ckpt_path / "checkpoint.meta", 'a') as mf:
mf.write("train_step: {}\n".format(self.ckpt_step))
self.ckpt_step += 1
distrib.barrier()
return
  def loadCheckpoint(self) -> int:
"""
Load agent state.
"""
if not (self.ckpt_path / "checkpoint.meta").exists():
return -1
    with open(self.ckpt_path / "checkpoint.meta", 'r') as mf:
get_step = lambda x: int(x.replace("\n", "").replace("train_step: ", ""))
lines = mf.readlines()
entries = set({get_step(x) for x in lines})
ckpt_step = max(entries)
ckpt_comp = lambda prefix, x: self.ckpt_path / "{}{}_model-{}.pt".format(prefix, x, ckpt_step)
if isinstance(self.action_actor, torch.nn.DataParallel):
try:
self.action_actor.module.load_state_dict(torch.load(ckpt_comp("actor", "action")))
except RuntimeError:
from collections import OrderedDict
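        # Checkpoint keys may have been written with or without the DataParallel
        # "module." prefix; remap them to match the current wrapper before loading.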
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("actor", "action")).items():
if k[:7] == "module.":
name = k[7:]
else:
name = "module." + k
          new_state_dict[name] = v
self.action_actor.module.load_state_dict(new_state_dict)
else:
try:
self.action_actor.module.load_state_dict(torch.load(ckpt_comp("actor", "action")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("actor", "action")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
self.action_actor.load_state_dict(new_state_dict)
if isinstance(self.action_critic, torch.nn.DataParallel):
try:
        self.action_critic.module.load_state_dict(torch.load(ckpt_comp("critic", "action")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("actor", "critic")).items():
if k[:7] == "module.":
name = k[7:]
else:
name = "module." + k
          new_state_dict[name] = v
self.action_critic.module.load_state_dict(new_state_dict)
else:
try:
        self.action_critic.module.load_state_dict(torch.load(ckpt_comp("critic", "action")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("actor", "critic")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
self.action_critic.load_state_dict(new_state_dict)
if isinstance(self.token_actor, torch.nn.DataParallel):
try:
        self.token_actor.module.load_state_dict(torch.load(ckpt_comp("actor", "token")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("token", "action")).items():
if k[:7] == "module.":
name = k[7:]
else:
name = "module." + k
          new_state_dict[name] = v
self.token_actor.module.load_state_dict(new_state_dict)
else:
try:
        self.token_actor.module.load_state_dict(torch.load(ckpt_comp("actor", "token")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("token", "action")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
self.token_actor.load_state_dict(new_state_dict)
if isinstance(self.token_critic, torch.nn.DataParallel):
try:
        self.token_critic.module.load_state_dict(torch.load(ckpt_comp("critic", "token")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("token", "critic")).items():
if k[:7] == "module.":
name = k[7:]
else:
name = "module." + k
          new_state_dict[name] = v
self.token_critic.module.load_state_dict(new_state_dict)
else:
try:
        self.token_critic.module.load_state_dict(torch.load(ckpt_comp("critic", "token")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("token", "critic")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
self.token_critic.load_state_dict(new_state_dict)
if self.action_optim is not None and self.token_optim is not None and ckpt_step > 0:
self.action_optim.load_state_dict(
torch.load(ckpt_comp("action", "optimizer"), map_location = pytorch.device)
)
self.token_optim.load_state_dict(
torch.load(ckpt_comp("token", "optimizer"), map_location = pytorch.device)
)
self.action_actor.eval()
self.action_critic.eval()
self.token_actor.eval()
self.token_critic.eval()
return ckpt_step
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on
several machines, this is only going to be :obj:`True` for one process).
"""
if pytorch.torch_tpu_available:
return pytorch.torch_xla_model.is_master_ordinal(local=False)
elif pytorch.num_nodes > 1:
return torch.distributed.get_rank() == 0
else:
return True
| 48,376 | 46.945491 | 184 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/visuals.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | 595 | 41.571429 | 74 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/env.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RL Environment for the task of targeted benchmark generation.
"""
# import gym
import typing
import pathlib
import pickle
import numpy as np
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import feature_sampler
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.reinforcement_learning import memory
from deeplearning.benchpress.reinforcement_learning import data_generator
from deeplearning.benchpress.proto import reinforcement_learning_pb2
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import logging as l
torch = pytorch.torch
from absl import flags
class Environment(object):
"""
Environment representation for RL Agents.
"""
metadata = {
'render_modes' : ['human'],
'render_fps' : 4,
}
@property
def init_code_state(self) -> np.array:
return np.array(
[self.tokenizer.startToken, self.tokenizer.endToken]
+ ([self.tokenizer.padToken] * (self.max_position_embeddings - 2))
)
def __init__(self,
config : reinforcement_learning_pb2.RLModel,
max_position_embeddings : int,
corpus : corpuses.Corpus,
tokenizer : tokenizers.TokenizerBase,
feature_tokenizer : tokenizers.FeatureTokenizer,
cache_path : pathlib.Path,
) -> None:
self.config = config
self.tokenizer = tokenizer
self.feature_tokenizer = feature_tokenizer
self.max_position_embeddings = max_position_embeddings
self.feature_sequence_length = self.config.agent.feature_tokenizer.feature_sequence_length
self.cache_path = cache_path / "environment"
self.ckpt_path = cache_path / "checkpoint"
if environment.WORLD_RANK == 0:
self.cache_path.mkdir(exist_ok = True, parents = True)
self.ckpt_path.mkdir(exist_ok = True, parents = True)
self.current_state = None
self.feature_dataset = None
self.loadCheckpoint()
if self.feature_dataset is None:
self.feature_dataset = []
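      # Build the pool of (feature_space, target_feature_vector) tuples that reset()
      # will cycle through as generation targets.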
if self.config.HasField("train_set"):
data = corpus.GetTrainingFeatures()
for dp in data:
for k, v in dp.items():
if v:
self.feature_dataset.append((k, v))
elif self.config.HasField("random"):
self.feature_dataset = []
return
def intermediate_step(self,
state_code : torch.LongTensor,
step_actions : torch.LongTensor,
) -> typing.Tuple[torch.Tensor]:
"""
The environment reads the predicted index, and makes
necessary transformations to the input ids so they can be
fed into the language model, if need be.
"""
num_episodes = step_actions.shape[0]
lm_input_ids = torch.zeros(state_code.shape, dtype = torch.long)
use_lm = torch.zeros((num_episodes), dtype = torch.bool)
for idx, (code, action) in enumerate(zip(state_code, step_actions)):
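      # The flat action id encodes (token position, action type) as
      # position * |ACTION_TYPE_SPACE| + type; decode both parts here.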
act_type = int(action) % len(interactions.ACTION_TYPE_SPACE)
act_index = int(action) // len(interactions.ACTION_TYPE_SPACE)
if act_type == interactions.ACTION_TYPE_SPACE['ADD']:
if torch.any(code == self.tokenizer.padToken):
# ADD is only valid if there is room for new tokens, i.e. at least one [PAD] exists.
new_code = torch.cat((code[:act_index + 1], torch.LongTensor([self.tokenizer.holeToken]), code[act_index + 1:]))
new_code = new_code[:code.shape[0]]
lm_input_ids[idx] = new_code
use_lm[idx] = True
elif act_type == interactions.ACTION_TYPE_SPACE['REPLACE']:
if int(code[act_index]) not in self.tokenizer.metaTokenValues:
          # REPLACE is only valid if the token it is trying to replace is not a meta token.
new_code = torch.clone(code)
new_code[act_index] = self.tokenizer.holeToken
lm_input_ids[idx] = new_code
use_lm[idx] = True
return use_lm, lm_input_ids
def new_step(self,
state_code : torch.LongTensor,
step_actions : torch.LongTensor,
step_tokens : torch.LongTensor,
traj_disc_rewards : torch.FloatTensor,
feature_dists : torch.FloatTensor,
use_lm : torch.BoolTensor,
gamma : float,
) -> typing.Tuple[torch.Tensor]:
"""
Step the environment, compute the reward.
"""
num_episodes = step_actions.shape[0]
reward = torch.zeros((num_episodes), dtype = torch.float32)
discounted_reward = torch.zeros((num_episodes), dtype = torch.float32)
done = torch.zeros((num_episodes), dtype = torch.bool)
for idx, (code, act, tok, dr, lm) in enumerate(zip(state_code, step_actions, step_tokens, discounted_reward, use_lm)):
act_type = int(act) % len(interactions.ACTION_TYPE_SPACE)
act_index = int(act) // len(interactions.ACTION_TYPE_SPACE)
token_id = int(tok)
lm = bool(lm)
try:
real_len = torch.where(code == self.tokenizer.endToken)[0][0]
except Exception as e:
        # This exception is raised when the endToken has been removed from the sequence.
l.logger().warn(code)
l.logger().error(torch.where(code == self.tokenizer.endToken))
l.logger().critical("No ENDTOKEN has been found.")
raise e
if act_index >= real_len and act_type != interactions.ACTION_TYPE_SPACE['COMP']:
l.logger().critical(self.tokenizer.tokensToString([int(x) for x in code]))
l.logger().critical(act_type)
l.logger().critical(act_index)
l.logger().critical(real_len)
raise ValueError("Why did this run out of bounds ?")
## ADD
if act_type == interactions.ACTION_TYPE_SPACE['ADD']:
if int(token_id) not in self.tokenizer.metaTokenValues and torch.any(code == self.tokenizer.padToken):
# ADD is only valid if predicted token is not a meta token.
# Also out-of-bounds restriction, also applied by intermediate step.
new_code = torch.cat((code[:act_index + 1], torch.LongTensor([token_id]), code[act_index + 1:]))
new_code = new_code[:code.shape[0]]
state_code[idx] = new_code
else:
# Unflag current sequence as LM-ready.
use_lm[idx] = False
reward[idx] = -0.1
## REMOVE
elif act_type == interactions.ACTION_TYPE_SPACE['REM']:
if int(code[act_index]) not in self.tokenizer.metaTokenValues:
new_code = torch.cat((code[:act_index], code[act_index + 1:], torch.LongTensor([self.tokenizer.padToken])))
state_code[idx] = new_code
## REPLACE
elif act_type == interactions.ACTION_TYPE_SPACE['REPLACE']:
if int(token_id) not in self.tokenizer.metaTokenValues and int(code[act_index]) not in self.tokenizer.metaTokenValues:
# REPLACE is valid if predicted token is not a meta token.
# Also if to-be-replaced token is not a meta token.
state_code[idx][act_index] = token_id
else:
# Unflag current sequence as LM-ready.
use_lm[idx] = False
reward[idx] = -0.1
## COMPILE
elif act_type == interactions.ACTION_TYPE_SPACE['COMP']:
src = self.tokenizer.ArrayToCode([int(x) for x in code])
try:
_ = opencl.Compile(src)
features = extractor.ExtractFeatures(src, ext = [self.current_state.feature_space])
compiles = True
except ValueError:
compiles = False
features = None
if compiles and len(src) > 0:
cur_dist = feature_sampler.calculate_distance(
features[self.current_state.feature_space],
self.current_state.target_features,
)
if feature_dists[idx] == -1 or cur_dist < feature_dists[idx]:
reward[idx] = +0.5
if cur_dist == 0:
done[idx] = True
else:
reward[idx] = -0.5
else:
raise ValueError("Invalid action type: {}".format(act_type))
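    # Accumulate the discounted return along each trajectory and reset the running
    # total for episodes that have just terminated.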
discounted_reward = traj_disc_rewards * gamma + reward
traj_disc_rewards[torch.where(done == True)] = 0.0
return state_code, reward, discounted_reward, done, use_lm
def reset(self, recycle: bool = True) -> interactions.State:
"""
Reset the state of the environment.
"""
if recycle and self.current_state:
l.logger().warn("Remember to remove this line when you take training seriously.")
return self.current_state
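    # Rotate the target queue: push the current target to the back and pop the next one.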
    if self.current_state is not None:
      self.feature_dataset.append(
        (self.current_state.feature_space, self.current_state.target_features)
      )
next = self.feature_dataset.pop(0)
self.current_state = interactions.State(
target_features = next[1],
feature_space = next[0],
encoded_features = self.feature_tokenizer.TokenizeFeatureVector(next[1], next[0], self.feature_sequence_length),
code = "",
encoded_code = self.init_code_state,
comment = "State: \nCode:\n\nFeatures:\n{}".format(next[1]),
)
return self.current_state
def get_state(self) -> interactions.State:
"""
Get the current state of the environment.
"""
return self.current_state
def loadCheckpoint(self) -> None:
"""
Load environment checkpoint.
"""
if (self.ckpt_path / "environment.pkl").exists():
distrib.lock()
with open(self.ckpt_path / "environment.pkl", 'rb') as inf:
self.current_state = pickle.load(inf)
distrib.unlock()
distrib.barrier()
if (self.ckpt_path / "feature_loader.pkl").exists():
distrib.lock()
with open(self.ckpt_path / "feature_loader.pkl", 'rb') as inf:
        self.feature_dataset = pickle.load(inf)
distrib.unlock()
distrib.barrier()
return
def saveCheckpoint(self) -> None:
"""
Save environment state.
"""
if environment.WORLD_RANK == 0:
with open(self.ckpt_path / "environment.pkl", 'wb') as outf:
pickle.dump(self.current_state, outf)
with open(self.ckpt_path / "feature_loader.pkl", 'wb') as outf:
        pickle.dump(self.feature_dataset, outf)
distrib.barrier()
return
| 11,257 | 40.389706 | 126 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/evaluate_cand_database.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for databases of BenchPress samples."""
import contextlib
import math
import pathlib
import datetime
import typing
import sqlite3
import numpy as np
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from absl import app, flags
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import plotter as plt
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
Base = declarative.declarative_base()
flags.DEFINE_string(
"eval_cand_db",
"",
"Set path of candidatae Database to evaluate."
)
class SearchCandidate(Base, sqlutil.ProtoBackedMixin):
"""A database row representing a BenchPress sample.
This is the clgen.Sample protocol buffer in SQL format.
"""
__tablename__ = "search_candidates"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of sample text
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# sample_hash
sample_sha256 : str = sql.Column(sql.String(64), nullable = False)
# generation id of sample.
generation_id : int = sql.Column(sql.Integer, nullable = False)
# Frequency of specific sample.
frequency : int = sql.Column(sql.Integer, nullable = False)
# hole length in terms of actual tokens hidden
abs_hole_lengths : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# hole length in terms of percentage of kernel's actual length.
rel_hole_lengths : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# how many tokens were used to fill that hole
hole_ind_length : int = sql.Column(sql.Integer, nullable = False)
# Original input feed pre-masking
input_feed : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Input Ids with the hole placed.
input_ids : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Encoded original input
encoded_input_ids : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Feature vector of input_feed
input_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Score-distance of input from target benchmark
input_score : float = sql.Column(sql.Float, nullable = False)
# Output sample
sample : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# indices filled in the hole.
sample_indices : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Actual length of sample, excluding pads.
num_tokens : int = sql.Column(sql.Integer, nullable = False)
# Sample's vector of features.
output_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# The runtime features it must achieve.
runtime_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Sample distance from target benchmark.
sample_score : float = sql.Column(sql.Float, nullable = False)
# Name and contents of target benchmark specified.
target_benchmark : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Feature vector of target benchmark.
target_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Whether sample compiles or not.
compile_status : bool = sql.Column(sql.Boolean, nullable = False)
# Percentage delta of output score compared to input score.
score_delta : float = sql.Column(sql.Float, nullable = False)
# Delta between feature of sample - input.
features_delta : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable = False)
@classmethod
def FromArgs(cls,
tokenizer,
id : int,
input_feed : np.array,
input_ids : np.array,
input_features : typing.Dict[str, float],
input_score : float,
hole_lengths : typing.List[int],
sample : np.array,
sample_indices : np.array,
output_features : typing.Dict[str, float],
runtime_features : typing.Dict[str, float],
sample_score : float,
target_benchmark : typing.Tuple[str, str],
target_features : typing.Dict[str, float],
compile_status : bool,
generation_id : int,
# timestep : int,
) -> typing.TypeVar("SearchCandidate"):
"""Construt SearchCandidate table entry from argumentns."""
str_input_feed = tokenizer.tokensToString(input_ids, ignore_token = tokenizer.padToken, with_formatting = True)
str_sample = tokenizer.ArrayToCode(sample, with_formatting = True)
len_indices = len(sample_indices)
sample_indices = tokenizer.tokensToString(sample_indices, ignore_token = tokenizer.padToken)
num_tokens = len(sample)
if tokenizer.padToken in sample:
num_tokens = np.where(sample == tokenizer.padToken)[0][0]
actual_length = len(input_ids) - 3
if tokenizer.padToken in input_ids:
actual_length = np.where(input_ids == tokenizer.padToken)[0][0] - 3
return SearchCandidate(
id = id,
sha256 = crypto.sha256_str(str_input_feed + str_sample + str(hole_lengths) + target_benchmark[0]),
sample_sha256 = crypto.sha256_str(str_sample),
generation_id = generation_id,
frequency = 1,
abs_hole_lengths = ','.join([str(hl) for hl in hole_lengths if hl >= 0]),
rel_hole_lengths = ','.join([str(hl / (hl + actual_length)) for hl in hole_lengths if hl >= 0]),
hole_ind_length = len_indices,
input_feed = tokenizer.ArrayToCode(input_feed, with_formatting = True),
input_ids = str_input_feed,
encoded_input_ids = ','.join([str(x) for x in input_ids]),
input_features = '\n'.join(["{}:{}".format(k, v) for k, v in input_features.items()]) if input_features else "None",
input_score = input_score,
sample = str_sample,
sample_indices = sample_indices,
num_tokens = int(num_tokens),
output_features = '\n'.join(["{}:{}".format(k, v) for k, v in output_features.items()]) if output_features else "None",
runtime_features = '\n'.join(["{}:{}".format(k, v) for k, v in runtime_features.items()]) if runtime_features else "None",
sample_score = sample_score,
target_benchmark = "// {}\n{}".format(target_benchmark[0], target_benchmark[1]),
target_features = '\n'.join(["{}:{}".format(k, v) for k, v in target_features.items()]) if target_features else "None",
compile_status = compile_status,
score_delta = (sample_score - input_score) / input_score if not math.isinf(input_score) and input_score > 0 else math.inf,
features_delta = '\n'.join(["{}:{}".format(k, output_features[k] - input_features[k]) for k in input_features.keys() if (output_features[k] - input_features[k] != 0)]) if input_features and output_features else math.inf,
date_added = datetime.datetime.utcnow(),
)
class SearchCandidateDatabase(sqlutil.Database):
"""A database for analysis of search generations and candidates."""
def __init__(self, url: str, must_exist: bool = False):
super(SearchCandidateDatabase, self).__init__(url, Base, must_exist = must_exist)
@property
def count(self):
"""Number of input feeds in DB."""
with self.Session() as s:
count = s.query(SearchCandidate).count()
return count
@property
def get_data(self):
"""Return all database in list format"""
with self.Session() as s:
return s.query(SearchCandidate).all()
@property
def get_target_benchmarks(self):
"""Return all unique target benchmarks"""
with self.Session() as s:
return s.query(SearchCandidate.target_benchmark).all()
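# The helpers below consume SearchCandidate rows and write their plots next to the
# database file pointed to by the --eval_cand_db flag.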
def input_samples_distribution(data) -> None:
# 1) Frequency per generation.
# x-axis: times occured, y-axis: how many samples did hit these freq.
# One group of these distributions per generation.
freqd = {}
for dp in data:
gen, f = dp.generation_id, dp.frequency
if gen in freqd:
if f in freqd[gen]:
freqd[gen][f] += 1
else:
freqd[gen][f] = 1
else:
freqd[gen] = {}
freqd[gen][f] = 1
for k, v in freqd.items():
freqd[k] = (list(v.keys()), list(v.values()))
plt.GrouppedBars(
groups = freqd, # Dict[Dict[int, int]]
plot_name = "freq_input_samples_per_gen",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Repetition of input/samples pair per generation",
x_name = "# of repetitions",
)
return
def samples_distribution(data) -> None:
freqd = {}
for dp in data:
gen, sam = dp.generation_id, dp.sample
hsm = crypto.sha256_str(sam)
if gen in freqd:
if hsm in freqd[gen]:
freqd[gen][hsm] += 1
else:
freqd[gen][hsm] = 1
else:
freqd[gen] = {}
freqd[gen][hsm] = 1
for k, v in freqd.items():
gdict = {}
for samp, freq in v.items():
if freq in gdict:
gdict[freq] += 1
else:
gdict[freq] = 1
freqd[k] = (list(gdict.keys()), list(gdict.values()))
plt.GrouppedBars(
groups = freqd, # Dict[Dict[int, int]]
plot_name = "freq_samples_per_gen",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Repetition of samples per generation",
x_name = "# of repetitions",
)
return
def rel_length_distribution(data) -> None:
# 2) Relative hole length distribution.
rhl_dist = {}
for dp in data:
rhl = dp.rel_hole_lengths
try:
rounded = int(100*float(rhl))
if rounded not in rhl_dist:
rhl_dist[rounded] = 1
else:
rhl_dist[rounded] += 1
except Exception:
continue
plt.FrequencyBars(
x = list(rhl_dist.keys()),
y = list(rhl_dist.values()),
plot_name = "perc_hole_length_distribution",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "% hole length distribution",
x_name = "percentile",
)
return
def token_delta_per_gen(data) -> None:
# 3) Per generation: delta of (filled_tokens - hole_length)
logger.warn("Filled tokens - hole length will be wrong for multiple holes!")
logger.warn("For now, I am assigning every hole to the total of sample indices length.")
gen_hole_deltas = {} # gen -> list of deltas.
for dp in data:
gen, ahl, lind = dp.generation_id, dp.abs_hole_lengths, dp.hole_ind_length
try:
ahl = sum([int(x) for x in ahl.split(',') if x])
if gen not in gen_hole_deltas:
gen_hole_deltas[gen] = []
gen_hole_deltas[gen].append(lind - int(ahl))
except Exception:
continue
plt.CategoricalViolin(
x = list(gen_hole_deltas.keys()),
y = list(gen_hole_deltas.values()),
plot_name = "hole_delta_vs_gen",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Hole delta vs generation",
x_name = "Generation id",
)
return
def comp_token_delta_per_gen(data) -> None:
# 3) Per generation: delta of (filled_tokens - hole_length)
gen_hole_deltas = {} # gen -> list of deltas.
for dp in data:
if dp.compile_status == 1:
gen, ahl, lind = dp.generation_id, dp.abs_hole_lengths, dp.hole_ind_length
try:
ahl = sum([int(x) for x in ahl.split(',') if x])
if gen not in gen_hole_deltas:
gen_hole_deltas[gen] = []
gen_hole_deltas[gen].append(lind - int(ahl))
except Exception:
continue
plt.CategoricalViolin(
x = list(gen_hole_deltas.keys()),
y = list(gen_hole_deltas.values()),
plot_name = "comp_hole_delta_vs_gen",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Hole delta vs generation for compilable samples",
x_name = "Generation id",
)
return
def nohole_in_end_token_delta_per_gen(data) -> None:
# 3) Per generation: delta of (filled_tokens - hole_length)
gen_hole_deltas = {} # gen -> list of deltas.
for dp in data:
if "[HOLE][END]" not in dp.input_ids.replace('\n', '').replace(' ', ''):
gen, ahl, lind = dp.generation_id, dp.abs_hole_lengths, dp.hole_ind_length
try:
ahl = sum([int(x) for x in ahl.split(',') if x])
if gen not in gen_hole_deltas:
gen_hole_deltas[gen] = []
gen_hole_deltas[gen].append(lind - int(ahl))
except Exception:
continue
plt.CategoricalViolin(
x = list(gen_hole_deltas.keys()),
y = list(gen_hole_deltas.values()),
plot_name = "nohole_end_token_delta_vs_gen",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Hole delta vs generation for input ids with no hole before end",
x_name = "Generation id",
)
return
def token_score_delta_scatter(data) -> None:
# 4) 2D scatter: token delta vs score delta.
tds, sds = [], []
for dp in data:
td = dp.hole_ind_length - sum([int(x) for x in dp.abs_hole_lengths.split(',') if x])
sd = dp.score_delta if not math.isinf(dp.score_delta) else None
if sd is not None and td is not None:
tds.append(td)
sds.append(sd)
plt.ScatterPlot(
x = tds,
y = sds,
plot_name = "token_delta_vs_score_delta",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Token Delta VS Score Delta",
x_name = "Token Delta",
y_name = "Score Delta",
)
return
def score_vs_token_delta(data) -> None:
  # 5) Bar plot: the 3x3 combinations of score delta sign and token delta sign (negative, positive, zero).
groups = {
'better score' : [['token delta > 0', 'token delta < 0', 'token delta == 0'], [0, 0, 0]],
'worse score' : [['token delta > 0', 'token delta < 0', 'token delta == 0'], [0, 0, 0]],
'same score' : [['token delta > 0', 'token delta < 0', 'token delta == 0'], [0, 0, 0]],
}
nsum = 0
for dp in data:
td = dp.hole_ind_length - sum([int(x) for x in dp.abs_hole_lengths.split(',') if x])
sd = dp.score_delta if not math.isinf(dp.score_delta) else None
if sd is not None and td is not None:
nsum += 1
if sd < 0:
if td > 0:
groups['better score'][1][0] += 1
elif td < 0:
groups['better score'][1][1] += 1
else:
groups['better score'][1][2] += 1
elif sd > 0:
if td > 0:
groups['worse score'][1][0] += 1
elif td < 0:
groups['worse score'][1][1] += 1
else:
groups['worse score'][1][2] += 1
else:
if td > 0:
groups['same score'][1][0] += 1
elif td < 0:
groups['same score'][1][1] += 1
else:
groups['same score'][1][2] += 1
for k, v in groups.items():
for idx, nv in enumerate(v[1]):
groups[k][1][idx] = 100 * (nv / nsum)
plt.GrouppedBars(
groups = groups,
plot_name = "token_score_deltas",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Sample Frequency % VS token & score delta",
x_name = "category",
)
return
def comp_vs_token_delta(data) -> None:
  # 6) Bar plot: the 2x3 combinations of compilability and token delta sign.
groups = {
'token delta > 0': [['compile', 'not-compile'], [0, 0]],
'token delta < 0': [['compile', 'not-compile'], [0, 0]],
'token delta == 0': [['compile', 'not-compile'], [0, 0]],
}
nsum = 0
for dp in data:
td = dp.hole_ind_length - sum([int(x) for x in dp.abs_hole_lengths.split(',') if x])
cs = dp.compile_status
if td is not None and cs is not None:
nsum += 1
if td > 0:
if cs == 1:
groups['token delta > 0'][1][0] += 1
else:
groups['token delta > 0'][1][1] += 1
elif td < 0:
if cs == 1:
groups['token delta < 0'][1][0] += 1
else:
groups['token delta < 0'][1][1] += 1
else:
if cs == 1:
groups['token delta == 0'][1][0] += 1
else:
groups['token delta == 0'][1][1] += 1
for k, v in groups.items():
for idx, nv in enumerate(v[1]):
groups[k][1][idx] = 100 * (nv / nsum)
plt.GrouppedBars(
groups = groups,
plot_name = "comp_token_delta",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Sample Frequency % VS Compilability & token delta",
x_name = "category",
)
return
def rel_length_score(data) -> None:
# 7) 2D scatter per generation: rel hole length vs score delta.
rhl_lens = []
scd = []
for dp in data:
try:
rhl = int(100*float(dp.rel_hole_lengths))
sd = dp.score_delta
if rhl is not None and not math.isinf(sd):
rhl_lens.append(rhl)
scd.append(sd)
except Exception:
continue
plt.ScatterPlot(
x = rhl_lens,
y = scd,
plot_name = "rel_hl_score_delta",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
x_name = "Relative Hole Length",
y_name = "Score Delta",
title = "Relative Hole Length VS Score Delta",
)
return
def token_delta_vs_len_input(data) -> None:
# 8) token delta vs len_input_feed.
feed_len = []
token_deltas = []
for dp in data:
td = dp.hole_ind_length - sum([int(x) for x in dp.abs_hole_lengths.split(',') if x])
if td is not None:
token_deltas.append(td)
feed_len.append(len([int(x) for x in dp.encoded_input_ids.split(',') if x]))
plt.ScatterPlot(
x = feed_len,
y = token_deltas,
plot_name = "feed_len_token_delta",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
x_name = "Input Feed Length",
y_name = "Token Delta",
title = "Input Length VS Token Delta",
)
return
def token_vs_rel_len(data) -> None:
# Token Delta vs Relative hole length percentile.
tds = []
rhl_list = []
for dp in data:
try:
rhl = dp.rel_hole_lengths
rounded = int(100*float(rhl))
td = dp.hole_ind_length - sum([int(x) for x in dp.abs_hole_lengths.split(',') if x])
if td is not None:
tds.append(td)
        rhl_list.append(rounded)
except Exception:
continue
plt.ScatterPlot(
x = rhl_list,
y = tds,
plot_name = "rel_hl_token_delta",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
x_name = "Relative Hole length %",
y_name = "Token Delta",
title = "Rel. Hole length VS Token Delta",
)
return
def score_per_abs_hlen(data) -> None:
"""
  Grouped bars of a) better, b) same, c) worse score, per absolute hole length unit.
"""
abshl = []
score_ds = []
groups = {
'better score' : {},
'worse score' : {},
'same score' : {},
}
max_abs = 0
for dp in data:
try:
ahl = sum([int(x) for x in dp.abs_hole_lengths.split(',') if x])
max_abs = max(max_abs, ahl)
sd = dp.score_delta
if not math.isinf(sd):
if sd > 0:
k = 'worse score'
elif sd < 0:
k = 'better score'
else:
k = 'same score'
if str(ahl) not in groups[k]:
groups[k][str(ahl)] = 1
else:
groups[k][str(ahl)] += 1
except Exception as e:
      l.logger().error(e)
continue
  for hl in range(0, max_abs):
    total = 0
    for k, v in groups.items():
      if str(hl) in v:
        total += v[str(hl)]
    for k, v in groups.items():
      if str(hl) in v:
        groups[k][str(hl)] = 100 * (v[str(hl)] / total)
for k, v in groups.items():
groups[k] = (list(v.keys()), list(v.values()))
plt.GrouppedBars(
groups = groups, # Dict[Dict[int, int]]
plot_name = "score_per_abs_hlen",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Score Direction (%) per Absolute Hole Length",
x_name = "Size of Hole",
)
def comp_vs_len_indices(data) -> None:
"""
  Grouped bars of compilable vs non-compilable samples per length of the filled-in indices.
"""
abshl = []
score_ds = []
groups = {
'compile' : {},
'not-compile' : {},
}
max_len_ind = 0
for dp in data:
try:
len_ind = dp.hole_ind_length
max_len_ind = max(max_len_ind, len_ind)
cs = dp.compile_status
if cs ==1:
k = 'compile'
else:
k = 'not-compile'
if str(len_ind) not in groups[k]:
groups[k][str(len_ind)] = 1
else:
groups[k][str(len_ind)] += 1
except Exception as e:
      l.logger().error(e)
continue
# for l in range(0, max_len_ind):
# total = 0
# for k, v in groups.items():
# if str(l) in v:
# total += v[str(l)]
# for k, v in groups.items():
# if str(l) in v:
# groups[k][str(l)] = 100 * (v[str(l)] / total)
for k, v in groups.items():
groups[k] = (list(v.keys()), list(v.values()))
plt.GrouppedBars(
groups = groups, # Dict[Dict[int, int]]
plot_name = "comp_per_len_indices",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Compilability VS Length of Indices",
x_name = "Length of Indices",
)
return
def comp_vs_len_indices_over_len_input(data) -> None:
"""
  Grouped bars of compilable vs non-compilable samples per ratio of indices length over input length.
"""
abshl = []
score_ds = []
groups = {
'compile' : {},
'not-compile' : {},
}
max_len_ind = 0.0
for dp in data:
try:
len_ratio = dp.hole_ind_length / len([int(x) for x in dp.encoded_input_ids.split(',') if x])
len_ratio = round(len_ratio, 1)
max_len_ind = max(max_len_ind, len_ratio)
cs = dp.compile_status
if cs ==1:
k = 'compile'
else:
k = 'not-compile'
if str(len_ratio) not in groups[k]:
groups[k][str(len_ratio)] = 1
else:
groups[k][str(len_ratio)] += 1
except Exception as e:
      l.logger().error(e)
continue
# for l in range(0, max_len_ind):
# total = 0
# for k, v in groups.items():
# if str(l) in v:
# total += v[str(l)]
# for k, v in groups.items():
# if str(l) in v:
# groups[k][str(l)] = 100 * (v[str(l)] / total)
for k, v in groups.items():
groups[k] = (list(v.keys()), list(v.values()))
plt.GrouppedBars(
groups = groups, # Dict[Dict[int, int]]
plot_name = "comp_per_indices_input_len_ratio",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Compilability VS (Length of Indices / Length of Input)",
x_name = "Length of Indices / Length of Input",
)
return
def comp_vs_num_tokens(data) -> None:
"""
  Grouped bars of compiling vs non-compiling percentages, per number of tokens in the sample.
"""
abshl = []
score_ds = []
groups = {
'compile' : {},
'not-compile' : {},
}
max_numtok = 0
for dp in data:
try:
numtok = dp.num_tokens
max_numtok = max(max_numtok, numtok)
cs = dp.compile_status
      if cs == 1:
k = 'compile'
else:
k = 'not-compile'
if str(numtok) not in groups[k]:
groups[k][str(numtok)] = 1
else:
groups[k][str(numtok)] += 1
except Exception as e:
logger.error(e)
continue
for l in range(0, max_numtok):
total = 0
for k, v in groups.items():
if str(l) in v:
total += v[str(l)]
for k, v in groups.items():
if str(l) in v:
groups[k][str(l)] = 100 * (v[str(l)] / total)
for k, v in groups.items():
groups[k] = (list(v.keys()), list(v.values()))
plt.GrouppedBars(
groups = groups, # Dict[Dict[int, int]]
plot_name = "comp_per_len_sample",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Compilability % VS Length of Sample",
x_name = "Length of Sample",
)
return
def score_per_rel_hlen(data) -> None:
"""
  Grouped bars of a) better, b) same, c) worse score, per relative hole length percentile.
"""
abshl = []
score_ds = []
groups = {
'better score' : {},
'worse score' : {},
'same score' : {},
}
max_abs = 0
for dp in data:
try:
rhl = dp.rel_hole_lengths
rounded = int(100*float(rhl))
max_abs = max(max_abs, rounded)
sd = dp.score_delta
if not math.isinf(sd):
if sd > 0:
k = 'worse score'
elif sd < 0:
k = 'better score'
else:
k = 'same score'
if str(rounded) not in groups[k]:
groups[k][str(rounded)] = 1
else:
groups[k][str(rounded)] += 1
except Exception as e:
continue
for l in range(0, max_abs):
total = 0
for k, v in groups.items():
if str(l) in v:
total += v[str(l)]
for k, v in groups.items():
if str(l) in v:
groups[k][str(l)] = 100 * (v[str(l)] / total)
for k, v in groups.items():
groups[k] = (list(v.keys()), list(v.values()))
plt.GrouppedBars(
groups = groups, # Dict[Dict[int, int]]
plot_name = "score_per_rel_hlen",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Score Direction (%) per Relative Hole Length",
x_name = "Size of Hole %",
)
def score_direction_categorical_distrib_per_rel_hlen(data) -> None:
"""
  Distribution of relative hole lengths within each score-direction category (better / worse / same).
"""
abshl = []
score_ds = []
groups = {
'better score' : {},
'worse score' : {},
'same score' : {},
}
normalizers = {
'better score' : 0,
'worse score' : 0,
'same score' : 0,
}
max_abs = 0
for dp in data:
try:
rhl = dp.rel_hole_lengths
rounded = int(100*float(rhl))
max_abs = max(max_abs, rounded)
sd = dp.score_delta
if not math.isinf(sd):
if sd > 0:
k = 'worse score'
normalizers[k] += 1
elif sd < 0:
k = 'better score'
normalizers[k] += 1
else:
k = 'same score'
normalizers[k] += 1
if str(rounded) not in groups[k]:
groups[k][str(rounded)] = 1
else:
groups[k][str(rounded)] += 1
except Exception as e:
continue
for k, v in groups.items():
for rhlk, rhlv in v.items():
groups[k][rhlk] = groups[k][rhlk] / normalizers[k]
for k, v in groups.items():
groups[k] = (list(v.keys()), list(v.values()))
plt.GrouppedBars(
groups = groups, # Dict[Dict[int, int]]
plot_name = "score_cat_distrib_rel_hlen",
path = pathlib.Path(FLAGS.eval_cand_db).absolute().parent,
title = "Score Direction Distribution per Category VS Relative Hole Length",
x_name = "Size of Hole %",
)
def run_db_evaluation(db: SearchCandidateDatabase) -> None:
data = db.get_data
input_samples_distribution(data)
samples_distribution(data)
rel_length_distribution(data)
comp_token_delta_per_gen(data)
nohole_in_end_token_delta_per_gen(data)
token_delta_per_gen(data)
token_score_delta_scatter(data)
score_vs_token_delta(data)
comp_vs_num_tokens(data)
comp_vs_token_delta(data)
comp_vs_len_indices(data) ######## I also need per (len_indices / len input feed) ratio.
comp_vs_len_indices_over_len_input(data)
rel_length_score(data)
token_delta_vs_len_input(data)
token_vs_rel_len(data)
score_per_abs_hlen(data)
score_per_rel_hlen(data)
score_direction_categorical_distrib_per_rel_hlen(data)
# raise NotImplementedError("bars of better, same, worse score per rel hole length.")
return
def initMain(*args, **kwargs):
l.initLogger(name = "eval_cand_db")
db_path = pathlib.Path(FLAGS.eval_cand_db).absolute()
if not db_path.exists():
raise FileNotFoundError(str(db_path))
db = SearchCandidateDatabase(url = "sqlite:///{}".format(str(db_path)), must_exist = True)
run_db_evaluation(db)
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0) | 28,668 | 32.728235 | 229 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/feature_sampler.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature space sampling of source code.
"""
import typing
import pathlib
import pickle
import math
import time
import numpy as np
from numpy.random import default_rng
from deeplearning.benchpress.features import normalizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.corpuses import benchmarks
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"randomize_selection",
None,
"Debugging integer flag that abolishes euclidean distance priority and picks X randomly selected elements to return as top-X."
)
def grid_walk_generator(feature_space: str) -> typing.Iterator[typing.Dict[str, float]]:
"""
Walk through feature space and generate
target feature instances to approximate.
"""
step_size = 100
target = normalizers.normalizer[feature_space]
for i in range(1, step_size+1):
ct = {}
for k in target.keys():
if k != "F2:coalesced/mem" and k != "F4:comp/mem":
ct[k] = int(i * (target[k] / step_size))
if "F2:coalesced/mem" in ct:
ct["F2:coalesced/mem"] = ct["coalesced"] / max(1, ct["mem"])
if "F4:comp/mem" in ct:
ct["F4:comp/mem"] = ct["comp"] / max(1, ct["mem"])
yield ct
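# Illustrative sketch: the grid walk yields `step_size` progressively scaled
# copies of the chosen feature space's normalizer vector, recomputing the two
# ratio features at every step. "GreweFeatures" is one of the registered keys
# of normalizers.normalizer; this helper exists only for illustration.
def _example_grid_walk_usage(num_steps: int = 3) -> None:
  """Illustrative only: print the first few grid-walk targets."""
  for step, target in enumerate(grid_walk_generator("GreweFeatures")):
    if step >= num_steps:
      break
    print(step, target)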
def calculate_distance(infeat: typing.Dict[str, float],
tarfeat: typing.Dict[str, float],
feature_space: str = None,
) -> float:
"""
Euclidean distance between sample feature vector
and current target benchmark.
"""
try:
d = 0
for key in tarfeat.keys():
n = 1.0 # tarfeat[key] if tarfeat[key] != 0 and key != "F2:coalesced/mem" and key != "F4:comp/mem" else 1# normalizers.normalizer[feature_space][key]
i = float(infeat[key]) / n
t = float(tarfeat[key]) / n
d += abs((t**2) - (i**2))
return math.sqrt(d)
except KeyError as e:
l.logger().error("InFeats: {}".format(infeat))
l.logger().error("TargetFeats: {}".format(tarfeat))
raise e
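# Illustrative sketch: as implemented above, the distance sums |t^2 - i^2| over
# the target's keys and takes the square root, so keys present only in the
# sample are ignored. The two dictionaries below are made-up feature vectors
# used purely for illustration.
def _example_calculate_distance() -> float:
  """Illustrative only."""
  target = {"comp": 10.0, "mem": 4.0}
  sample = {"comp": 8.0, "mem": 4.0, "rational": 2.0} # "rational" is not in the target, so it is ignored
  return calculate_distance(sample, target) # sqrt(|10^2 - 8^2| + |4^2 - 4^2|) = 6.0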
class Benchmark(typing.NamedTuple):
path : pathlib.Path
name : str
contents : str
features : typing.Dict[str, float]
runtime_features : typing.Dict[str, float]
class FeatureSampler(object):
"""
Abstract class for sampling features.
"""
@property
def is_active(self):
return False
def __init__(self,
workspace : pathlib.Path,
feature_space : str,
target : str,
seed : int = None,
):
self.workspace = workspace
self.feature_space = feature_space
self.target = target
self.benchmarks = []
self.target_benchmark = None
self.rng = default_rng(seed)
return
def calculate_distance(self, infeat: typing.Dict[str, float]) -> float:
"""
Euclidean distance between sample feature vector
and current target benchmark.
"""
return calculate_distance(infeat, self.target_benchmark.features, self.feature_space)
def topK_candidates(self,
candidates : typing.List[typing.TypeVar("ActiveSample")],
K : int,
dropout_prob : float,
) -> typing.List[typing.TypeVar("ActiveSample")]:
"""
Return top-K candidates.
"""
if FLAGS.randomize_selection is None:
sorted_cands = sorted(candidates, key = lambda x: x.score) # [:K]
if dropout_prob > 0.0 and len(sorted_cands) > K:
        visited = set()
        for kidx in range(K): # if K > len(sorted_cands) because your LM's compilation rate sucks, this will give you an IndexError.
          if self.rng.random() <= dropout_prob and len(visited) < len(sorted_cands) - K and sorted_cands[kidx].score > 0.0:
            swap_idx = self.rng.choice(list(set(range(K, len(sorted_cands))) - visited))
            sorted_cands[kidx], sorted_cands[swap_idx] = sorted_cands[swap_idx], sorted_cands[kidx]
            visited.add(swap_idx)
return sorted_cands[:K]
else:
if FLAGS.randomize_selection == 0:
raise ValueError("randomize_selection, {}, cannot be 0.".format(FLAGS.randomize_selection))
l.logger().warn("Randomized generation selection has been activated. You must know what you are doing!")
kf = min(FLAGS.randomize_selection, len(candidates))
indices = set(self.rng.choice(len(candidates), size = kf, replace = False))
return [c for idx, c in enumerate(candidates) if idx in indices]
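  # Note on the branch above (illustrative description): with
  # FLAGS.randomize_selection unset, candidates are sorted by ascending score
  # (smaller distance to the target is better) and, for each of the K best, a
  # coin flip with probability `dropout_prob` may swap it with a not-yet-used
  # candidate from outside the top-K before the first K entries are returned.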
def sample_from_set(self,
candidates : typing.List[typing.TypeVar("ActiveSample")],
search_width : int,
dropout_prob : float,
only_unique : bool = True,
                      ) -> typing.List[typing.TypeVar("ActiveSample")]:
    """
    Find the top-K candidates by minimum
    Euclidean distance to the current target benchmark.
"""
"""
for idx in range(len(candidates)):
candidates[idx] = candidates[idx]._replace(
score = self.calculate_distance(candidates[idx].features)
)
"""
if only_unique:
hset = set()
unique_candidates = []
for c in candidates:
sample_str = ','.join([str(x) for x in c.sample])
if sample_str not in hset:
unique_candidates.append(c)
hset.add(sample_str)
candidates = unique_candidates
return self.topK_candidates(candidates, search_width, dropout_prob = dropout_prob)
def step_generation(self, candidates: typing.List['ActiveSample']) -> None:
"""
End of LM generation's epoch hook.
"""
return
def iter_benchmark(self, *unused_args, **unused_kwargs) -> None:
"""
Override this method to set how new parts of the feature space are going to
be targetted.
"""
raise NotImplementedError("Abstract class.")
def is_terminated(self) -> bool:
raise NotImplementedError
def saveCheckpoint(self) -> None:
"""
Save feature sampler state.
"""
state_dict = {
'benchmarks' : self.benchmarks,
'target_benchmark' : self.target_benchmark,
}
if environment.WORLD_RANK == 0:
with open(self.workspace / "feature_sampler_state.pkl", 'wb') as outf:
pickle.dump(state_dict, outf)
return
def loadCheckpoint(self) -> None:
"""
Override to select checkpoints are going to be loaded.
"""
raise NotImplementedError("Abstract class.")
class BenchmarkSampler(FeatureSampler):
"""
  An experimental class for targeting a fixed set of benchmarks.
  Will be refactored.
"""
def __init__(self,
workspace : pathlib.Path,
feature_space : str,
target : str,
git_corpus : corpuses.Corpus = None,
seed : int = None,
):
super(BenchmarkSampler, self).__init__(workspace, feature_space, target, seed)
if self.target != "grid_walk":
self.path = pathlib.Path(benchmarks.targets[target]).resolve()
self.reduced_git_corpus = [
(cf, feats[self.feature_space])
for cf, feats in git_corpus.getFeaturesContents(sequence_length = 768)
if self.feature_space in feats and feats[self.feature_space]
]
self.loadCheckpoint()
try:
if self.target_benchmark is None:
self.target_benchmark = self.benchmarks.pop(0)
# l.logger().info("Target benchmark: {}\nTarget fetures: {}".format(self.target_benchmark.name, self.target_benchmark.features))
except IndexError:
self.target_benchmark = None
return
def iter_benchmark(self, *unused_args, **unused_kwargs):
"""
When it's time, cycle through the next target benchmark.
"""
# self.benchmarks.append(self.benchmarks.pop(0))
del unused_args
del unused_kwargs
try:
self.target_benchmark = self.benchmarks.pop(0)
# l.logger().info("Target benchmark: {}\nTarget fetures: {}".format(self.target_benchmark.name, self.target_benchmark.features))
except IndexError:
self.target_benchmark = None
self.saveCheckpoint()
return
def is_terminated(self) -> bool:
if not self.target_benchmark:
return True
return False
def loadCheckpoint(self) -> None:
"""
Load feature sampler state.
"""
if (self.workspace / "feature_sampler_state.pkl").exists():
distrib.lock()
with open(self.workspace / "feature_sampler_state.pkl", 'rb') as infile:
state_dict = pickle.load(infile)
distrib.unlock()
self.benchmarks = state_dict['benchmarks']
self.target_benchmark = state_dict['target_benchmark']
else:
self.benchmarks = []
self.target_benchmark = None
if self.target == "grid_walk":
for target_features in grid_walk_generator(self.feature_space):
self.benchmarks.append(
Benchmark(
"",
"",
"",
target_features,
{}
)
)
self.saveCheckpoint()
else:
if environment.WORLD_RANK == 0:
kernels = benchmarks.yield_cl_kernels(self.path)
# pool = multiprocessing.Pool()
# for benchmark in pool.map(
# functools.partial(
# benchmarks.benchmark_worker,
# feature_space = self.feature_space,
# reduced_git_corpus = self.reduced_git_corpus
# ), kernels
# ):
# if benchmark:
# self.benchmarks.append(benchmark)
# pool.close()
for kernel in kernels:
benchmark = benchmarks.benchmark_worker(kernel, self.feature_space, self.reduced_git_corpus)
if benchmark:
self.benchmarks.append(benchmark)
benchmarks.resolve_benchmark_names(self.benchmarks)
self.benchmarks = sorted(self.benchmarks, key = lambda b: b.name)
distrib.broadcast(self.benchmarks)
else:
self.benchmarks = distrib.broadcast()
distrib.barrier()
l.logger().info("Loaded {}, {} benchmarks".format(self.target, len(self.benchmarks)))
l.logger().info(', '.join([x for x in set([x.name for x in self.benchmarks])]))
return
class ActiveSampler(FeatureSampler):
"""
Euclidean distance-based feature space sampler for active learning.
The downstream task and active learner are encapsulated.
This class is the API between the language model's searching method/generation
and the active learner's query by committee.
"""
@property
def is_active(self):
return True
def __init__(self,
workspace : pathlib.Path,
feature_space : str,
active_learner : 'active_models.Model',
tokenizer : 'tokenizers.TokenizerBase',
seed : int = None,
):
super(ActiveSampler, self).__init__(workspace, feature_space, str(active_learner.downstream_task), seed)
self.active_learner = active_learner
self.loadCheckpoint()
try:
if self.target_benchmark is None:
self.target_benchmark = self.benchmarks.pop(0)
# l.logger().info("Target benchmark: {}\nTarget fetures: {}".format(self.target_benchmark.name, self.target_benchmark.features))
except IndexError:
self.benchmarks = self.sample_active_learner()
self.target_benchmark = self.benchmarks.pop(0)
self.tokenizer = tokenizer
self.saveCheckpoint()
return
def sample_active_learner(self,
keep_top_k : int = 1,
num_samples : int = None,
) -> typing.List[Benchmark]:
"""
Sample active learner for num_samples and sort by highest entropy.
"""
return [
Benchmark("", "", "", sample['static_features'], sample['runtime_features'])
for sample in self.active_learner.Sample(num_samples = num_samples)
][:keep_top_k]
def teach_active_learner(self, target_samples: typing.List['ActiveSample']) -> None:
"""
Update active learner with targetted generated samples by the language model.
"""
if FLAGS.disable_active_learning:
return
upd_samples, upd_loader = self.active_learner.downstream_task.UpdateDataGenerator(
target_samples, self.target_benchmark.features, self.tokenizer
)
if len(upd_loader) > 0:
self.active_learner.UpdateLearn(upd_loader)
self.active_learner.downstream_task.UpdateTrainDataset(upd_loader)
else:
l.logger().info("Skipping empty update dataset.")
return
def step_generation(self, candidates: typing.List['ActiveSample']) -> None:
"""
End of LM generation's epoch hook.
Sends the epoch candidates for runtime label computation.
"""
self.active_learner.downstream_task.step_generation(candidates)
return
def iter_benchmark(self, target_samples: typing.List['ActiveSample'] = None) -> None:
"""
    Set the next item from the list as the target.
    If none is left, ask the active learner for new targets,
    unless a termination criterion has been met.
    At the first iteration, target_samples is None.
    When the LM generates targets and calls iter_benchmark(),
    it provides the generated samples, which are passed to the
    active learner for an update step.
"""
if target_samples:
self.teach_active_learner(target_samples)
try:
self.target_benchmark = self.benchmarks.pop(0)
# l.logger().info("Target fetures: {}".format(self.target_benchmark.features))
except IndexError:
l.logger().warn("Implement a termination criteria here.")
self.benchmarks = self.sample_active_learner()
self.iter_benchmark()
return
self.saveCheckpoint()
return
def is_terminated(self) -> bool:
l.logger().warn("You need to find a termination criteria for the active learner.")
return False
def saveCheckpoint(self) -> None:
if environment.WORLD_RANK == 0:
super(ActiveSampler, self).saveCheckpoint()
distrib.barrier()
return
def loadCheckpoint(self) -> None:
"""
Load pickled list of benchmarks, if exists.
Otherwise, ask the first batch of features from the active learner.
"""
if (self.workspace / "feature_sampler_state.pkl").exists():
distrib.lock()
try:
with open(self.workspace / "feature_sampler_state.pkl", 'rb') as infile:
state_dict = pickle.load(infile)
infile.close()
self.benchmarks = state_dict['benchmarks']
self.target_benchmark = state_dict['target_benchmark']
while not infile.closed:
time.sleep(1)
except EOFError:
distrib.unlock()
self.loadCheckpoint()
return
distrib.unlock()
else:
self.benchmarks = []
self.target_benchmark = None
l.logger().info("Loaded {}, {} benchmarks".format(self.target, len(self.benchmarks)))
return
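# Illustrative sketch: a hypothetical driver loop showing how either sampler is
# typically consumed by the language model -- keep the best candidates for the
# current target, then advance to the next benchmark. `sampler` and
# `candidates` are placeholders for objects constructed elsewhere; this helper
# exists only for illustration.
def _example_targeting_loop(sampler: FeatureSampler, candidates: list) -> None:
  """Illustrative only."""
  while not sampler.is_terminated():
    best = sampler.sample_from_set(candidates, search_width = 16, dropout_prob = 0.0)
    sampler.iter_benchmark(best)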
| 15,820 | 35.203661 | 155 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/instcount.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature Extraction module for LLVM InstCount pass.
"""
import typing
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import environment
INSTCOUNT = ["-load", environment.INSTCOUNT, "-InstCount"]
class InstCountFeatures(object):
"""
70-dimensional LLVM-IR feature vector,
describing Total Funcs, Total Basic Blocks, Total Instructions
and count of all different LLVM-IR instruction types.
"""
def __init__(self):
return
@classmethod
def ExtractFeatures(cls,
src: str,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Dict[str, float]:
return cls.RawToDictFeats(cls.ExtractRawFeatures(src, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args))
@classmethod
def ExtractIRFeatures(cls, bytecode: str, **kwargs) -> typing.Dict[str, float]:
return cls.RawToDictFeats(cls.ExtractIRRawFeatures(bytecode))
@classmethod
def ExtractRawFeatures(cls,
src: str,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> str:
try:
return opencl.CompileOptimizer(src, INSTCOUNT, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args)
except ValueError:
return ""
@classmethod
def ExtractIRRawFeatures(cls, bytecode: str, **kwargs) -> str:
try:
return opencl.CompileOptimizerIR(bytecode, INSTCOUNT)
except ValueError:
return ""
@classmethod
def RawToDictFeats(cls, str_feats: str, **kwargs) -> typing.Dict[str, float]:
return {feat.split(' : ')[0]: int(feat.split(' : ')[1]) for feat in str_feats.split('\n') if ' : ' in feat}
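# Illustrative sketch: the raw output of the InstCount pass is one
# "name : value" pair per line, which RawToDictFeats turns into an
# integer-valued dictionary. The two lines below are fabricated for
# illustration only.
def _example_instcount_parse() -> typing.Dict[str, float]:
  """Illustrative only."""
  raw = "TotalInsts : 42\nTotalFuncs : 3"
  return InstCountFeatures.RawToDictFeats(raw) # {'TotalInsts': 42, 'TotalFuncs': 3}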
| 2,631 | 36.070423 | 145 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/grewe.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature Extraction module for Dominic Grewe features.
"""
import subprocess
import tempfile
import typing
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
GREWE = environment.GREWE
KEYS = [
"comp",
"rational",
"mem",
"localmem",
"coalesced",
"atomic",
"F2:coalesced/mem",
"F4:comp/mem",
]
class GreweFeatures(object):
"""
Source code features as defined in paper
"Portable Mapping of Data Parallel Programs to OpenCL for Heterogeneous Systems"
by D.Grewe, Z.Wang and M.O'Boyle.
"""
@classmethod
def KEYS(cls: 'GreweFeatures') -> typing.List[str]:
return KEYS
def __init__(self):
return
@classmethod
def ExtractFeatures(cls,
src: str,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Dict[str, float]:
"""
Invokes clgen_features extractor on source code and return feature mappings
in dictionary format.
If the code has syntax errors, features will not be obtained and empty dict
is returned.
"""
str_features = cls.ExtractRawFeatures(src, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args)
return cls.RawToDictFeats(str_features)
@classmethod
def ExtractFeaturesIter(cls,
srcs: typing.List[str],
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Iterator[typing.Dict[str, float]]:
"""
Invokes clgen_features extractor on source code and return feature mappings
in dictionary format.
If the code has syntax errors, features will not be obtained and empty dict
is returned.
"""
for src in srcs:
str_features = cls.ExtractRawFeatures(src, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args)
yield cls.RawToDictFeats(str_features)
@classmethod
def ExtractIRFeatures(cls, bytecode: str, **kwargs) -> typing.Dict[str, float]:
"""
Bytecode input in text-level feature space makes no sense. Therefore this function is just a decoy.
"""
return {}
@classmethod
def ExtractRawFeatures(cls,
src: str,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> str:
"""
Invokes clgen_features extractor on a single kernel.
Params:
src: (str) Kernel in string format.
Returns:
Feature vector and diagnostics in str format.
"""
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile('w', prefix = "feat_ext_", suffix = '.cl', dir = tdir) as f:
f.write(src)
f.flush()
arguments = []
if header_file:
htf = tempfile.NamedTemporaryFile('w', prefix = "feat_ext_head_", suffix = '.h', dir = tdir)
htf.write(header_file)
htf.flush()
arguments = ["-extra-arg=-include{}".format(htf.name)]
if extra_args:
for arg in extra_args:
arguments.append("--extra-arg={}".format(arg))
cmd = [str(GREWE)] + arguments + [f.name]
process = subprocess.Popen(
cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
universal_newlines = True,
)
stdout, stderr = process.communicate()
return stdout
@classmethod
def ExtractIRRawFeatures(cls, bytecode: str, **kwargs) -> str:
"""
Bytecode input in text-level feature space makes no sense. Therefore this function is just a decoy.
"""
return ""
@classmethod
def RawToDictFeats(cls, str_feats: str, **kwargs) -> typing.Dict[str, float]:
"""
Converts clgen_features subprocess output from raw string
to a mapped dictionary of feature -> value.
"""
try:
lines = str_feats.split('\n')
# header, cumvs = lines[0].split(',')[2:], lines[-2].split(',')[2:]
header, values = lines[0].split(',')[2:], [l for l in lines[1:] if l != '' and l != '\n']
cumvs = [0] * 8
try:
for vv in values:
for idx, el in enumerate(vv.split(',')[2:]):
cumvs[idx] = float(el)
if len(header) != len(cumvs):
raise ValueError("Bad alignment of header-value list of features. This should never happen.")
return {key: float(value) for key, value in zip(header, cumvs)}
except ValueError as e:
raise ValueError("{}, {}".format(str(e), str_feats))
except Exception as e:
return {} | 5,683 | 32.046512 | 135 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/hidden_state.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extraction module that uses a language model's hidden states as features.
"""
import math
import typing
from absl import flags
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.models import backends
FLAGS = flags.FLAGS
KEYS = None
LANGUAGE_MODEL = None
def setup_lm(lm: backends.BackendBase) -> None:
"""
Initialize the language model that will be used as a feature extractor.
Also, the keys of the feature space (they are parametric to the hidden size).
"""
global LANGUAGE_MODEL
global KEYS
KEYS = ["f{}".format(x) for x in range(lm.hidden_state_size)]
LANGUAGE_MODEL = lm
return
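# Illustrative sketch: the module is wired up by calling setup_lm() once with a
# trained backend; afterwards the classmethods below map source strings to
# hidden-state feature dictionaries keyed "f0" .. "f{hidden_state_size-1}".
# `language_model` is a placeholder for any backends.BackendBase instance.
def _example_hidden_state_setup(language_model: backends.BackendBase) -> None:
  """Illustrative only."""
  setup_lm(language_model)
  feats = HiddenStateFeatures.ExtractFeatures("kernel void A(global int* a) { a[0] = 1; }")
  assert len(feats) == language_model.hidden_state_size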
class HiddenStateFeatures(object):
"""
  Feature space built from the hidden-state activations of the configured
  language model: one dimension per hidden unit, keyed "f0", "f1", ...
"""
def __init__(self):
return
@classmethod
def ExtractFeatures(cls,
src: str,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Dict[str, float]:
"""
    Runs the language model on the source code and returns the hidden-state
    feature mapping in dictionary format.
"""
raw_features = cls.ExtractRawFeatures(src)
return cls.RawToDictFeats(raw_features)
@classmethod
def ExtractFeaturesIter(cls,
srcs: typing.List[str],
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Iterator[typing.Dict[str, float]]:
"""
    Runs the language model on a batch of sources and yields one hidden-state
    feature dictionary per input.
"""
batch_size = kwargs.get('batch_size', 256)
for bidx in range(0, len(srcs), batch_size):
batch = srcs[bidx: bidx + batch_size]
batch_feats = cls.ExtractRawFeatures(batch)
for feat_vec in batch_feats:
yield cls.RawToDictFeats(feat_vec)
@classmethod
def ExtractIRFeatures(cls, bytecode: str, **kwargs) -> typing.Dict[str, float]:
"""
Bytecode input in text-level feature space makes no sense. Therefore this function is just a decoy.
"""
raise NotImplementedError("I must not be called.")
return {}
@classmethod
def ExtractRawFeatures(cls, src: typing.Union[str, typing.List[str]], **kwargs) -> typing.Union[typing.List[float], typing.List[typing.List[float]]]:
"""
Invokes BenchPress to collect hidden softmax activations.
Params:
      src: Kernel in string format, or a list of kernels for batched extraction.
    Returns:
      The hidden-state vector as a list of floats, or a list of such vectors for batched input.
"""
global LANGUAGE_MODEL
if not isinstance(src, list):
encoded = LANGUAGE_MODEL.EncodeInputs([src])
hidden_state = LANGUAGE_MODEL.ExtractHidden(encoded).squeeze(0)
else:
encoded = LANGUAGE_MODEL.EncodeInputs(src)
hidden_state = LANGUAGE_MODEL.ExtractHidden(encoded)
return list(hidden_state.detach().cpu().numpy())
@classmethod
def ExtractIRRawFeatures(cls, bytecode: str, **kwargs) -> str:
"""
Bytecode input in text-level feature space makes no sense. Therefore this function is just a decoy.
"""
raise NotImplementedError("I must not be called.")
return ""
@classmethod
def RawToDictFeats(cls, hidden_states: typing.List[float], **kwargs) -> typing.Dict[str, float]:
"""
    Maps a hidden-state vector to a dictionary of feature name -> value.
"""
return {
"{}".format(k): (v) for k, v in zip(KEYS, hidden_states)
} | 4,677 | 33.397059 | 151 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/active_feed_database.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for databases of search-based generation."""
import sys
import tqdm
import pathlib
import multiprocessing
import datetime
import typing
import numpy as np
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from absl import app, flags
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_string(
"active_mergeable_databases",
None,
"Comma separated paths of ActiveFeedDatabase to merge into one."
)
flags.DEFINE_string(
"output_active_db",
None,
"Specify output of active merged database."
)
flags.DEFINE_string(
"output_samples_db",
None,
"Specify output of samples merged database."
)
flags.DEFINE_string(
"active_feed_mode",
None,
"Select module's operation. Choices: \"active_to_samples\" and \"merge_active\""
)
Base = declarative.declarative_base()
class ActiveSamplingSpecs(Base):
__tablename__ = "specifications"
"""
DB Table for concentrated online/active sampling results.
"""
sha256 : str = sql.Column(sql.String(1024), primary_key=True)
active_search_depth : int = sql.Column(sql.Integer, nullable = False)
active_search_width : int = sql.Column(sql.Integer, nullable = False)
feature_space : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
@classmethod
def FromArgs(cls,
act_s_dep : int,
act_s_wid : int,
feat_space : str
) -> typing.TypeVar("ActiveSamplingSpecs"):
return ActiveSamplingSpecs(
sha256 = crypto.sha256_str(str(act_s_dep) + str(act_s_wid) + feat_space),
active_search_depth = act_s_dep,
active_search_width = act_s_wid,
feature_space = feat_space,
)
class ActiveInput(Base, sqlutil.ProtoBackedMixin):
"""
A database for all original inputs used for beam search sampling.
"""
__tablename__ = "input_feeds"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of sample text
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# Text original input
input_feed : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Encoded original input
encoded_feed : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Feature vector of input_feed
input_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Actual length of sample, excluding pads.
num_tokens : int = sql.Column(sql.Integer, nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable = False)
@classmethod
def FromArgs(cls,
tokenizer,
id : int,
input_feed : np.array,
input_features : typing.Dict[str, float],
) -> typing.TypeVar("ActiveInput"):
"""Construt ActiveFeed table entry from argumentns."""
str_input_feed = tokenizer.tokensToString(input_feed, ignore_token = tokenizer.padToken)
if tokenizer.padToken in input_feed:
num_tokens = np.where(input_feed == tokenizer.padToken)[0][0]
else:
num_tokens = len(input_feed)
return ActiveInput(
id = id,
sha256 = crypto.sha256_str(str_input_feed),
input_feed = str_input_feed,
encoded_feed = ','.join([str(x) for x in input_feed]),
input_features = '\n'.join(["{}:{}".format(k, v) for k, v in input_features.items()]),
num_tokens = int(num_tokens),
date_added = datetime.datetime.utcnow(),
)
class ActiveFeed(Base, sqlutil.ProtoBackedMixin):
"""
A database row representing a search-based generational sample.
"""
__tablename__ = "active_feeds"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of sample text
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# Text original input
input_feed : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Encoded original input
encoded_feed : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Feature vector of input_feed
input_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Output sample
sample : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Actual length of sample, excluding pads.
num_tokens : int = sql.Column(sql.Integer, nullable = False)
# Sample's vector of features.
output_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Whether the generated sample is of good quality or not.
sample_quality : float = sql.Column(sql.Float, nullable = False)
# Name and contents of target benchmark specified.
target_benchmark : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Feature vector of target benchmark.
target_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Whether sample compiles or not.
compile_status : bool = sql.Column(sql.Boolean, nullable = False)
# Number of generation for sample
generation_id : int = sql.Column(sql.Integer, nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable = False)
@classmethod
def FromArgs(cls,
tokenizer,
id : int,
input_feed : np.array,
input_features : typing.Dict[str, float],
sample : np.array,
output_features : typing.Dict[str, float],
sample_quality : float,
target_benchmark : typing.Tuple[str, str],
target_features : typing.Dict[str, float],
compile_status : bool,
generation_id : int,
) -> typing.TypeVar("ActiveFeed"):
"""Construt ActiveFeed table entry from argumentns."""
str_input_feed = tokenizer.tokensToString(input_feed, ignore_token = tokenizer.padToken, with_formatting = True)
str_sample = tokenizer.ArrayToCode(sample, with_formatting = True)
num_tokens = len(sample)
if tokenizer.padToken in sample:
num_tokens = np.where(sample == tokenizer.padToken)[0][0]
return ActiveFeed(
id = id,
sha256 = crypto.sha256_str(str_input_feed + str_sample),
input_feed = str_input_feed,
encoded_feed = ','.join([str(x) for x in input_feed]),
input_features = '\n'.join(["{}:{}".format(k, v) for k, v in input_features.items()]),
sample = str_sample,
num_tokens = int(num_tokens),
output_features = '\n'.join(["{}:{}".format(k, v) for k, v in output_features.items()]) if output_features else "None",
target_benchmark = "// {}\n{}".format(target_benchmark[0], target_benchmark[1]),
target_features = '\n'.join(["{}:{}".format(k, v) for k, v in target_features.items()]) if target_features else "None",
sample_quality = sample_quality,
compile_status = compile_status,
generation_id = generation_id,
date_added = datetime.datetime.utcnow(),
)
@classmethod
def FromActiveFeed(cls,
id : int,
sha256 : str,
input_feed : str = "",
encoded_feed : str = "",
input_features : str = "",
sample : str = "",
num_tokens : int = -1,
output_features : str = "",
target_benchmark : str = "",
target_features : str = "",
sample_quality : float = -1,
compile_status : bool = False,
generation_id : int = -1,
date_added : datetime.datetime = datetime.datetime.utcnow()
) -> typing.TypeVar("ActiveFeed"):
return ActiveFeed(
id = id,
sha256 = sha256,
input_feed = input_feed,
encoded_feed = encoded_feed,
input_features = input_features,
sample = sample,
num_tokens = num_tokens,
output_features = output_features,
target_benchmark = target_benchmark,
target_features = target_features,
sample_quality = sample_quality,
compile_status = compile_status,
generation_id = generation_id,
date_added = date_added,
)
class ActiveFeedDatabase(sqlutil.Database):
"""A database monitoring search-based generation process."""
def __init__(self, url: str, must_exist: bool = False, is_replica = False):
if environment.WORLD_RANK == 0 or is_replica:
super(ActiveFeedDatabase, self).__init__(url, Base, must_exist = must_exist)
if environment.WORLD_SIZE > 1 and not is_replica:
# Conduct engine connections to replicated preprocessed chunks.
self.base_path = pathlib.Path(url.replace("sqlite:///", "")).resolve().parent
hash_id = self.base_path.name
try:
tdir = pathlib.Path(FLAGS.local_filesystem).resolve() / hash_id / "node_active_feed"
except Exception:
tdir = pathlib.Path("/tmp").resolve() / hash_id / "node_active_feed"
try:
tdir.mkdir(parents = True, exist_ok = True)
except Exception:
pass
self.replicated_path = tdir / "active_feeds_{}.db".format(environment.WORLD_RANK)
self.replicated = ActiveFeedDatabase(
url = "sqlite:///{}".format(str(self.replicated_path)),
must_exist = must_exist,
is_replica = True
)
distrib.barrier()
return
@property
def input_count(self):
"""Number of input feeds in DB."""
with self.get_session() as s:
count = s.query(ActiveInput).count()
return count
@property
def get_data(self):
"""Return all database in list format"""
with self.get_session() as s:
return s.query(ActiveFeed).all()
@property
def get_features(self):
"""Return all feature vectors of compiling samples."""
with self.get_session() as s:
return [x.output_features for x in s.query(ActiveFeed).yield_per(1000)]
@property
def active_count(self):
"""Number of active samples in DB."""
with self.get_session() as s:
count = s.query(ActiveFeed).count()
return count
@property
def get_session(self):
"""
get proper DB session.
"""
if environment.WORLD_SIZE == 1 or environment.WORLD_RANK == 0:
return self.Session
else:
return self.replicated.Session
def get_data_features(self, target_name: str = None) -> typing.List[typing.Tuple[str, typing.Dict[str, float]]]:
"""Return tuple of code + feature vectors"""
with self.get_session() as s:
r = [(x.sample, self.DictToRawFeats(x.output_features)) for x in s.query(ActiveFeed).yield_per(1000) if (target_name is None or target_name in x.target_benchmark)]
if len(r) == 0:
l.logger().info([target_name])
l.logger().error("{} screwed up.".format(target_name))
return r
def DictToRawFeats(self, dict_feats: str) -> str:
"""Convert dict based feats to Raw feats"""
if " : " not in dict_feats:
dict_feats = dict_feats.replace(":", " : ")
lines = dict_feats.split('\n')
if len(lines) == 8:
flist = ["GreweFeatures:"]
flist.append("file,kernel,comp,rational,mem,localmem,coalesced,atomic,F2:coalesced/mem,F4:comp/mem")
feats = "temp.cl,A"
for l in lines:
feats += "," + l.split(':')[-1]
flist.append(feats)
elif len(lines) == 70:
flist = ["InstCountFeatures:"]
flist += lines
elif len(lines) == 56:
flist = ["AutophaseFeatures:"]
flist += lines
else:
raise ValueError(dict_feats)
return '\n'.join(flist)
def merge_databases(dbs: typing.List[ActiveFeedDatabase], out_db: ActiveFeedDatabase) -> None:
"""
Merges a list of active_feed_databases to a single one, specified in out_db.
Arguments:
dbs: List of active feed databases.
out_db: Exported output database.
Returns:
None
"""
sdir = {}
new_id = out_db.active_count
existing = [dp.sha256 for dp in out_db.get_data]
for db in dbs:
data = db.get_data
for dp in data:
if dp.sha256 not in sdir and dp.sha256 not in existing:
sdir[dp.sha256] = ActiveFeed.FromActiveFeed(
id = new_id,
sha256 = dp.sha256,
input_feed = dp.input_feed,
encoded_feed = dp.encoded_feed,
input_features = dp.input_features,
sample = dp.sample,
num_tokens = dp.num_tokens,
output_features = dp.output_features,
target_benchmark = dp.target_benchmark,
target_features = dp.target_features,
sample_quality = dp.sample_quality,
compile_status = dp.compile_status,
generation_id = dp.generation_id,
date_added = dp.date_added,
)
new_id += 1
with out_db.Session() as s:
    for dp in tqdm.tqdm(sdir.values(), total = len(sdir), desc = "Merged DB"):
      s.add(s.merge(dp))
s.commit()
return
def ToProto(dp: ActiveFeed) -> samples_database.Sample:
return samples_database.Sample(
**samples_database.Sample.FromProto(0, model_pb2.Sample(
train_step = -1,
text = dp.sample,
sample_indices = "",
encoded_sample_indices = "",
original_input = "",
sample_feed = dp.input_feed,
encoded_text = "",
sample_time_ms = 0,
feature_vector = extractor.ExtractRawFeatures(dp.sample),
num_tokens = dp.num_tokens,
compile_status = dp.compile_status,
categorical_sampling = 1,
date_added = dp.date_added.strftime("%m/%d/%Y, %H:%M:%S"),
)
)
)
def active_convert_samples(dbs: typing.List[ActiveFeedDatabase], out_db: samples_database.SamplesDatabase) -> None:
"""
Merges a list of active_feed_databases to a SamplesDatabase db.
Arguments:
dbs: List of active feed databases.
out_db: Exported output samples database.
Returns:
None
"""
sdir = {}
new_id = out_db.count
existing = [dp.sha256 for dp in out_db.get_data]
for db in dbs:
data = []
pool = multiprocessing.Pool()
for dp in tqdm.tqdm(pool.imap_unordered(ToProto, db.get_data), total = db.active_count, desc = "{}".format(pathlib.Path(db.url).name)):
data.append(dp)
for dp in data:
if dp.sha256 not in sdir and dp.sha256 not in existing:
dp.id = new_id
sdir[dp.sha256] = dp
new_id += 1
with out_db.Session() as s:
for dp in tqdm.tqdm(sdir.values(), total = len(sdir.values()), desc = "Output DB"):
s.add(dp)
s.commit()
return
def initMain(*args, **kwargs):
"""
Setup module's operations.
"""
if not FLAGS.active_mergeable_databases:
raise ValueError("Please input active feed databases to merge as a comma separated list.")
db_paths = [pathlib.Path(p).absolute() for p in FLAGS.active_mergeable_databases.replace(" ", "").split(",")]
for p in db_paths:
if not p.exists():
raise FileNotFoundError(p)
dbs = [ActiveFeedDatabase(url = "sqlite:///{}".format(str(p)), must_exist = True) for p in db_paths]
if FLAGS.active_feed_mode == "merge_active":
if not FLAGS.output_active_db:
raise ValueError("Specify out path for merged database")
out_path = pathlib.Path(FLAGS.output_active_db).absolute()
if out_path.suffix != '.db':
raise ValueError("output_active_db must end in a valid database name (.db extension): {}".format(out_path))
out_path.parent.mkdir(exist_ok = True, parents = True)
out_db = ActiveFeedDatabase(url = "sqlite:///{}".format(str(out_path)), must_exist = False)
merge_databases(dbs, out_db)
elif FLAGS.active_feed_mode == "active_to_samples":
out_path = pathlib.Path(FLAGS.output_samples_db).absolute()
if out_path.suffix != '.db':
raise ValueError("output_samples_db must end in a valid database name (.db extension)")
out_path.parent.mkdir(exist_ok = True, parents = True)
out_db = samples_database.SamplesDatabase(url = "sqlite:///{}".format(str(out_path)), must_exist = False)
active_convert_samples(dbs, out_db)
else:
raise ValueError("Invalid value for FLAGS.active_feed_mode: {}".format(FLAGS.active_feed_mode))
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0)
| 18,002 | 37.883369 | 169 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/extractor.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extraction tools for active learning.
"""
import typing
from deeplearning.benchpress.features import grewe
from deeplearning.benchpress.features import instcount
from deeplearning.benchpress.features import autophase
from deeplearning.benchpress.features import hidden_state
from deeplearning.benchpress.util import crypto
from eupy.hermes import client
extractors = {
'GreweFeatures' : grewe.GreweFeatures,
'InstCountFeatures' : instcount.InstCountFeatures,
'AutophaseFeatures' : autophase.AutophaseFeatures,
'HiddenState' : hidden_state.HiddenStateFeatures,
}
def ExtractFeatures(src: str,
ext: typing.List[str] = None,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Dict[str, typing.Dict[str, float]]:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
return {xt: extractors[xt].ExtractFeatures(src, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args, **kwargs) for xt in ext}
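# Illustrative sketch: by default the wrapper runs every registered extractor
# except the hidden-state one and keys the result by extractor name. The kernel
# string below is a made-up example; restricting `ext` to a single space
# returns a dictionary with just that entry.
def _example_extract_features() -> typing.Dict[str, typing.Dict[str, float]]:
  """Illustrative only."""
  src = "kernel void A(global int* a) { a[get_global_id(0)] = 1; }"
  return ExtractFeatures(src, ext = ["GreweFeatures"]) # e.g. {"GreweFeatures": {"comp": ..., "mem": ..., ...}}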
def ExtractFeaturesIter(srcs: typing.List[str],
ext: typing.List[str] = None,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Dict[str, typing.Iterator[typing.Dict[str, float]]]:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
return {xt: extractors[xt].ExtractFeaturesIter(srcs, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args, **kwargs) for xt in ext}
def ExtractIRFeatures(bytecode: str,
                      ext: typing.List[str] = None,
                      **kwargs,
                      ) -> typing.Dict[str, typing.Dict[str, float]]:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
Works for LLVM-IR as an input.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
return {xt: extractors[xt].ExtractIRFeatures(bytecode, **kwargs) for xt in ext}
def ExtractIRFeaturesIter(bytecodes: typing.List[str],
ext: typing.List[str] = None,
**kwargs,
) -> typing.Dict[str, typing.Iterator[typing.Dict[str, float]]]:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
Works for LLVM-IR as an input.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
return {xt: extractors[xt].ExtractIRFeaturesIter(bytecodes, **kwargs) for xt in ext}
def ExtractRawFeatures(src: str,
ext: typing.List[str] = None,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> str:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
if ext and not isinstance(ext, list):
raise TypeError("Requested feature space extractors must be a list, {} received".format(type(ext)))
return '\n'.join(["{}:\n{}".format(xt, extractors[xt].ExtractRawFeatures(src, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args, **kwargs)) for xt in ext])
def ExtractRawFeaturesIter(srcs: typing.List[str],
ext: typing.List[str] = None,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Iterator[str]:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
if ext and not isinstance(ext, list):
raise TypeError("Requested feature space extractors must be a list, {} received".format(type(ext)))
return '\n'.join(["{}:\n{}".format(xt, extractors[xt].ExtractRawFeaturesIter(srcs, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args, **kwargs)) for xt in ext])
def ExtractIRRawFeatures(bytecode: str,
ext: typing.List[str] = None,
**kwargs,
) -> str:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
Works for LLVM-IR as an input.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
if ext and not isinstance(ext, list):
raise TypeError("Requested feature space extractors must be a list, {} received".format(type(ext)))
return '\n'.join(["{}:\n{}".format(xt, extractors[xt].ExtractIRRawFeatures(bytecode, **kwargs)) for xt in ext])
def ExtractIRRawFeaturesIter(bytecodes: typing.List[str],
ext: typing.List[str] = None,
**kwargs,
) -> typing.Iterator[str]:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
Works for LLVM-IR as an input.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
if ext and not isinstance(ext, list):
raise TypeError("Requested feature space extractors must be a list, {} received".format(type(ext)))
return '\n'.join(["{}:\n{}".format(xt, extractors[xt].ExtractIRRawFeaturesIter(bytecodes, **kwargs)) for xt in ext])
def RawToDictFeats(str_feats: typing.Union[str, typing.List[float]],
ext: typing.List[str] = None,
**kwargs,
) -> typing.Dict[str, typing.Dict[str, float]]:
"""
Wrapper method for core feature functions.
Returns a mapping between extractor type(string format) and feature data collected.
"""
if not ext:
ext = [k for k in extractors.keys() if k != 'HiddenState']
if ext and not isinstance(ext, list):
raise TypeError("Requested feature space extractors must be a list, {} received".format(type(ext)))
if ext != ['HiddenState']:
feats = {b.split(":\n")[0]: ''.join(b.split(':\n')[1:]) for b in str_feats.split('\n\n') if b.split(':\n')[1:]}
else:
feats = {ext[0]: str_feats}
return {xt: extractors[xt].RawToDictFeats(feat, **kwargs) for xt, feat in feats.items()}
| 7,868 | 43.965714 | 198 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/normalizers.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
GreweFeatures = {
'comp': 254,
'rational': 61,
'mem': 107,
'localmem': 104,
'coalesced': 100,
'atomic': 20,
'F2:coalesced/mem': 1,
'F4:comp/mem': 1,
}
InstCountFeatures = {
'TotalInsts' : 523,
'TotalBlocks' : 52,
'TotalFuncs' : 66,
'Ret' : 4,
'Br' : 1,
'Switch' : 1,
'IndirectBr' : 1,
'Invoke' : 1, # That is indeed 1.
'Resume' : 1,
'Unreachable' : 1,
'CleanupRet' : 1,
'CatchRet' : 1,
'CatchSwitch' : 14,
'CallBr' : 117,
'FNeg' : 89,
'Add' : 55,
'FAdd' : 55,
'Sub' : 56,
'FSub' : 91,
'Mul' : 8,
'FMul' : 55,
'UDiv' : 64,
'SDiv' : 11,
'FDiv' : 8,
'URem' : 1,
'SRem' : 36,
'FRem' : 21,
'Shl' : 28,
'LShr' : 35,
'AShr' : 61,
'And' : 76,
'Or' : 42,
'Xor' : 90,
'Alloca' : 84,
'Load' : 112,
'Store' : 1,
'GetElementPtr' : 1,
'Fence' : 1,
'AtomicCmpXchg' : 30,
'AtomicRMW' : 96,
'Trunc' : 52,
'ZExt' : 8,
'SExt' : 30,
'FPToUI' : 27,
'FPToSI' : 17,
'UIToFP' : 64,
'SIToFP' : 64,
'FPTrunc' : 28,
'FPExt' : 13,
'PtrToInt' : 66,
'IntToPtr' : 43,
'BitCast' : 1,
'AddrSpaceCast' : 1,
'CleanupPad' : 42,
'CatchPad' : 21,
'ICmp' : 111,
'FCmp' : 129,
'PHI' : 39,
'Call' : 1,
'Select' : 1,
'UserOp1' : 1,
'UserOp2' : 60,
'VAArg' : 44,
'ExtractElement' : 47,
'InsertElement' : 26,
'ShuffleVector' : 1,
'ExtractValue' : 0,
'InsertValue' : 1,
'LandingPad' : 1,
'Freeze' : 1,
}
AutophaseFeatures = {
'BBNumArgsHi' : 25,
'BBNumArgsLo' : 19,
'onePred' : 34,
'onePredOneSuc' : 32,
'onePredTwoSuc' : 26,
'oneSuccessor' : 32,
'twoPred' : 31,
'twoPredOneSuc' : 17,
'twoEach' : 31,
'twoSuccessor' : 34,
'morePreds' : 9,
'BB03Phi' : 21,
'BBHiPhi' : 25,
'BBNoPhi' : 43,
'BeginPhi' : 32,
'BranchCount' : 50,
'returnInt' : 44,
'CriticalCount' : 57,
'NumEdges' : 84,
'const32Bit' : 135,
'const64Bit' : 262,
'numConstZeroes' : 119,
'numConstOnes' : 65,
'UncondBranches' : 32,
'binaryConstArg' : 97,
'AShr' : 28,
'Add' : 117,
'Alloca' : 42,
'And' : 35,
'BlockMid' : 11,
'BlockLow' : 51,
'BitCast' : 66,
'Br' : 50,
'Call' : 129,
'GetElementPtr' : 112,
'ICmp' : 42,
'LShr' : 21,
'Load' : 90,
'Mul' : 56,
'Or' : 61,
'PHI' : 111,
'Ret' : 11,
'SExt' : 52,
'Select' : 39,
'Shl' : 36,
'Store' : 84,
'Sub' : 55,
'Trunc' : 30,
'Xor' : 76,
'ZExt' : 96,
'TotalBlocks' : 51,
'TotalInsts' : 523,
'TotalMemInst' : 269,
'TotalFuncs' : 11,
'ArgsPhi' : 230,
'testUnary' : 229,
}
normalizer = {
'GreweFeatures' : GreweFeatures,
'InstCountFeatures' : InstCountFeatures,
'AutophaseFeatures' : AutophaseFeatures,
}
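# Illustrative sketch: each mapping above records a per-feature reference
# magnitude for its feature space; for example, the grid walk in
# feature_sampler scales them from 1% to 100% to build target vectors. The
# helper below shows one possible way to divide a raw feature vector by these
# constants; it is a sketch only, not an API used by the rest of the code.
def _example_scale_by_normalizer(feats, space = "GreweFeatures"):
  """Illustrative only."""
  ref = normalizer[space]
  return {k: v / ref[k] for k, v in feats.items() if k in ref}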
| 3,238 | 18.871166 | 74 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/autophase.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature Extraction module for Autophase paper features.
"""
import typing
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import environment
AUTOPHASE = ["-load", environment.AUTOPHASE, "-autophase"]
class AutophaseFeatures(object):
"""
  56-dimensional LLVM-IR feature vector from the AutoPhase project, counting
  basic-block shapes, branches, instruction mix and constant usage of the
  compiled kernel.
"""
def __init__(self):
return
@classmethod
def ExtractFeatures(cls,
src: str,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Dict[str, float]:
return cls.RawToDictFeats(cls.ExtractRawFeatures(src, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args))
@classmethod
  def ExtractIRFeatures(cls, bytecode: str, **kwargs) -> typing.Dict[str, float]:
    return cls.RawToDictFeats(cls.ExtractIRRawFeatures(bytecode))
@classmethod
def ExtractRawFeatures(cls,
src: str,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> str:
try:
return opencl.CompileOptimizer(src, AUTOPHASE, header_file = header_file, use_aux_headers = use_aux_headers, extra_args = extra_args)
except ValueError:
return ""
@classmethod
def ExtractIRRawFeatures(cls, bytecode: str, **kwargs) -> str:
try:
return opencl.CompileOptimizerIR(bytecode, AUTOPHASE)
except ValueError:
return ""
@classmethod
def RawToDictFeats(cls, str_feats: str, **kwargs) -> typing.Dict[str, float]:
return {feat.split(' : ')[0]: int(feat.split(' : ')[1]) for feat in str_feats.split('\n') if ' : ' in feat}
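  # A minimal sketch of the expected raw format, assuming the Autophase opt pass
  # prints one "<name> : <value>" pair per line (the values here are illustrative):
  #
  #   raw = "BranchCount : 50\nNumEdges : 84"
  #   AutophaseFeatures.RawToDictFeats(raw)
  #   # -> {"BranchCount": 50, "NumEdges": 84}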
| 2,570 | 36.26087 | 145 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/preprocessors/normalizer.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python entry point to the clang_rewriter binary."""
import os
import subprocess
import tempfile
import typing
from deeplearning.benchpress.util import environment
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
# Path of the clang rewriter binary.
CLANG_REWRITER = environment.CLANG_REWRITER
SEQ_CLANG_REWRITER = environment.SEQ_CLANG_REWRITER
# On Linux we must preload the LLVM libraries.
CLANG_REWRITER_ENV = os.environ.copy()
libclang = os.path.join(environment.LLVM, "lib/libclang.so")
liblto = os.path.join(environment.LLVM, "lib/libLTO.so")
CLANG_REWRITER_ENV["LD_PRELOAD"] = f"{libclang}:{liblto}"
def NormalizeIdentifiers(text: str,
suffix: str,
cflags: typing.List[str],
sequential_rewrite = False,
timeout_seconds: int = 60
) -> str:
"""Normalize identifiers in source code.
An LLVM rewriter pass which renames all functions and variables with short,
unique names. The variables and functions defined within the input text
are rewritten, with the sequence 'A', 'B', ... 'AA', 'AB'... being used for
function names, and the sequence 'a', 'b', ... 'aa', 'ab'... being used for
variable names. Functions and variables which are defined in #include files
are not renamed. Undefined function and variable names are not renamed.
Args:
text: The source code to rewrite.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing the rewriter.
Returns:
Source code with identifier names normalized.
Raises:
RewriterException: If rewriter found nothing to rewrite.
ClangTimeout: If rewriter fails to complete within timeout_seconds.
"""
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", suffix=suffix, dir = tdir) as f:
f.write(text)
f.flush()
REWRITER = SEQ_CLANG_REWRITER if sequential_rewrite else CLANG_REWRITER
cmd = (
["timeout", "-s9", str(timeout_seconds), str(REWRITER), f.name]
+ ["-extra-arg=" + x for x in cflags]
+ ["--"]
)
l.logger().debug("$ {}{}".format(
f'LD_PRELOAD={CLANG_REWRITER_ENV["LD_PRELOAD"]} '
if "LD_PRELOAD" in CLANG_REWRITER_ENV
else "",
" ".join(cmd),
),
)
process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=CLANG_REWRITER_ENV,
)
stdout, stderr = process.communicate()
l.logger().debug("stdout: {}".format(stdout))
l.logger().debug("stderr: {}".format(stderr))
# If there was nothing to rewrite, the rewriter exits with error code:
EUGLY_CODE = 204
if process.returncode == EUGLY_CODE:
# Propagate the error:
raise ValueError(stderr)
elif process.returncode == 9:
raise ValueError(
f"clang_rewriter failed to complete after {timeout_seconds}s"
)
# The rewriter process can still fail because of some other compilation
# problem, e.g. for some reason the 'enable 64bit support' pragma which should
# be included in the shim isn't being propogated correctly to the rewriter.
# However, the rewriter will still correctly process the input, so we ignore
# all error codes except the one we care about (EUGLY_CODE).
return stdout
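# A minimal usage sketch, assuming the clang_rewriter binary is available at
# environment.CLANG_REWRITER; exact names depend on the rewriter build, but the
# renaming scheme follows the docstring above:
#
#   src = "void compute(int value) { int counter = value; }"
#   NormalizeIdentifiers(src, ".c", cflags=[])
#   # -> roughly "void A(int a) { int b = a; }"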
| 4,229 | 36.105263 | 80 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/preprocessors/public.py | # coding=utf-8
# Copyright 2022 Chris Cummins and Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the decorator for marking a BenchPress preprocessor function."""
import typing
from absl import flags
FLAGS = flags.FLAGS
# Type hint for a preprocessor function. See @benchpress_preprocess for details.
PreprocessorFunction = typing.Callable[[str], str]
def benchpress_preprocessor(func: PreprocessorFunction) -> PreprocessorFunction:
"""A decorator which marks a function as a BenchPress preprocessor.
A BenchPress preprocessor is accessible using GetPreprocessFunction(), and is a
function which accepts a single parameter 'text', and returns a string.
Type hinting is used to ensure that any function wrapped with this decorator
has the appropriate argument and return type. If the function does not, an
InternalError is raised at the time that the module containing the function
is imported.
Args:
func: The preprocessor function to decorate.
Returns:
The decorated preprocessor function.
Raises:
InternalError: If the function being wrapped does not have the signature
'def func(text: str) -> str:'.
"""
type_hints = typing.get_type_hints(func)
if not (type_hints == {"text": str, "return": str} or type_hints == {"text": str, "return": typing.List[str]} or type_hints == {"text": str, "return": typing.List[typing.Tuple[str, str]]}):
raise SystemError(
f"Preprocessor {func.__name__} does not have signature "
f'"def {func.__name__}(text: str) -> str".'
f"or"
f'"def {func.__name__}(text: str) -> typing.List[str]".'
)
func.__dict__["is_benchpress_preprocessor"] = True
return func
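# A minimal sketch of a conforming preprocessor; the function name is illustrative
# and it still has to live in a module that GetPreprocessFunction() can import:
#
#   @benchpress_preprocessor
#   def StripTrailingWhitespace(text: str) -> str:
#     return "\n".join(line.rstrip() for line in text.split("\n"))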
| 2,213 | 37.842105 | 191 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/preprocessors/clang.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains utility code for working with clang.
This module does not expose any preprocessor functions for BenchPress. It contains
wrappers around Clang binaries, which preprocessor functions can use to
implement specific behavior. See deeplearning.clgen.preprocessors.cxx.Compile()
for an example.
"""
import json
import re
import pathlib
import humanize
import subprocess
import tempfile
import typing
import string
import clang.cindex
from absl import flags
from deeplearning.benchpress.util import environment
from eupy.native import logger as l
FLAGS = flags.FLAGS
# The marker used to mark stdin from clang pre-processor output.
CLANG_STDIN_MARKER = re.compile(r'# \d+ "<stdin>" 2')
# Options to pass to clang-format.
# See: http://clang.llvm.org/docs/ClangFormatStyleOptions.html
CLANG_FORMAT_CONFIG = {
"BasedOnStyle": "Google",
"ColumnLimit": 5000,
"IndentWidth": 2,
"AllowShortBlocksOnASingleLine": False,
"AllowShortCaseLabelsOnASingleLine": False,
"AllowShortFunctionsOnASingleLine": False,
"AllowShortLoopsOnASingleLine": False,
"AllowShortIfStatementsOnASingleLine": False,
"DerivePointerAlignment": False,
"PointerAlignment": "Left",
"BreakAfterJavaFieldAnnotations": True,
"BreakBeforeInheritanceComma": False,
"BreakBeforeTernaryOperators": False,
"AlwaysBreakAfterReturnType": "None",
"AlwaysBreakAfterDefinitionReturnType": "None",
}
clang.cindex.Config.set_library_path(environment.LLVM_LIB)
if environment.LLVM_VERSION != 6:
# LLVM 9 needs libclang explicitly defined.
clang.cindex.Config.set_library_file(environment.LLVM_LIB + "/libclang.so.{}".format(environment.LLVM_VERSION))
CLANG = environment.CLANG
CLANG_FORMAT = environment.CLANG_FORMAT
OPT = environment.OPT
LLVM_EXTRACT = environment.LLVM_EXTRACT
LLVM_DIS = environment.LLVM_DIS
def StripPreprocessorLines(src: str) -> str:
"""Strip preprocessor remnants from clang frontend output.
Args:
src: Clang frontend output.
Returns:
The output with preprocessor output stripped.
"""
lines = src.split("\n")
# Determine when the final included file ends.
for i in range(len(lines) - 1, -1, -1):
if CLANG_STDIN_MARKER.match(lines[i]):
break
else:
return ""
# Strip lines beginning with '#' (that's preprocessor stuff):
return "\n".join([line for line in lines[i:] if not line.startswith("#")])
def Preprocess(
src: str,
cflags: typing.List[str],
timeout_seconds: int = 60,
strip_preprocessor_lines: bool = True,
):
"""Run input code through the compiler frontend to expand macros.
This uses the repository clang binary.
Args:
src: The source code to preprocess.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
strip_preprocessor_lines: Whether to strip the extra lines introduced by
the preprocessor.
Returns:
The preprocessed code.
Raises:
ClangException: In case of an error.
ClangTimeout: If clang does not complete before timeout_seconds.
"""
cmd = [
"timeout",
"-s9",
str(timeout_seconds),
str(CLANG),
"-E",
"-c",
"-",
"-o",
"-",
] + cflags
process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate(src)
if process.returncode == 9:
raise ValueError(
f"Clang preprocessor timed out after {timeout_seconds}s"
)
elif process.returncode != 0:
raise ValueError(stderr)
if strip_preprocessor_lines:
return StripPreprocessorLines(stdout)
else:
return stdout
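# A minimal usage sketch, assuming the repository clang binary; the macro is
# expanded by the frontend and the line markers are then stripped:
#
#   Preprocess("#define N 4\nint x = N;", cflags=["-xc"])
#   # -> roughly "int x = 4;" (surrounding whitespace depends on the clang version)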
def CompileLlvmBytecode(src: str,
suffix: str,
cflags: typing.List[str],
header_file: str = None,
timeout_seconds: int = 60
) -> str:
"""Compile input code into textual LLVM byte code using clang system binary.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
The textual LLVM byte code.
Raises:
ValueError: In case of an error.
ValueError: If clang does not complete before timeout_seconds.
"""
builtin_cflags = ["-S", "-emit-llvm", "-o", "-"]
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", prefix="benchpress_preprocessors_clang_", suffix=suffix, dir = tdir) as f:
f.write(src)
f.flush()
extra_args = []
if header_file:
htf = tempfile.NamedTemporaryFile('w', prefix = "benchpress_preprocessors_clang_header_", suffix = ".h", dir = tdir)
htf.write(header_file)
htf.flush()
extra_args = ['-include{}'.format(htf.name)]
cmd = (
["timeout", "-s9", str(timeout_seconds), str(CLANG), f.name]
+ builtin_cflags
+ cflags
+ extra_args
)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate()
if process.returncode == 9:
raise ValueError(f"Clang timed out after {timeout_seconds}s")
elif process.returncode != 0:
raise ValueError("/*\n{}\n*/\n{}".format(stderr, src))
return stdout
def CompileStdin(src: str,
suffix: str,
cflags: typing.List[str],
header_file: str = None,
timeout_seconds: int = 60
) -> str:
"""Compile input code into textual LLVM byte code using clang system binary from standard input.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
The textual LLVM byte code.
Raises:
ValueError: In case of an error.
ValueError: If clang does not complete before timeout_seconds.
"""
builtin_cflags = ["-S", "-emit-llvm", "-o", "-"]
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
extra_args = []
if header_file:
htf = tempfile.NamedTemporaryFile('w', prefix = "benchpress_preprocessors_clang_header_", suffix = ".h", dir = tdir)
htf.write(header_file)
htf.flush()
extra_args = ['-include{}'.format(htf.name)]
cmd = (
["timeout", "-s9", str(timeout_seconds), str(CLANG)]
+ builtin_cflags
+ cflags
+ extra_args
+ ["-"]
)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin = subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate(input = src)
if process.returncode == 9:
raise ValueError(f"Clang timed out after {timeout_seconds}s")
elif process.returncode != 0:
raise ValueError("/*\n{}\n*/\n{}".format(stderr, src))
return stdout
def HumanReadableBytecode(bc_path: pathlib.Path, timeout_seconds: int = 60) -> str:
"""Run llvm-dis to disassemble binary bytecode file to human readable format.
Args:
bc_path: The path to bytecode.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
The textual LLVM byte code.
Raises:
ValueError: In case of an error.
ValueError: If clang does not complete before timeout_seconds.
"""
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", prefix="human_readable_ll", suffix='.ll', dir = tdir) as f:
cmd = (
["timeout",
"-s9",
str(timeout_seconds),
str(LLVM_DIS),
str(bc_path),
"-o",
str(f.name)]
)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate()
readable_bc = open(str(f.name), 'r').read()
if process.returncode == 9:
raise ValueError(f"Clang timed out after {timeout_seconds}s")
elif process.returncode != 0:
raise ValueError("/*\n{}\n*/\n{}".format(stderr, str(bc_path)))
return readable_bc
def CompileOptimizer(src: str,
suffix: str,
cflags: typing.List[str],
optimization: typing.List[str],
header_file: str = None,
timeout_seconds: int = 60,
) -> str:
"""Compile source code to IR and apply optimization pass to source code.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
Dictionary with 70-dimensional InstCount feature vector.
Raises:
ValueError: In case of an error.
ValueError: If clang does not complete before timeout_seconds.
"""
try:
bc = CompileLlvmBytecode(src, suffix, cflags, header_file, timeout_seconds)
except ValueError as e:
raise ValueError("Compilation failed")
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", prefix="benchpress_preprocessors_clang_", suffix='.ll', dir = tdir) as f:
f.write(bc)
f.flush()
if header_file:
"""
If the investigated kernel needs header files to be included,
then, call llvm-extract afterwards, extract that kernel and write
it to f.name.
"""
# Hacky way, but llvm-extract requires exact kernel function name
k_name = src.split('kernel void')[1].split()
k_name = k_name[1] if "attribute" in k_name[0] else k_name[0]
k_name = k_name.split('(', 1)[0]
ext_cmd = (
["timeout", "-s9", str(timeout_seconds), str(LLVM_EXTRACT)]
+ [f.name, "--func={}".format(k_name), "-o", f.name]
)
ext_proc = subprocess.Popen(
ext_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
ext_out, ext_err = ext_proc.communicate()
if ext_err:
raise ValueError(ext_err)
cmd = (
["timeout", "-s9", str(timeout_seconds), str(OPT)]
+ optimization
+ [f.name, "-o", "/dev/null"]
)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate()
if process.returncode == 9:
raise ValueError(f"Clang timed out after {timeout_seconds}s")
elif process.returncode != 0:
raise ValueError("/*\n{}\n*/\n{}".format(stderr, src))
return stdout
def CompileOptimizerStdin(src: str,
suffix: str,
cflags: typing.List[str],
optimization: typing.List[str],
header_file: str = None,
timeout_seconds: int = 60,
) -> str:
"""Compile source code to IR and apply optimization pass to source code.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
Dictionary with 70-dimensional InstCount feature vector.
Raises:
ValueError: In case of an error.
ValueError: If clang does not complete before timeout_seconds.
"""
try:
bc = CompileStdin(src, suffix, cflags, header_file, timeout_seconds)
except ValueError as e:
raise ValueError("Compilation failed")
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
if header_file:
"""
If the investigated kernel needs header files to be included,
then, call llvm-extract afterwards, extract that kernel and write
it to f.name.
"""
# Hacky way, but llvm-extract requires exact kernel function name
k_name = src.split('kernel void')[1].split()
k_name = k_name[1] if "attribute" in k_name[0] else k_name[0]
k_name = k_name.split('(', 1)[0]
    raise NotImplementedError("Piping the bytecode through llvm-extract via stdin is not implemented yet.")
ext_cmd = (
["timeout", "-s9", str(timeout_seconds), str(LLVM_EXTRACT)]
+ [f.name, "--func={}".format(k_name), "-o", f.name]
)
ext_proc = subprocess.Popen(
ext_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
ext_out, ext_err = ext_proc.communicate()
if ext_err:
raise ValueError(ext_err)
cmd = (
["timeout", "-s9", str(timeout_seconds), str(OPT)]
+ optimization
+ ["-o", "/dev/null"]
+ ["-"]
)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin = subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate(input = bc)
if process.returncode == 9:
raise ValueError(f"Clang timed out after {timeout_seconds}s")
elif process.returncode != 0:
raise ValueError("/*\n{}\n*/\n{}".format(stderr, src))
return stdout
def CompileOptimizerIR(bytecode: str,
suffix: str,
optimization: typing.List[str],
timeout_seconds: int = 60,
) -> str:
"""Apply optimization pass directly to LLVM-IR bytecode.
Args:
bytecode: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
Dictionary with 70-dimensional InstCount or 58-dimensional Autophase feature vector.
Raises:
ValueError: In case of an error.
ValueError: If clang does not complete before timeout_seconds.
"""
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", prefix="benchpress_preprocessors_clang_", suffix='.ll', dir = tdir) as f:
    f.write(bytecode)
f.flush()
cmd = (
["timeout", "-s9", str(timeout_seconds), str(OPT)]
+ optimization
+ [f.name, "-o", "/dev/null"]
)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate()
if process.returncode == 9:
raise ValueError(f"Clang timed out after {timeout_seconds}s")
elif process.returncode != 0:
raise ValueError("/*\n{}\n*/\n{}".format(stderr, bytecode))
return stdout
def Compile(src: str,
suffix: str,
cflags: typing.List[str],
header_file: str = None,
return_diagnostics: bool = False,
) -> str:
"""Check input source code for if it compiles.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
Returns:
The text, unmodified.
Raises:
ValueError: In case of an error.
"""
builtin_cflags = ["-S", "-emit-llvm", "-o", "-"]
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", prefix="benchpress_preprocessors_clang_", suffix=suffix, dir = tdir) as f:
f.write(src)
f.flush()
extra_args = []
if header_file:
htf = tempfile.NamedTemporaryFile('w', prefix = "benchpress_preprocessors_clang_header_", suffix = ".h", dir = tdir)
htf.write(header_file)
htf.flush()
extra_args = ['-include{}'.format(htf.name)]
try:
unit = clang.cindex.TranslationUnit.from_source(f.name, args = builtin_cflags + cflags + extra_args)
except clang.cindex.TranslationUnitLoadError as e:
raise ValueError(e)
diagnostics = [str(d) for d in unit.diagnostics if d.severity > 2]
# diagnostics = [str(d) for d in unit.diagnostics if d.severity > 2 and not "implicit declaration of function" not in str(d)]
if len(diagnostics) > 0:
if return_diagnostics:
return src, [(d.location.line, d.location.column) for d in unit.diagnostics if d.severity > 2]
else:
raise ValueError("/*\n{}\n*/\n{}".format('\n'.join(diagnostics), src))
else:
if return_diagnostics:
return src, []
else:
return src
def Parse(src: str,
suffix: str,
cflags: typing.List[str],
return_diagnostics: bool = False
) -> str:
"""Parse input code using clang.Cindex python module.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
Returns:
The textual LLVM byte code.
Raises:
ValueError: In case of an error.
"""
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", prefix="benchpress_preprocessors_clang_", suffix=suffix, dir = tdir) as f:
try:
unit = clang.cindex.TranslationUnit.from_source(f.name, args = cflags, unsaved_files = [(f.name, src)])
except clang.cindex.TranslationUnitLoadError as e:
raise ValueError(e)
diagnostics = [d for d in unit.diagnostics if d.severity > 2 and d.category_number in {1, 4}]
if len(diagnostics) > 0:
if return_diagnostics:
return src, [(d.location.line, d.location.column) for d in diagnostics]
else:
raise ValueError("/*\n{}\n*/\n{}".format('\n'.join([str(d) for d in diagnostics]), src))
else:
if return_diagnostics:
return src, []
else:
return src
def ClangFormat(src: str, suffix: str, timeout_seconds: int = 60) -> str:
"""Run clang-format on a source to enforce code style.
Args:
src: The source code to run through clang-format.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
timeout_seconds: The number of seconds to allow clang-format to run for.
Returns:
The output of clang-format.
Raises:
ClangFormatException: In case of an error.
ClangTimeout: If clang-format does not complete before timeout_seconds.
"""
cmd = [
"timeout",
"-s9",
str(timeout_seconds),
str(CLANG_FORMAT),
"-assume-filename",
f"input{suffix}",
"-style={}".format(json.dumps(CLANG_FORMAT_CONFIG))
]
process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate(src)
if process.returncode == 9:
raise ValueError(f"clang-format timed out after {timeout_seconds}s")
elif process.returncode != 0:
raise ValueError(stderr)
return stdout
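# A minimal usage sketch, assuming the repository clang-format binary; with the
# Google-based style above, short bodies are expanded rather than merged:
#
#   ClangFormat("int  main( ){return 0 ;}", ".cpp")
#   # -> "int main() {\n  return 0;\n}\n" (exact output may vary across versions)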
def ExtractFunctions(src: str,
suffix: str,
cflags: typing.List[str]
) -> typing.List[str]:
"""Splits translation unit into separate functions using tokenizer.
WARNING! Functions might need formatting after this preprocessor,
if you care about formatting.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
Returns:
List of separate string functions
Raises:
ValueError: In case of an error.
"""
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile(
"w", prefix="benchpress_preprocessors_clang_", suffix=suffix, dir = tdir
) as f:
try:
unit = clang.cindex.TranslationUnit.from_source(f.name, args = cflags, unsaved_files = [(f.name, src)])#, args = args + builtin_cflags)
except clang.cindex.TranslationUnitLoadError as e:
raise ValueError(e)
def next_token(token_iter):
"""Return None if iterator is consumed."""
try:
return next(token_iter)
except StopIteration:
return None
functions = []
tokiter = unit.get_tokens(extent = unit.cursor.extent)
token = next_token(tokiter)
while token:
# Do sth with token
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
if cur.kind == clang.cindex.CursorKind.FUNCTION_DECL:
# Found starting point of function declaration.
func = []
func.append(token.spelling)
token = next_token(tokiter)
while token and token.spelling != ")":
# Go until the closing parenthesis of parameters.
func.append(token.spelling)
token = next_token(tokiter)
while token and token.spelling != "{" and token.spelling != ";":
# Reject comments etc. until opening brace or semi-colon.
func.append(token.spelling)
token = next_token(tokiter)
if token and token.spelling == "{":
# Function with a body.
lbr, rbr = 1, 0
while token and lbr != rbr:
func.append(token.spelling)
token = next_token(tokiter)
if token and token.spelling == "{":
lbr += 1
elif token and token.spelling == "}":
rbr += 1
if token:
func.append(token.spelling)
functions.append(' '.join(func))
token = next_token(tokiter)
else:
# Just a function declaration.
token = next_token(tokiter)
else:
token = next_token(tokiter)
return functions
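# A minimal usage sketch; the returned strings are whitespace-joined token streams,
# so formatting is lost, and bare declarations (no body) are skipped:
#
#   src = "int add(int a, int b) { return a + b; }\nvoid noop();"
#   ExtractFunctions(src, ".c", cflags=[])
#   # -> ["int add ( int a , int b ) { return a + b ; }"]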
def ExtractStructs(src: str,
suffix: str,
cflags: typing.List[str]
) -> typing.List:
"""Splits translation unit into separate structs.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
Returns:
List of separate string structs.
"""
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile(
"w", prefix="benchpress_preprocessors_clang_", suffix=suffix, dir = tdir
) as f:
try:
unit = clang.cindex.TranslationUnit.from_source(f.name, args = cflags, unsaved_files = [(f.name, src)])#, args = args + builtin_cflags)
except clang.cindex.TranslationUnitLoadError as e:
raise ValueError(e)
def next_token(token_iter):
"""Return None if iterator is consumed."""
try:
return next(token_iter)
except StopIteration:
return None
def parse_struct(unit, token, it, is_typedef = False):
"""
Token is starting point of struct. Accept the rest.
"""
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
if is_typedef:
if token.spelling != "typedef":
return None
token = next_token(it)
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
if token.spelling != "struct":
return None
else:
if cur.kind != clang.cindex.CursorKind.STRUCT_DECL:
return None
token = next_token(it)
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
if cur.kind != clang.cindex.CursorKind.STRUCT_DECL:
return None
if not cur.is_definition():
return None
if token.kind != clang.cindex.TokenKind.IDENTIFIER:
# For now accept only 'struct name {', although 'struct {} name;' exists as well.
return None
text = ["struct", token.spelling]
if is_typedef:
text = ["typedef"] + text
name = token.spelling
fields = []
token = next_token(it)
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
cur_field = []
cur_type = None
lbc, rbc = 0, 0
# while cur.kind in {clang.cindex.CursorKind.STRUCT_DECL, clang.cindex.CursorKind.FIELD_DECL, clang.cindex.CursorKind.TYPE_REF}:
try:
while lbc + rbc == 0 or lbc != rbc:
if token.spelling == "{":
lbc += 1
if token.spelling == "}":
rbc += 1
if cur.kind == clang.cindex.CursorKind.FIELD_DECL or token.spelling == ";":
if cur_type is None:
cur_type = token.spelling
if token.spelling == ",":
cur_field.append(";")
fields.append(cur_field)
text += cur_field
cur_field = []
token = next_token(it)
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
cur_field += [cur_type, token.spelling]
elif token.spelling == ";":
cur_field.append(token.spelling)
fields.append(cur_field)
text += cur_field
cur_field = []
cur_type = None
else:
cur_field.append(token.spelling)
else:
text.append(token.spelling)
token = next_token(it)
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
except AttributeError:
return None
if is_typedef:
if token.spelling != ";":
text.append(token.spelling)
name = token.spelling
token = next_token(it)
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
if token.spelling not in {";", "="}:
        # Expected ';' or '=' after the typedef struct name; anything else is unparsable.
        return None
text.append(";")
return {
'text': text,
'name': name,
'fields': fields,
}
structs = []
tokiter = unit.get_tokens(extent = unit.cursor.extent)
token = next_token(tokiter)
while token:
struct = None
if token.spelling == "struct":
struct = parse_struct(unit, token, tokiter)
elif token.spelling == "typedef":
struct = parse_struct(unit, token, tokiter, is_typedef = True)
if struct:
structs.append(struct)
token = next_token(tokiter)
return structs
def DeriveSourceVocab(src: str,
token_list: typing.Set[str],
suffix: str,
cflags: typing.List[str],
) -> typing.Dict[str, str]:
"""Pass source code through clang's lexer and return set of tokens.
Args:
src: The source code to compile.
token_list: External set of grammar tokens for target language.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
Returns:
Set of unique source code tokens
Raises:
ValueError: In case of an error.
"""
builtin_cflags = ["-S", "-emit-llvm", "-o", "-"]
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile(
"w", prefix="benchpress_preprocessors_clang_", suffix=suffix, dir = tdir
) as f:
f.write(src)
f.flush()
try:
unit = clang.cindex.TranslationUnit.from_source(f.name, args = builtin_cflags + cflags)
except clang.cindex.TranslationUnitLoadError as e:
raise ValueError(e)
tokens = {}
for ch in string.printable:
# Store all printable characters as char-based, to save time iterating literals.
tokens["{}-char-based".format(ch)] = ''
for idx, t in enumerate(unit.get_tokens(extent = unit.cursor.extent)):
str_t = str(t.spelling)
if str_t in token_list or t.kind in {clang.cindex.TokenKind.KEYWORD, clang.cindex.TokenKind.PUNCTUATION}:
tokens[str_t] = ' '
else:
if t.kind != clang.cindex.TokenKind.LITERAL and clang.cindex.Cursor.from_location(unit, t.extent.end).kind not in {clang.cindex.CursorKind.CALL_EXPR}:
tokens[str_t] = ' '
return tokens
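# A minimal usage sketch; keywords and punctuation become vocabulary entries with a
# space delimiter, and every printable character also gets a char-based fallback key:
#
#   v = DeriveSourceVocab("int add(int a, int b) { return a + b; }", set(), ".c", [])
#   # v["int"] == ' ', v["{"] == ' ', and "a-char-based" in v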
def AtomizeSource(src: str,
vocab: typing.Set[str],
suffix: str,
cflags: typing.List[str],
) -> typing.List[str]:
"""
Split source code into token atoms with clang's lexer.
Args:
src: The source code to compile.
vocab: Optional set of learned vocabulary of tokenizer.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
Returns:
Source code as a list of tokens.
Raises:
ValueError: In case of an error.
"""
builtin_cflags = ["-S", "-emit-llvm", "-o", "-"]
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile(
"w", prefix="benchpress_preprocessors_clang_", suffix=suffix, dir = tdir
) as f:
f.write(src)
f.flush()
try:
unit = clang.cindex.TranslationUnit.from_source(f.name, args = builtin_cflags + cflags)
except clang.cindex.TranslationUnitLoadError as e:
raise ValueError(e)
tokens = []
lookout_metaToken, cmt = False, None
for idx, t in enumerate(unit.get_tokens(extent = unit.cursor.extent)):
str_t = t.spelling
if str_t in {'START', 'MASK', 'HOLE', 'END', 'PAD'} and len(tokens) == 0:
l.logger().warn("Please inspect the following code, having triggered a meta token existence without left brace preceding:")
l.logger().warn(src)
if str_t in {'START', 'MASK', 'HOLE', 'END', 'PAD'} and len(tokens) > 0 and tokens[-1] == '[':
cmt = str_t
lookout_metaToken = True
elif str_t in vocab:
if lookout_metaToken and str_t == ']':
tokens[-1] = "[{}]".format(cmt)
lookout_metaToken = False
else:
tokens.append(str(t.spelling))
else:
for ch in str_t:
tokens.append("{}-char-based".format(ch))
return tokens
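# A minimal usage sketch with a toy vocabulary; in-vocabulary lexer tokens are kept
# whole, out-of-vocabulary literals fall back to per-character atoms, and bracketed
# meta tokens such as [HOLE] would be re-fused into single atoms:
#
#   AtomizeSource("int x = 42;", {"int", "x", "=", ";"}, ".c", [])
#   # -> ["int", "x", "=", "4-char-based", "2-char-based", ";"]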
def GreweFeatureExtraction(src: str,
                           suffix: str,
cflags: typing.List[str]
) -> typing.Dict[str, float]:
"""
!!! Under construction.
"""
builtin_cflags = ["-S", "-emit-llvm", "-o", "-"]
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile(
"w", prefix="benchpress_preprocessors_clang_", suffix=suffix, dir = tdir
) as f:
f.write(src)
f.flush()
try:
unit = clang.cindex.TranslationUnit.from_source(f.name, args = builtin_cflags + cflags)
except clang.cindex.TranslationUnitLoadError as e:
return None
def next_token(token_iter):
"""Return None if iterator is consumed."""
try:
return next(token_iter)
except StopIteration:
return None
feat_vec = {
'comp': 0.0,
'rational': 0.0,
'mem': 0.0,
'localmem': 0.0,
'coalesced': 0.0,
'atomic': 0.0,
'F2:coalesced/mem': 0.0,
'F4:comp/mem': 0.0,
}
tokiter = unit.get_tokens(extent = unit.cursor.extent)
token = next_token(tokiter)
while token:
# Do sth with token
cur = clang.cindex.Cursor.from_location(unit, token.extent.start)
    return {}
 | 31,870 | 30.649454 | 158 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/preprocessors/cxx.py | # coding=utf-8
# Copyright 2022 Chris Cummins Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessor functions for C++."""
import re
import subprocess
import sys
import typing
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.preprocessors import clang
from deeplearning.benchpress.preprocessors import normalizer
from deeplearning.benchpress.preprocessors import public
from absl import flags
FLAGS = flags.FLAGS
LIBCXX_HEADERS = environment.LIBCXX_HEADERS
CLANG_HEADERS = environment.CLANG_HEADERS
C_COMMENT_RE = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE,
)
# Flags to compile C++ files with. I've replicated the default search path,
# but substituted the sandboxed header locations in place of the defaults.
# bazel-phd/bazel-out/*-py3-opt/bin/deeplearning/clgen/preprocessors/\
# cxx_test.runfiles/llvm_mac/bin/clang -xc++ -E - -v
CLANG = environment.CLANG
CLANG_ARGS = [
"-xc++",
"-isystem",
str(LIBCXX_HEADERS),
"-isystem",
"/usr/local/include",
"-isystem",
str(CLANG_HEADERS),
"-isystem",
"/usr/include",
"-Wno-ignored-pragmas",
"-ferror-limit=1",
"-Wno-implicit-function-declaration",
"-Wno-incompatible-library-redeclaration",
"-Wno-macro-redefined",
"-Wno-unused-parameter",
"-Wno-long-long",
"-Wcovered-switch-default",
"-Wdelete-non-virtual-dtor",
"-Wstring-conversion",
"-DLLVM_BUILD_GLOBAL_ISEL",
"-D__STDC_CONSTANT_MACROS",
"-D__STDC_FORMAT_MACROS",
"-D__STDC_LIMIT_MACROS",
"-D_LIBCPP_HAS_C_ATOMIC_IMP",
]
def Preprocess(src: str,
copts: typing.Optional[typing.List[str]] = None,
timeout_seconds: int = 60,
):
"""Run input code through the compiler frontend to inline macros.
Args:
src: The source code to preprocess.
copts: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
The preprocessed code.
Raises:
ClangException: In case of an error.
ClangTimeout: If clang does not complete before timeout_seconds.
"""
copts = copts or []
cmd = ["timeout", "-s9", str(timeout_seconds), str(CLANG)] + ["-E", "-c", "-", "-o", "-"] + copts
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
universal_newlines=True,
)
  stdout, stderr = process.communicate(src)
if process.returncode == 9:
raise TimeoutError(f"clang timed out after {timeout_seconds}s")
process.stdout = stdout
process.stderr = stderr
if process.returncode:
raise RuntimeError("{}: {}".format(process.stderr, process.returncode))
return process.stdout
@public.benchpress_preprocessor
def ClangPreprocess(text: str) -> str:
try:
return clang.StripPreprocessorLines(Preprocess(text, CLANG_ARGS))
except Exception as e:
raise e
def CompileLlvmBytecode(text: str) -> str:
"""A preprocessor which attempts to compile the given code.
Args:
text: Code to compile.
Returns:
LLVM IR of input source code.
"""
return clang.CompileLlvmBytecode(text, ".cpp", CLANG_ARGS)
def CompileOptimizer(text: str,
optimization: typing.List[str],
timeout_seconds: int = 60,
) -> str:
"""Compile source code to IR and apply optimization pass to source code.
Args:
src: The source code to compile.
optimization: optimization pass to apply.
Returns:
Dictionary with 70-dimensional InstCount feature vector.
"""
return clang.CompileOptimizer(text, ".cpp", CLANG_ARGS, optimization)
@public.benchpress_preprocessor
def Compile(text: str) -> str:
"""A preprocessor which attempts to compile the given code.
Args:
text: Code to compile.
Returns:
The input code, unmodified.
"""
return clang.Compile(text, ".cpp", CLANG_ARGS)
@public.benchpress_preprocessor
def ClangFormat(text: str) -> str:
"""Run clang-format on a source to enforce code style.
Args:
text: The source code to run through clang-format.
Returns:
The output of clang-format.
Raises:
ClangFormatException: In case of an error.
ClangTimeout: If clang-format does not complete before timeout_seconds.
"""
return clang.ClangFormat(text, ".cpp")
@public.benchpress_preprocessor
def NormalizeIdentifiers(text: str) -> str:
"""Normalize identifiers in C++ source code.
Args:
text: The source code to rewrite.
Returns:
Source code with identifier names normalized.
Raises:
RewriterException: If rewriter found nothing to rewrite.
ClangTimeout: If rewriter fails to complete within timeout_seconds.
"""
return normalizer.NormalizeIdentifiers(text, ".cpp", CLANG_ARGS)
@public.benchpress_preprocessor
def StripComments(text: str) -> str:
"""Strip C/C++ style comments.
Written by @markus-jarderot https://stackoverflow.com/a/241506/1318051
"""
def Replacer(match):
"""Regex replacement callback."""
s = match.group(0)
if s.startswith("/"):
return " " # note: a space and not an empty string
else:
return s
return C_COMMENT_RE.sub(Replacer, text)
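# A minimal sketch of the comment-stripping behaviour; string literals that merely
# contain comment markers are preserved because the regex consumes them first:
#
#   StripComments('int x = 1; // counter\nchar *s = "//not a comment";')
#   # -> 'int x = 1;  \nchar *s = "//not a comment";'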
| 5,691 | 27.603015 | 99 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/preprocessors/opencl.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessor passes for the OpenCL programming language."""
import typing
import os
import pathlib
import io
import subprocess
import tempfile
import math
import pandas as pd
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.preprocessors import clang
from deeplearning.benchpress.preprocessors import normalizer
from deeplearning.benchpress.preprocessors import public
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"verbose_cldrive",
False,
"Select to print verbose command messages for cldrive."
)
# LibCLC
LIBCLC = environment.LIBCLC
# OpenCL standard headers
OPENCL_HEADERS = environment.OPENCL_HEADERS
# Auxiliary .cl kernels that may need be included
AUX_INCLUDE = environment.AUX_INCLUDE
# CLDrive executable, if exists.
CLDRIVE = environment.CLDRIVE
CL_PLATFORMS = None
CL_H = os.path.join(OPENCL_HEADERS, "CL/cl.h")
OPENCL_H = os.path.join(environment.DATA_CL_INCLUDE, "opencl.h")
OPENCL_C_H = os.path.join(environment.DATA_CL_INCLUDE, "opencl-c.h")
OPENCL_C_BASE = os.path.join(environment.DATA_CL_INCLUDE, "opencl-c-base.h")
SHIMFILE = os.path.join(environment.DATA_CL_INCLUDE, "opencl-shim.h")
STRUCTS = os.path.join(environment.DATA_CL_INCLUDE, "structs.h")
def GetClangArgs(use_shim: bool, use_aux_headers: bool, extra_args: typing.List[str] = []) -> typing.List[str]:
"""Get the arguments to pass to clang for handling OpenCL.
Args:
use_shim: If true, inject the shim OpenCL header.
    use_aux_headers: If true, include the auxiliary structs/kernel headers.
    extra_args: Additional arguments appended to the returned list.
Returns:
A list of command line arguments to pass to Popen().
"""
args = [
"-xcl",
"--target=nvptx64-nvidia-nvcl",
"-cl-std=CL2.0",
"-ferror-limit=0",
"-include{}".format(OPENCL_C_H),
"-include{}".format(OPENCL_C_BASE),
"-include{}".format(CL_H),
"-I{}".format(str(OPENCL_HEADERS)),
"-I{}".format(str(LIBCLC)),
"-Wno-everything",
"-O1",
]
if use_aux_headers:
args += [
"-include{}".format(STRUCTS),
"-I{}".format(str(AUX_INCLUDE)),
]
if use_shim:
args += ["-include", str(SHIMFILE)]
return args + extra_args
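# A minimal usage sketch; the exact include paths depend on the local environment
# module, so only the argument shape is illustrated:
#
#   GetClangArgs(use_shim=False, use_aux_headers=False)
#   # -> ["-xcl", "--target=nvptx64-nvidia-nvcl", "-cl-std=CL2.0", ..., "-O1"]
#   GetClangArgs(use_shim=True, use_aux_headers=True, extra_args=["-DN=4"])
#   # additionally appends the structs/aux includes, the shim header and "-DN=4".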
def getOpenCLPlatforms() -> None:
"""
Identify compatible OpenCL platforms for current system.
"""
global CL_PLATFORMS
CL_PLATFORMS = {
'CPU': None,
'GPU': None,
}
try:
cmd = subprocess.Popen(
"{} --clinfo".format(CLDRIVE).split(),
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
universal_newlines = True,
)
stdout, stderr = cmd.communicate()
if stderr:
raise ValueError(stderr)
  except Exception as e:
    l.logger().error(cmd)
    l.logger().error(e)
    return
lines = stdout.split('\n')
for line in lines:
if line and line[:3] == "GPU" and not CL_PLATFORMS['GPU']:
CL_PLATFORMS['GPU'] = line
elif line and line[:3] == "CPU" and not CL_PLATFORMS['CPU']:
CL_PLATFORMS['CPU'] = line
return
def _ClangPreprocess(text: str, use_shim: bool, use_aux_headers: bool, extra_args: typing.List[str]) -> str:
"""Private preprocess OpenCL source implementation.
Inline macros, removes comments, etc.
Args:
text: OpenCL source.
use_shim: Inject shim header.
Returns:
Preprocessed source.
"""
return clang.Preprocess(text, GetClangArgs(use_shim = use_shim, use_aux_headers = use_aux_headers, extra_args = extra_args))
def _ExtractTypedefs(text: str, dtype: str) -> str:
"""
Preprocessor extracts all struct type definitions.
Args:
text: The text to preprocess.
Returns:
The input text, with all whitespaces removed.
"""
text = text.split('typedef {}'.format(dtype))
dtypes = []
new_text = [text[0]]
for t in text[1:]:
lb, rb = 0, 0
ssc = False
for idx, ch in enumerate(t):
if ch == "{":
lb += 1
elif ch == "}":
rb += 1
elif ch == ";" and ssc == True:
dtypes.append("typedef {}".format(dtype) + t[:idx + 1])
new_text.append(t[idx + 1:])
break
if lb == rb and lb != 0:
ssc = True
print("\n\n".join(dtypes))
return ''.join(new_text)
def DeriveSourceVocab(text: str, token_list: typing.Set[str] = set(), extra_args: typing.List[str] = []) -> typing.Dict[str, str]:
"""Pass CL code through clang's lexer and return set of
tokens with appropriate delimiters for vocabulary construction.
Args:
text: Source code.
token_list: Optional external list of tokens for opencl grammar.
Returns:
Set of unique source code tokens.
"""
return clang.DeriveSourceVocab(text, token_list, ".cl", GetClangArgs(use_shim = False, use_aux_headers = True, extra_args = extra_args))
def AtomizeSource(text: str, vocab: typing.Set[str], extra_args: typing.List[str] = []) -> typing.List[str]:
"""
Atomize OpenCL source with clang's lexer into token atoms.
Args:
text: The source code to compile.
vocab: Optional set of learned vocabulary of tokenizer.
Returns:
Source code as a list of tokens.
"""
return clang.AtomizeSource(text, vocab, ".cl", GetClangArgs(use_shim = False, use_aux_headers = True, extra_args = extra_args))
def ContentHash(src: str) -> str:
"""
Re-write code with deterministic, sequential rewriter, remove whitespaces and new lines
and calculate the hash of the string.
Args:
src: The source code to compute.
Returns:
256-bit hash of pure source code string.
"""
rw = SequentialNormalizeIdentifiers(src)
return crypto.sha256_str(rw.replace(" ", "").replace("\n", ""))
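# A minimal sketch of the intent: kernels that differ only in identifier names and
# whitespace should hash identically (the kernel bodies below are illustrative):
#
#   a = ContentHash("kernel void foo(global int* in) { in[0] = 1; }")
#   b = ContentHash("kernel void bar(global int*  x) {\n  x[0] = 1; }")
#   # a == b, since both normalise to the same rewritten, whitespace-free string.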
def IRContentHash(src: str, header_file = None, use_aux_headers: bool = True) -> str:
"""
Collect optimized LLVM-IR of source code and compute its hash.
Args:
src: The source code to compute.
Returns:
256-bit hash of pure source code string.
"""
bc = CompileLlvmBytecode(src, header_file = header_file, use_aux_headers = use_aux_headers)
return crypto.sha256_str(''.join(bc.split('\n')[2:]))
def RunCLDrive(src: str,
header_file: str = None,
num_runs : int = 1000,
gsize : int = 4096,
lsize : int = 1024,
extra_args : typing.List[str] = [],
timeout : int = 0
) -> str:
"""
If CLDrive executable exists, run it over provided source code.
"""
if not CLDRIVE:
l.logger().warn("CLDrive executable has not been found. Skipping CLDrive execution.")
return ""
global CL_PLATFORMS
if not CL_PLATFORMS:
getOpenCLPlatforms()
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", prefix="benchpress_opencl_cldrive", suffix = '.cl', dir = tdir) as f:
if header_file:
with tempfile.NamedTemporaryFile("w", prefix="benchpress_opencl_clheader", suffix = '.h', dir = tdir) as hf:
f.write("#include \"{}\"\n{}".format(pathlib.Path(hf.name).resolve().name, src))
f.flush()
hf.write(header_file)
hf.flush()
cmd = "{} {} --srcs={} --cl_build_opt=\"-I{}{}\" --num_runs={} --gsize={} --lsize={} --envs={},{}".format(
"timeout -s9 {}".format(timeout) if timeout > 0 else "",
CLDRIVE,
f.name,
pathlib.Path(hf.name).resolve().parent,
",{}".format(",".join(extra_args)) if len(extra_args) > 0 else "",
num_runs,
gsize,
lsize,
CL_PLATFORMS['CPU'],
CL_PLATFORMS['GPU']
)
if FLAGS.verbose_cldrive:
print(cmd)
print(src)
proc = subprocess.Popen(
cmd.split(),
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
universal_newlines = True,
)
stdout, stderr = proc.communicate()
else:
f.write(src)
f.flush()
cmd = "{} {} --srcs={} {} --num_runs={} --gsize={} --lsize={} --envs={},{}".format(
"timeout -s9 {}".format(timeout) if timeout > 0 else "",
CLDRIVE,
f.name,
"--cl_build_opt={}".format(",".join(extra_args)) if len(extra_args) > 0 else "",
num_runs,
gsize,
lsize,
CL_PLATFORMS['CPU'],
CL_PLATFORMS['GPU']
)
if FLAGS.verbose_cldrive:
print(cmd)
print(src)
proc = subprocess.Popen(
cmd.split(),
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
universal_newlines = True,
)
try:
stdout, stderr = proc.communicate()
except UnicodeDecodeError:
return "", ""
if proc.returncode == 9:
stderr = "TIMEOUT"
return stdout, stderr
def CollectCLDriveLabel(df: pd.DataFrame, stdout: str, stderr: str) -> str:
"""
Read data from CLDrive execution and compute label.
"""
cpu_error = None
gpu_error = None
if stderr == "TIMEOUT":
return "TIMEOUT"
if df is None:
return "I/O_ERROR"
try:
avg_time_cpu_ns = (df[df['device'].str.contains("CPU")].transfer_time_ns.mean() + df[df['device'].str.contains("CPU")].kernel_time_ns.mean())
avg_time_gpu_ns = (df[df['device'].str.contains("GPU")].transfer_time_ns.mean() + df[df['device'].str.contains("GPU")].kernel_time_ns.mean())
except Exception:
avg_time_cpu_ns = None
avg_time_gpu_ns = None
if avg_time_cpu_ns is None or avg_time_gpu_ns is None or math.isnan(avg_time_cpu_ns) or math.isnan(avg_time_gpu_ns):
label = "ERR"
if stdout == "":
cpu_error = "NO_STDOUT"
gpu_error = "NO_STDOUT"
label = "CPU-{}_GPU-{}".format(cpu_error, gpu_error)
elif "CL_OUT_OF_RESOURCES" in stderr:
cpu_error = "CL_OUT_OF_RESOURCES"
gpu_error = "CL_OUT_OF_RESOURCES"
label = "CPU-{}_GPU-{}".format(cpu_error, gpu_error)
elif df is not None:
try:
cpu_error = df[df['device'].str.contains("CPU")].outcome[0]
if cpu_error == "CL_ERROR" and "-9999" in stderr:
cpu_error = "INVALID_BUFFER_READ_WRITE"
except KeyError:
cpu_error = stderr
except ValueError:
cpu_error = stderr
except Exception:
cpu_error = stderr
try:
gpu_error = df[df['device'].str.contains("GPU")].outcome[1]
if gpu_error == "CL_ERROR" and "-9999" in stderr:
gpu_error = "INVALID_BUFFER_READ_WRITE"
except KeyError:
gpu_error = stderr
except ValueError:
gpu_error = stderr
except Exception:
gpu_error = stderr
label = "CPU-{}_GPU-{}".format(cpu_error, gpu_error)
else:
label = "GPU" if avg_time_cpu_ns > avg_time_gpu_ns else "CPU"
# if label == "ERR" or cpu_error == "CL_ERROR" or gpu_error == "CL_ERROR":
# l.logger().warn(stdout)
# l.logger().warn(stderr)
return label
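# A minimal sketch of the labelling rule on a well-formed run, assuming a cldrive
# CSV with per-device timing columns (the numbers are illustrative):
#
#   df = pd.DataFrame({
#     "device": ["CPU_platform", "GPU_platform"],
#     "transfer_time_ns": [1000, 4000],
#     "kernel_time_ns": [9000, 1000],
#   })
#   CollectCLDriveLabel(df, stdout="ok", stderr="")
#   # -> "GPU", because 1000 + 9000 (CPU) exceeds 4000 + 1000 (GPU).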
def CLDrivePretty(src: str,
header_file = None,
num_runs: int = 5,
gsize: int = 4096,
lsize: int = 1024,
timeout: int = 0
) -> typing.Tuple[pd.DataFrame, str]:
"""
Run CLDrive with given configuration but pretty print stdout and stderror.
"""
stdout, stderr = RunCLDrive(src, header_file = header_file, num_runs = num_runs, gsize = gsize, lsize = lsize, timeout = timeout)
for x in stdout.split('\n'):
print(x)
for x in stderr.split('\n'):
print(x)
return stdout, stderr
def CLDriveDataFrame(src: str,
header_file: str = None,
num_runs : int = 5,
gsize : int = 4096,
lsize : int = 1024,
extra_args : typing.List[str] = [],
timeout : int = 0
) -> typing.Tuple[pd.DataFrame, str]:
"""
Run CLDrive with given configuration and return pandas dataframe along with collected label.
"""
stdout, stderr = RunCLDrive(src, header_file = header_file, num_runs = num_runs, gsize = gsize, lsize = lsize, extra_args = extra_args, timeout = timeout)
try:
df = pd.read_csv(io.StringIO(stdout), sep = ",")
except Exception as e:
df = None
return df, CollectCLDriveLabel(df, stdout, stderr)
def CLDriveNumBytes(src: str,
header_file = None,
gsize: int = 4096,
lsize: int = 1024,
timeout: int = 0) -> int:
"""
Run CLDrive once for given configuration to identify number of transferred bytes.
"""
stdout, stderr = RunCLDrive(src, header_file = header_file, num_runs = 5, gsize = gsize, lsize = lsize, timeout = timeout)
try:
df = pd.read_csv(io.StringIO(stdout), sep = ",")
except pd.errors.EmptyDataError:
return None
label = CollectCLDriveLabel(df, stdout, stderr)
return df[df['device'].str.contains("CPU")].transferred_bytes[0] if label in {"CPU", "GPU"} else None
def CLDriveExecutionTimes(src: str,
header_file = None,
num_runs: int = 1000,
gsize: int = 4096,
lsize: int = 1024,
timeout: int = 0
) -> typing.Tuple[typing.List[int], typing.List[int], typing.List[int], typing.List[int]]:
"""
Run CLDrive once for given configuration to identify number of transferred bytes.
"""
stdout, stderr = RunCLDrive(src, header_file = header_file, num_runs = num_runs, gsize = gsize, lsize = lsize, timeout = timeout)
try:
df = pd.read_csv(io.StringIO(stdout), sep = ",")
transfer_time_cpu = df[df['device'].str.contains("CPU")].transfer_time_ns
execution_time_cpu = df[df['device'].str.contains("CPU")].kernel_time_ns
transfer_time_gpu = df[df['device'].str.contains("GPU")].transfer_time_ns
execution_time_gpu = df[df['device'].str.contains("GPU")].kernel_time_ns
except pd.errors.EmptyDataError:
# CSV is empty which means src failed miserably.
transfer_time_cpu = None
execution_time_cpu = None
transfer_time_gpu = None
execution_time_gpu = None
except pd.errors.ParserError:
# CSV is empty which means src failed miserably.
transfer_time_cpu = None
execution_time_cpu = None
transfer_time_gpu = None
execution_time_gpu = None
return transfer_time_cpu, execution_time_cpu, transfer_time_gpu, execution_time_gpu
def CLDriveLabel(src: str,
header_file = None,
num_runs: int = 1000,
gsize: int = 4096,
lsize: int = 1024,
timeout: int = 0
) -> str:
"""
Run CLDrive on given configuration and compute whether it should run on CPU vs GPU based on where it will execute faster (transfer time + execution time).
"""
stdout, stderr = RunCLDrive(src, header_file = header_file, num_runs = num_runs, gsize = gsize, lsize = lsize, timeout = timeout)
df = None
try:
df = pd.read_csv(io.StringIO(stdout), sep = ",")
avg_time_cpu_ns = (df[df['device'].str.contains("CPU")].transfer_time_ns.mean() + df[df['device'].str.contains("CPU")].kernel_time_ns.mean())
avg_time_gpu_ns = (df[df['device'].str.contains("GPU")].transfer_time_ns.mean() + df[df['device'].str.contains("GPU")].kernel_time_ns.mean())
except pd.errors.EmptyDataError:
# CSV is empty which means src failed miserably.
avg_time_cpu_ns = None
avg_time_gpu_ns = None
except pd.errors.ParserError:
# Unexpected parsing error.
avg_time_cpu_ns = None
avg_time_gpu_ns = None
return CollectCLDriveLabel(df, stdout, stderr)
@public.benchpress_preprocessor
def ClangPreprocess(text: str, extra_args = []) -> str:
"""Preprocessor OpenCL source.
Args:
text: OpenCL source to preprocess.
Returns:
Preprocessed source.
"""
return _ClangPreprocess(text, False, True, extra_args = extra_args)
@public.benchpress_preprocessor
def ClangPreprocessWithShim(text: str, extra_args = []) -> str:
"""Preprocessor OpenCL source with OpenCL shim header injection.
Args:
text: OpenCL source to preprocess.
Returns:
Preprocessed source.
"""
return _ClangPreprocess(text, True, True, extra_args = extra_args)
def CompileLlvmBytecode(text: str, header_file = None, use_aux_headers: bool = True, extra_args: typing.List[str] = []) -> str:
"""A preprocessor which attempts to compile the given code.
Args:
text: Code to compile.
Returns:
LLVM IR of input source code.
"""
# We must override the flag -Wno-implicit-function-declaration from
# GetClangArgs() to ensure that undefined functions are treated as errors.
return clang.CompileLlvmBytecode(
text,
".cl",
GetClangArgs(use_shim = False, use_aux_headers = use_aux_headers, extra_args = extra_args),# + ["-Werror=implicit-function-declaration"],
header_file = header_file,
)
def CompileStdin(text: str, header_file = None, use_aux_headers: bool = True, extra_args: typing.List[str] = []) -> str:
"""A preprocessor which attempts to compile the given code.
Args:
text: Code to compile.
Returns:
LLVM IR of input source code.
"""
# We must override the flag -Wno-implicit-function-declaration from
# GetClangArgs() to ensure that undefined functions are treated as errors.
return clang.CompileStdin(
text,
".cl",
GetClangArgs(use_shim = False, use_aux_headers = use_aux_headers, extra_args = extra_args),# + ["-Werror=implicit-function-declaration"],
header_file = header_file,
)
def HumanReadableBytecode(bc_path: pathlib.Path) -> str:
"""Run llvm-dis to disassemble binary bytecode file to human readable format.
Args:
bc_path: The path to bytecode.
Returns:
The textual LLVM byte code.
"""
return clang.HumanReadableBytecode(bc_path)
def CompileOptimizer(text: str,
optimization : typing.List[str],
timeout_seconds : int = 60,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = []
) -> str:
"""Compile source code to IR and apply optimization pass to source code.
Args:
src: The source code to compile.
optimization: optimization pass to apply.
Returns:
Dictionary with 70-dimensional InstCount feature vector.
"""
return clang.CompileOptimizer(
src = text,
suffix = ".cl",
cflags = GetClangArgs(use_shim = False, use_aux_headers = use_aux_headers, extra_args = extra_args),
optimization = optimization,
header_file = header_file,
)
def CompileOptimizerStdin(text: str,
optimization : typing.List[str],
timeout_seconds : int = 60,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = []
) -> str:
"""Compile source code to IR and apply optimization pass to source code.
Args:
src: The source code to compile.
optimization: optimization pass to apply.
Returns:
Dictionary with 70-dimensional InstCount feature vector.
"""
return clang.CompileOptimizerStdin(
src = text,
suffix = ".cl",
cflags = GetClangArgs(use_shim = False, use_aux_headers = use_aux_headers, extra_args = extra_args),
optimization = optimization,
header_file = header_file,
)
def CompileOptimizerIR(bytecode: str,
optimization : typing.List[str],
timeout_seconds : int = 60,
) -> str:
"""Apply optimization pass to LLVM-IR bytecode file.
Args:
bytecode: The source code to optimize.
optimization: optimization pass to apply.
Returns:
Dictionary with 70-dimensional InstCount feature vector.
"""
return clang.CompileOptimizerIR(
bytecode = bytecode,
suffix = ".ll",
optimization = optimization,
)
@public.benchpress_preprocessor
def Compile(text: str, header_file = None, use_aux_headers = True, extra_args = [], return_diagnostics = False) -> str:
"""Check that the OpenCL source compiles.
This does not modify the input.
Args:
text: OpenCL source to check.
Returns:
Unmodified OpenCL source.
"""
# We must override the flag -Wno-implicit-function-declaration from
# GetClangArgs() to ensure that undefined functions are treated as errors.
return clang.Compile(
text,
".cl",
GetClangArgs(use_shim = False, use_aux_headers = use_aux_headers, extra_args = extra_args),# + ["-Werror=implicit-function-declaration"],
header_file = header_file,
return_diagnostics = return_diagnostics,
)
@public.benchpress_preprocessor
def ClangFormat(text: str) -> str:
"""Run clang-format on a source to enforce code style.
Args:
text: The source code to run through clang-format.
Returns:
The output of clang-format.
Raises:
ClangFormatException: In case of an error.
ClangTimeout: If clang-format does not complete before timeout_seconds.
"""
return clang.ClangFormat(text, ".cl")
@public.benchpress_preprocessor
def ExtractStructTypedefs(text: str) -> str:
"""
Preprocessor extracts all struct type definitions.
Args:
text: The text to preprocess.
Returns:
    The extracted struct typedef definitions.
"""
return _ExtractTypedefs(text, 'struct')
@public.benchpress_preprocessor
def ExtractUnionTypedefs(text: str) -> str:
"""
Preprocessor extracts all union type definitions.
Args:
text: The text to preprocess.
Returns:
    The extracted union typedef definitions.
"""
return _ExtractTypedefs(text, 'union')
@public.benchpress_preprocessor
def RemoveTypedefs(text: str) -> str:
"""
  Preprocessor removes all typedef type aliases, except typedef struct/enum/union.
Args:
text: The text to preprocess.
Returns:
    The input text, with typedef aliases removed.
"""
text = text.split('\n')
for i, l in enumerate(text):
if "typedef " in l and "typedef struct" not in l and "typedef enum" not in l and "typedef union" not in l:
text[i] = ""
return '\n'.join(text)
@public.benchpress_preprocessor
def InvertKernelSpecifier(text: str) -> str:
"""
Inverts 'void kernel' specifier to 'kernel void'.
Args:
text: The text to preprocess.
Returns:
    The input text, with 'void kernel ' occurrences rewritten as 'kernel void '.
"""
return text.replace("void kernel ", "kernel void ")
@public.benchpress_preprocessor
def ExtractSingleKernels(text: str) -> typing.List[str]:
"""
  A preprocessor that splits a single source file into discrete kernels
along with their potential global declarations.
Args:
text: The text to preprocess.
Returns:
List of kernels (strings).
"""
# OpenCL kernels can only be void
kernel_specifier = 'kernel void'
kernel_chunks = text.split(kernel_specifier)
actual_kernels, global_space = [], []
for idx, chunk in enumerate(kernel_chunks):
if idx == 0:
      # The left-most chunk is either empty or global space preceding the first kernel
if chunk != '':
global_space.append(chunk)
else:
# Given this preprocessor is called after compile,
# we are certain that brackets will be paired
num_lbrack, num_rbrack, chunk_idx = 0, 0, 0
while ((num_lbrack == 0
or num_lbrack != num_rbrack)
and chunk_idx < len(chunk)):
try:
cur_tok = chunk[chunk_idx]
except IndexError:
l.logger().warn(chunk)
if cur_tok == "{":
num_lbrack += 1
elif cur_tok == "}":
num_rbrack += 1
chunk_idx += 1
while chunk_idx < len(chunk):
# Without this line, global_space tends to gather lots of newlines and wspaces
# Then they are replicated and become massive. Better isolate only actual text there.
if chunk[chunk_idx] == ' ' or chunk[chunk_idx] == '\n':
chunk_idx += 1
else:
break
# Add to kernels all global space met so far + 'kernel void' + the kernel's body
actual_kernels.append(''.join(global_space) + kernel_specifier + chunk[:chunk_idx])
if ''.join(chunk[chunk_idx:]) != '':
# All the rest below are appended to global_space
global_space.append(chunk[chunk_idx:])
return actual_kernels
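# Illustrative sketch added for clarity (not part of the original file): how the
# splitter above behaves on a hypothetical two-kernel source. Global helpers seen
# so far are prepended to each extracted kernel.
def _example_extract_single_kernels() -> typing.List[str]:
  src = (
    "int helper(int a) { return a + 1; }\n"
    "kernel void A(global int* x) { x[0] = helper(x[0]); }\n"
    "kernel void B(global int* y) { y[0] = 0; }\n"
  )
  # Expected: two strings, each starting with the helper definition followed by
  # one 'kernel void' body.
  return ExtractSingleKernels(src)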
@public.benchpress_preprocessor
def ExtractSingleKernelsHeaders(text: str) -> typing.List[typing.Tuple[str, str]]:
"""
A preprocessor that splits a single source file
  into tuples of (single kernel, its global space)
Args:
text: The text to preprocess.
Returns:
List of tuples of kernels, global space (strings).
"""
# OpenCL kernels can only be void
kernel_specifier = 'kernel void'
kernel_chunks = text.split(kernel_specifier)
actual_kernels, global_space = [], []
for idx, chunk in enumerate(kernel_chunks):
if idx == 0:
      # The left-most chunk is either empty or global space preceding the first kernel
if chunk != '':
global_space.append(chunk)
else:
# Given this preprocessor is called after compile,
# we are certain that brackets will be paired
num_lbrack, num_rbrack, chunk_idx = 0, 0, 0
while ((num_lbrack == 0
or num_lbrack != num_rbrack)
and chunk_idx < len(chunk)):
try:
cur_tok = chunk[chunk_idx]
except IndexError:
l.logger().warn(chunk)
if cur_tok == "{":
num_lbrack += 1
elif cur_tok == "}":
num_rbrack += 1
chunk_idx += 1
while chunk_idx < len(chunk):
# Without this line, global_space tends to gather lots of newlines and wspaces
# Then they are replicated and become massive. Better isolate only actual text there.
if chunk[chunk_idx] == ' ' or chunk[chunk_idx] == '\n':
chunk_idx += 1
else:
break
# Add to kernels all global space met so far + 'kernel void' + the kernel's body
actual_kernels.append((kernel_specifier + chunk[:chunk_idx], ''.join(global_space)))
if ''.join(chunk[chunk_idx:]) != '':
# All the rest below are appended to global_space
global_space.append(chunk[chunk_idx:])
return actual_kernels
@public.benchpress_preprocessor
def ExtractOnlySingleKernels(text: str) -> typing.List[str]:
"""
  A preprocessor that splits a single source file into discrete kernels
  without any global declarations.
Args:
text: The text to preprocess.
Returns:
List of kernels (strings).
"""
# OpenCL kernels can only be void
kernel_specifier = 'kernel void'
kernel_chunks = text.split(kernel_specifier)
actual_kernels = []
for idx, chunk in enumerate(kernel_chunks):
if idx != 0:
is_declaration = False
# Given this preprocessor is called after compile,
# we are certain that brackets will be paired
num_lbrack, num_rbrack, chunk_idx = 0, 0, 0
while ((num_lbrack == 0
or num_lbrack != num_rbrack)
and chunk_idx < len(chunk)):
try:
cur_tok = chunk[chunk_idx]
except IndexError:
l.logger().warn(chunk)
if cur_tok == ";" and num_lbrack == 0:
is_declaration = True
break
elif cur_tok == "{":
num_lbrack += 1
elif cur_tok == "}":
num_rbrack += 1
chunk_idx += 1
if not is_declaration:
while chunk_idx < len(chunk):
# Without this line, global_space tends to gather lots of newlines and wspaces
# Then they are replicated and become massive. Better isolate only actual text there.
if chunk[chunk_idx] == ' ' or chunk[chunk_idx] == '\n':
chunk_idx += 1
else:
break
# Add to kernels all global space met so far + 'kernel void' + the kernel's body
actual_kernels.append(kernel_specifier + chunk[:chunk_idx])
return actual_kernels
@public.benchpress_preprocessor
def StringKernelsToSource(text: str) -> str:
"""
Preprocessor converts inlined C++ string kernels to OpenCL programs.
Args:
text: The text to preprocess.
Returns:
OpenCL kernel.
"""
if '\\n"' in text:
return ClangPreprocessWithShim(text.replace('\\n"', '').replace('"', ''))
else:
return text
@public.benchpress_preprocessor
def NormalizeIdentifiers(text: str, extra_args = []) -> str:
"""Normalize identifiers in OpenCL source code.
Args:
text: The source code to rewrite.
Returns:
Source code with identifier names normalized.
Raises:
RewriterException: If rewriter found nothing to rewrite.
ClangTimeout: If rewriter fails to complete within timeout_seconds.
"""
return normalizer.NormalizeIdentifiers(
text, ".cl", GetClangArgs(use_shim = False, use_aux_headers = True, extra_args = extra_args)
)
@public.benchpress_preprocessor
def SequentialNormalizeIdentifiers(text: str, extra_args = []) -> str:
"""Normalize identifiers sequentially in OpenCL source code.
Args:
text: The source code to rewrite.
Returns:
Source code with identifier names normalized.
Raises:
RewriterException: If rewriter found nothing to rewrite.
ClangTimeout: If rewriter fails to complete within timeout_seconds.
"""
return normalizer.NormalizeIdentifiers(
text, ".cl", GetClangArgs(use_shim = False, use_aux_headers = True, extra_args = extra_args), sequential_rewrite = True
)
@public.benchpress_preprocessor
def MinimumStatement1(text: str) -> str:
"""Check that file contains at least one statement.
Args:
text: The source to verify.
Returns:
src: The unmodified input src.
Raises:
    ValueError: If src has no semi-colons.
"""
if ';' not in text:
raise ValueError
return text
@public.benchpress_preprocessor
def SanitizeKernelPrototype(text: str) -> str:
"""Sanitize OpenCL prototype.
Ensures that OpenCL prototype fits on a single line.
Args:
text: OpenCL source.
Returns:
Source code with sanitized prototypes.
"""
# Ensure that prototype is well-formed on a single line:
try:
prototype_end_idx = text.index("{") + 1
prototype = " ".join(text[:prototype_end_idx].split())
return prototype + text[prototype_end_idx:]
except ValueError:
# Ok so erm... if the '{' character isn't found, a ValueError
# is thrown. Why would '{' not be found? Who knows, but
# whatever, if the source file got this far through the
# preprocessing pipeline then it's probably "good" code. It
# could just be that an empty file slips through the cracks or
# something.
return text
@public.benchpress_preprocessor
def StripDoubleUnderscorePrefixes(text: str) -> str:
"""Remove the optional __ qualifiers on OpenCL keywords.
The OpenCL spec allows __ prefix for OpenCL keywords, e.g. '__global' and
'global' are equivalent. This preprocessor removes the '__' prefix on those
keywords.
Args:
text: The OpenCL source to preprocess.
Returns:
OpenCL source with __ stripped from OpenCL keywords.
"""
# List of keywords taken from the OpenCL 1.2. specification, page 169.
replacements = {
"__const": "const",
"__constant": "constant",
"__global": "global",
"__kernel": "kernel",
"__local": "local",
"__private": "private",
"__read_only": "read_only",
"__read_write": "read_write",
"__restrict": "restrict",
"__write_only": "write_only",
}
for old, new in replacements.items():
text = text.replace(old, new)
return text
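# Minimal usage sketch added for illustration (not in the original source): the
# replacement table above maps '__'-prefixed OpenCL qualifiers to their plain forms.
def _example_strip_double_underscore_prefixes() -> str:
  src = "__kernel void k(__global int* a, __local int* b) {}"
  # Expected: "kernel void k(global int* a, local int* b) {}"
  return StripDoubleUnderscorePrefixes(src)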
| 32,339 | 31.699697 | 156 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/preprocessors/common.py | # coding=utf-8
# Copyright 2022 Chris Cummins and Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common preprocessor passes."""
import typing
# import pathlib
# from absl import flags
from deeplearning.benchpress.preprocessors import public
# from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import logging as l
# FLAGS = flags.FLAGS
def _MinimumLineCount(text: str, min_line_count: int) -> str:
"""Private implementation of minimum number of lines.
Args:
text: The source to verify the line count of.
Returns:
src: The unmodified input src.
Raises:
    ValueError: If src is less than min_line_count long.
"""
if len(text.strip().split("\n")) < min_line_count:
raise ValueError
return text
@public.benchpress_preprocessor
def MinimumLineCount3(text: str) -> str:
"""Check that file contains a minimum number of lines.
Args:
text: The source to verify the line count of.
Returns:
src: The unmodified input src.
Raises:
    ValueError: If src is less than min_line_count long.
"""
return _MinimumLineCount(text, 3)
@public.benchpress_preprocessor
def StripDuplicateEmptyLines(text: str) -> str:
"""A preprocessor pass which removes duplicate empty lines.
Args:
text: The text to preprocess.
Returns:
The input text, where duplicate empty lines have been removed.
"""
last_line = None
lines = []
for line in text.split("\n"):
if line.strip() or last_line:
lines.append(line)
last_line = line.rstrip()
return "\n".join(lines)
@public.benchpress_preprocessor
def StripTrailingWhitespace(text: str) -> str:
"""A preprocessor pass which strips trailing whitespace from all lines.
Whitespace at the end of each line is removed, as is any trailing whitespace
at the end of the input.
Args:
text: The text to preprocess.
Returns:
The input text, with trailing whitespace removed.
"""
return "\n".join(l.rstrip() for l in text.split("\n")).rstrip()
@public.benchpress_preprocessor
def StripMultipleWhitespaces(text: str) -> str:
"""
Preprocessor replaces sequences of whitespaces with a single whitespace.
Args:
text: The text to preprocess.
Returns:
    The input text, with runs of consecutive spaces collapsed to a single space.
"""
while " " in text:
text = text.replace(' ', ' ')
return text
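# Minimal sketch added for illustration (not part of the original module): chaining
# the whitespace passes above the way a preprocessing pipeline would.
def _example_whitespace_pipeline() -> str:
  text = "int x ;   \n\n\n\nint y ;   "
  text = StripDuplicateEmptyLines(text)
  text = StripTrailingWhitespace(text)
  return StripMultipleWhitespaces(text)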
| 2,876 | 25.88785 | 78 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/preprocessors/c.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessor functions for C."""
import re
import sys
import typing
import subprocess
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.preprocessors import clang
from deeplearning.benchpress.preprocessors import normalizer
from deeplearning.benchpress.preprocessors import public
from absl import flags
FLAGS = flags.FLAGS
LIBCXX_HEADERS = environment.LIBCXX_HEADERS
CLANG_HEADERS = environment.CLANG_HEADERS
C_COMMENT_RE = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE,
)
# Flags to compile C++ files with. I've replicated the default search path,
# but substituted the sandboxed header locations in place of the defaults.
# bazel-phd/bazel-out/*-py3-opt/bin/deeplearning/clgen/preprocessors/\
# cxx_test.runfiles/llvm_mac/bin/clang -xc++ -E - -v
CLANG = environment.CLANG
CLANG_ARGS = [
"-xc",
"-isystem",
str(LIBCXX_HEADERS),
"-isystem",
"/usr/local/include",
"-isystem",
str(CLANG_HEADERS),
"-isystem",
"/usr/include",
"-Wno-ignored-pragmas",
"-ferror-limit=1",
"-Wno-implicit-function-declaration",
"-Wno-incompatible-library-redeclaration",
"-Wno-macro-redefined",
"-Wno-unused-parameter",
"-Wno-long-long",
"-Wcovered-switch-default",
"-Wdelete-non-virtual-dtor",
"-Wstring-conversion",
"-DLLVM_BUILD_GLOBAL_ISEL",
"-D__STDC_CONSTANT_MACROS",
"-D__STDC_FORMAT_MACROS",
"-D__STDC_LIMIT_MACROS",
"-D_LIBCPP_HAS_C_ATOMIC_IMP",
]
def Preprocess(src: str,
copts: typing.Optional[typing.List[str]] = None,
timeout_seconds: int = 60,
):
"""Run input code through the compiler frontend to inline macros.
Args:
src: The source code to preprocess.
copts: A list of flags to be passed to clang.
timeout_seconds: The number of seconds to allow before killing clang.
Returns:
The preprocessed code.
Raises:
ClangException: In case of an error.
ClangTimeout: If clang does not complete before timeout_seconds.
"""
copts = copts or []
cmd = ["timeout", "-s9", str(timeout_seconds), str(CLANG)] + ["-E", "-c", "-", "-o", "-"] + copts
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process.communicate(src)
if process.returncode == 9:
raise TimeoutError(f"clang timed out after {timeout_seconds}s")
process.stdout = stdout
process.stderr = stderr
if process.returncode:
raise ValueError("{}: {}".format(process.stderr, process.returncode))
return process.stdout
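# Reference note added for illustration (not in the original file): the subprocess
# call above is roughly equivalent to running
#   timeout -s9 <timeout> $CLANG -E -c - -o - <copts>
# with the source piped on stdin. A minimal sketch of calling it directly; it
# assumes a working clang toolchain resolved by environment.CLANG.
def _example_preprocess_c() -> str:
  return Preprocess("#define N 4\nint arr[N];\n", copts = CLANG_ARGS)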
@public.benchpress_preprocessor
def ClangPreprocess(text: str) -> str:
try:
return clang.StripPreprocessorLines(Preprocess(text))
except Exception as e:
raise e
def ContentHash(src: str) -> str:
"""
Re-write code with deterministic, sequential rewriter, remove whitespaces and new lines
and calculate the hash of the string.
Args:
src: The source code to compute.
Returns:
256-bit hash of pure source code string.
"""
rw = SequentialNormalizeIdentifiers(src)
return crypto.sha256_str(rw.replace(" ", "").replace("\n", ""))
def IRContentHash(src: str) -> str:
"""
Collect optimized LLVM-IR of source code and compute its hash.
Args:
src: The source code to compute.
Returns:
256-bit hash of pure source code string.
"""
bc = CompileLlvmBytecode(src)
return crypto.sha256_str(''.join(bc.split('\n')[2:]))
def CompileLlvmBytecode(text: str) -> str:
"""A preprocessor which attempts to compile the given code.
Args:
text: Code to compile.
Returns:
LLVM IR of input source code.
"""
return clang.CompileLlvmBytecode(text, ".c", CLANG_ARGS)
def CompileOptimizer(text: str,
optimization: typing.List[str],
timeout_seconds: int = 60,
) -> str:
"""Compile source code to IR and apply optimization pass to source code.
Args:
src: The source code to compile.
optimization: optimization pass to apply.
Returns:
Dictionary with 70-dimensional InstCount feature vector.
"""
return clang.CompileOptimizer(text, ".c", CLANG_ARGS, optimization)
@public.benchpress_preprocessor
def Compile(text: str) -> str:
"""A preprocessor which attempts to compile the given code.
Args:
text: Code to compile.
Returns:
The input code, unmodified.
"""
return clang.Compile(text, ".c", CLANG_ARGS)
@public.benchpress_preprocessor
def ClangFormat(text: str) -> str:
"""Run clang-format on a source to enforce code style.
Args:
text: The source code to run through clang-format.
Returns:
The output of clang-format.
Raises:
ClangFormatException: In case of an error.
ClangTimeout: If clang-format does not complete before timeout_seconds.
"""
return clang.ClangFormat(text, ".c")
@public.benchpress_preprocessor
def Parse(text: str) -> str:
"""A preprocessor which attempts to parse the given code.
Args:
text: Code to compile.
Returns:
The input code, unmodified, if parsing succeeds.
"""
clang.Parse(text, ".c", [])
return text
@public.benchpress_preprocessor
def NormalizeIdentifiers(text: str) -> str:
"""Normalize identifiers in C++ source code.
Args:
text: The source code to rewrite.
Returns:
Source code with identifier names normalized.
Raises:
RewriterException: If rewriter found nothing to rewrite.
ClangTimeout: If rewriter fails to complete within timeout_seconds.
"""
return normalizer.NormalizeIdentifiers(text, ".c", [])
@public.benchpress_preprocessor
def SequentialNormalizeIdentifiers(text: str) -> str:
"""Normalize identifiers sequentially in OpenCL source code.
Args:
text: The source code to rewrite.
Returns:
Source code with identifier names normalized.
Raises:
RewriterException: If rewriter found nothing to rewrite.
ClangTimeout: If rewriter fails to complete within timeout_seconds.
"""
return normalizer.NormalizeIdentifiers(
text, ".c", [], sequential_rewrite = True
)
@public.benchpress_preprocessor
def ExtractFunctions(text: str) -> str:
"""Splits translation unit into separate functions using tokenizer.
WARNING! Functions might need formatting after this preprocessor,
if you care about formatting.
Args:
text: The source code to extract functions from.
Returns:
List of separate string functions.
"""
return clang.ExtractFunctions(text, ".c", [])
def ExtractStructs(text: str) -> typing.List[typing.Dict[str, typing.List]]:
"""Splits translation unit into separate structs.
Args:
src: The source code to compile.
suffix: The suffix to append to the source code temporary file. E.g. '.c'
for a C program.
cflags: A list of flags to be passed to clang.
Returns:
List of separate string structs.
"""
return clang.ExtractStructs(text, ".c", CLANG_ARGS)
@public.benchpress_preprocessor
def MinimumStatement1(text: str) -> str:
"""Check that file contains at least one statement.
Args:
text: The source to verify.
Returns:
src: The unmodified input src.
Raises:
    ValueError: If src has no semi-colons.
"""
if ';' not in text:
raise ValueError
return text
@public.benchpress_preprocessor
def StripIncludes(text: str) -> str:
"""Removes include statements from sourcecode.
Args:
text: The source code to strip includes from.
Returns:
Processed source code.
"""
lines = []
for line in text.split('\n'):
if not '#include ' in line and not '# include ' in line:
lines.append(line)
return '\n'.join(lines)
@public.benchpress_preprocessor
def StripComments(text: str) -> str:
"""Strip C/C++ style comments.
Written by @markus-jarderot https://stackoverflow.com/a/241506/1318051
"""
def Replacer(match):
"""Regex replacement callback."""
s = match.group(0)
if s.startswith("/"):
return " " # note: a space and not an empty string
else:
return s
return C_COMMENT_RE.sub(Replacer, text)
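# Minimal sketch added for illustration (not part of the original module): the
# regex-based stripper removes // and /* */ comments but leaves string literals intact.
def _example_strip_comments() -> str:
  src = 'int x; // trailing comment\nchar* s = "/* not a comment */"; /* block */\n'
  return StripComments(src)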
| 8,729 | 26.45283 | 99 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/preprocessors/preprocessors.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocess source code files for machine learning."""
import importlib
import pathlib
import typing
from importlib import util as importlib_util
from detect_secrets import main as secrets_main
from detect_secrets.plugins.common import initialize as secrets_init
from deeplearning.benchpress.preprocessors import public
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
# Import type alias to public module.
PreprocessorFunction = public.PreprocessorFunction
def _ImportPreprocessorFromFile(module_path: pathlib.Path, function_name: str):
"""Import module from an absolute path to file, e.g. '/foo/bar.py'."""
if not module_path.is_file():
raise ValueError(f"File not found: {module_path}")
try:
spec = importlib_util.spec_from_file_location("module", str(module_path))
module = importlib_util.module_from_spec(spec)
spec.loader.exec_module(module)
except ImportError as e:
raise ImportError(f"Failed to import module {module_path}: {e}")
if not hasattr(module, function_name):
raise AttributeError(
f"Function {function_name} not found in module {module_path}"
)
return getattr(module, function_name)
def _ImportPreprocessorFromModule(module_name: str, function_name: str):
"""Import module from a fully qualified module name, e.g. 'foo.bar'."""
try:
module = importlib.import_module(module_name)
except (ModuleNotFoundError, AttributeError):
raise AttributeError(f"Module {module_name} not found.")
if not hasattr(module, function_name):
raise AttributeError(
f"Function {function_name} not found in module {module_name}"
)
function_ = getattr(module, function_name)
if not function_.__dict__.get("is_benchpress_preprocessor"):
raise AttributeError(
f"Preprocessor {function_name} not decorated with @benchpress_preprocessor"
)
return function_
def GetPreprocessorFunction(name: str) -> public.PreprocessorFunction:
"""Lookup a preprocess function by name.
A preprocessor is a function which takes a single argument 'text' of type str,
and returns a str. The name is in the form <module>:<name>, where <name> is
the name of a python function, and <module> is either a fully qualified module
name, or an absolute path to the module file. For example, the name
'deeplearning.benchpress.preprocessors.cxx:Compile' will return the function
'Compile' in the module 'deeplearning.benchpress.preprocessors.cxx'. The name
'/tmp/my_preprocessors.py:Transform' will return the function Transform() in
the module defined at '/tmp/my_preprocessors.py'.
Args:
name: The name of the preprocessor to get.
Returns:
The python preprocessor function.
Raises:
UserError: If the requested name cannot be found or is not a
@benchpress_preprocessor decorated function.
"""
components = name.split(":")
if len(components) != 2:
raise ValueError(f"Invalid preprocessor name {name}")
module_name, function_name = components
if module_name[0] == "/":
return _ImportPreprocessorFromFile(pathlib.Path(module_name), function_name)
else:
return _ImportPreprocessorFromModule(module_name, function_name)
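# Minimal sketch added for illustration (not part of the original file): resolving a
# preprocessor by its "<module>:<function>" name. The module path below is assumed
# to be the common.py shipped with this package.
def _example_get_preprocessor_function() -> public.PreprocessorFunction:
  return GetPreprocessorFunction(
    "deeplearning.benchpress.preprocessors.common:StripTrailingWhitespace"
  )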
def Preprocess(text: str, preprocessors: typing.List[str]) -> str:
"""Preprocess a text using the given preprocessor pipeline.
If preprocessing succeeds, the preprocessed text is returned.
Args:
text: The input to be preprocessed.
preprocessors: The list of preprocessor functions to run. These will be
passed to GetPreprocessorFunction() to resolve the python implementations.
Returns:
    A generator yielding (text, success) tuples, one per pre-processed input.
Raises:
ValueError, UnicodeError
"""
preprocessor_functions = [GetPreprocessorFunction(p) for p in preprocessors]
def PreprocessSingle(text, preprocessors: typing.List[public.benchpress_preprocessor]):
"""
This recursive generator is an elegant way to manage preprocessors that decide to split one text into many,
without destroying the whole preprocessing pipeline. The generator creates a stream of pre-processed files,
in case one - or more - preprocessing functions return a list of strings.
"""
preprocessor_success = True
for idx, pr in enumerate(preprocessors):
if isinstance(text, str):
try:
text = pr(text)
except ValueError as e:
yield str(e), False
return
except UnicodeError:
yield "UnicodeError", False
return
except OSError:
yield "OSError: Memory Allocation", False
return
elif isinstance(text, list):
for item in text:
for t, pc in PreprocessSingle(item, preprocessors[idx:]):
yield t, pc
return
else:
raise TypeError("Preprocessor has returned type: {}".format(type(text)))
if isinstance(text, str):
yield text, preprocessor_success
elif isinstance(text, list):
for i in text:
yield i, preprocessor_success
return PreprocessSingle(text, preprocessor_functions)
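# Minimal sketch added for illustration (not part of the original file): consuming
# the generator returned by Preprocess(). The preprocessor named below is assumed
# to exist in this package's common.py.
def _example_preprocess_stream(src: str) -> typing.List[str]:
  kept = []
  for text, success in Preprocess(
    src, ["deeplearning.benchpress.preprocessors.common:StripDuplicateEmptyLines"]
  ):
    if success:
      kept.append(text)
  return kept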
def PreprocessFile(
path: str, preprocessors: typing.List[str], inplace: bool
) -> str:
"""Preprocess a file and optionally update it.
Args:
text: The input to be preprocessed.
preprocessors: The list of preprocessor functions to run. These will be
passed to GetPreprocessorFunction() to resolve the python implementations.
inplace: If True, the input file is overwritten with the preprocessed code,
unless the preprocessing fails. If the preprocessing fails, the input
file is left unmodified.
Returns:
Preprocessed source input as a string.
Raises:
ValueError
"""
with open(path) as infile:
contents = infile.read()
preprocessed = Preprocess(contents, preprocessors)
if inplace:
with open(path, "w") as outfile:
outfile.write(preprocessed)
return preprocessed
@public.benchpress_preprocessor
def RejectSecrets(text: str) -> str:
"""Test for secrets such as private keys in a text.
Args:
text: The text to check.
Returns:
The unmodified text.
Raises:
ValueError: In case the text contains secrets.
"""
args = secrets_main.parse_args(["scan"])
plugins = secrets_init.from_parser_builder(args.plugins, exclude_lines_regex="")
for plugin in plugins:
if plugin.analyze_string(text, 0, "does_not_matter"):
raise ValueError(plugin.__class__.__name__)
return text
| 7,058 | 33.602941 | 113 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/dashboard/dashboard.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A flask server which renders test results."""
import os
import sys
import threading
import pathlib
import glob
import random
import flask
import flask_sqlalchemy
import portpicker
import shutil
import sqlalchemy as sql
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.samplers import validation_database
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.dashboard import dashboard_db
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.proto import internal_pb2
from absl import flags
import humanize
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
# Disable flask banner on load.
_cli = sys.modules["flask.cli"]
_cli.show_server_banner = lambda *x: None
flags.DEFINE_integer(
"clgen_dashboard_port", None, "The port to launch the server on.",
)
MEDIA_PATH = pathlib.Path(environment.DASHBOARD_STATIC, "images")
MEDIA_PATH.mkdir(exist_ok = True)
flask_app = flask.Flask(
__name__,
template_folder = environment.DASHBOARD_TEMPLATES,
static_folder = environment.DASHBOARD_STATIC,
)
db = flask_sqlalchemy.SQLAlchemy(flask_app)
data = {}
cached_models = {}
cached_corpuses = {}
cached_samplers = {}
def GetBaseTemplateArgs():
l.logger().debug("deeplearning.clgen.dashboard.GetBaseTemplateArgs()")
return {
"urls": {
"cache_tag": str(5),
"site_css": flask.url_for("static", filename="site.css"),
"site_js": flask.url_for("static", filename="site.js"),
},
"build_info": {
"html": "Description",
"version": 2,
},
}
def parseCorpus(workspace_path):
corpuses = []
if (workspace_path / "corpus" / "encoded").exists():
corpus_path = workspace_path / "corpus" / "encoded"
for corpus_sha in corpus_path.iterdir():
encoded_db = encoded.EncodedContentFiles("sqlite:///{}".format(corpus_sha / "encoded.db"), must_exist = True)
corpus = {
'path': corpus_path / corpus_sha,
'sha' : str(corpus_sha.stem),
'datapoint_count': encoded_db.size,
'summary': "{} datapoint corpus, {}".format(encoded_db.size, str(corpus_sha.stem)),
'models' : parseModels(workspace_path, str(corpus_sha.stem))
}
global cached_corpuses
cached_corpuses[crypto.sha256_str(str(workspace_path.name) + str(corpus_sha.name))] = corpus
corpuses.append(corpus)
return corpuses
def parseModels(workspace_path, corpus_sha: str):
models = []
if (workspace_path / "model").exists():
for model_sha in (workspace_path / "model").iterdir():
model_path = workspace_path / "model" / model_sha
if (model_path / "tokenizer").exists() and pathlib.Path(os.readlink(model_path / "tokenizer")).parent.name == corpus_sha:
if (model_path / "META.pbtxt").exists():
meta = parseMeta(model_path / "META.pbtxt")
model = {
'path' : model_path,
'sha' : str(model_sha.name),
'config' : meta,
'tokenizer' : tokenizers.TokenizerBase.FromFile(model_path / pathlib.Path(os.readlink(model_path / "tokenizer"))),
'training_log': parseTrainLogs(model_path / "logs"), # TODO
'validation' : parseValidationDB(model_path / "logs" / "validation_samples.db"),
'samplers' : parseSamplers(workspace_path, model_path / "samples", str(model_sha.name)), # TODO sample_db ?
'summary' : parseModelSummary(meta)
}
global cached_models
cached_models[crypto.sha256_str(str(workspace_path.name) + str(model_sha.name))] = model
models.append(model)
return models
def parseMeta(meta):
with open(meta, 'r') as f:
return f.read().splitlines()
def parseModelSummary(meta):
m = pbutil.FromString('\n'.join(meta), internal_pb2.ModelMeta())
if m.config.architecture.backend == model_pb2.NetworkArchitecture.TENSORFLOW_BERT:
summary = ("BERT, hs: {}, nhl: {}, atth: {}, imsz: {}, pemb: {}, preds: {}, dp: {}, mprob: {}, {}"
.format(m.config.architecture.hidden_size, m.config.architecture.num_hidden_layers, m.config.architecture.num_attention_heads,
m.config.architecture.intermediate_size, m.config.architecture.max_position_embeddings, m.config.training.max_predictions_per_seq,
m.config.training.dupe_factor, round(m.config.training.masked_lm_prob, 3),
"mask" if m.config.training.data_generator.HasField("mask") else
"hole-{},{}".format(
(m.config.training.data_generator.hole.absolute
if m.config.training.data_generator.hole.HasField("absolute_length")
else m.config.training.data_generator.hole.relative_length),
"unf" if m.config.training.data_generator.hole.HasField("uniform_distribution") else
"norm-{},{}".format(
round(m.config.training.data_generator.hole.normal_distribution.mean, 2),
round(m.config.training.data_generator.hole.normal_distribution.variance, 2)
)
)
)
)
else:
raise NotImplementedError
return summary
def parseSamplerSummary(meta):
m = pbutil.FromString('\n'.join(meta), internal_pb2.SamplerMeta())
summary = ("Sequence length: {}, temperature: {}".format(
m.config.sequence_length,
m.config.temperature_micros / 1e6,
)
)
return summary
def parseTrainLogs(logs):
log_tensors = {}
if len(glob.glob(str(logs / "events.out.tfevents*"))) != 0:
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
event_acc = EventAccumulator(str(logs))
event_acc.Reload()
if 'scalars' in event_acc.Tags():
for tag in event_acc.Tags()['scalars']:
wall_time, steps, value = zip(*event_acc.Scalars(tag))
log_tensors[tag] = [{'wall_time': w, 'step_num': s, 'value': v} for w, s, v in zip(wall_time, steps, value)]
return log_tensors
def parseValidationDB(db_path):
validation_db = {
'val_sample_count': -1,
'path': None,
'val_metrics': [],
'val_samples': [],
}
try:
if db_path.exists():
validation_db['path'] = "sqlite:///{}".format(db_path)
val_db = validation_database.ValidationDatabase(validation_db['path'], must_exist = True)
validation_db['val_sample_count'] = val_db.count
except:
validation_db['val_sample_count'] = -1
validation_db['path'] = None
return validation_db
def parseSamplers(workspace_path, sample_path, model_sha):
global cached_samplers
model_samplers = []
if sample_path.exists():
for sampler_sha in sample_path.iterdir():
if ((workspace_path / "sampler" / sampler_sha.name / "META.pbtxt").exists() and
(workspace_path / "sampler" / sampler_sha.name / "samples" / model_sha).exists()):
meta = parseMeta(str(workspace_path / "sampler" / sampler_sha.name / "META.pbtxt"))
path = (workspace_path / "sampler" / sampler_sha.name / "samples" / model_sha)
sample_dbs = {}
for db in path.iterdir():
if db.suffix == ".db":
sample_dbs[db.stem] = db
sampler = {
'path': path,
'sha': sampler_sha.name,
'config': meta,
'summary': parseSamplerSummary(meta),
'sample_dbs': sample_dbs,
}
cached_samplers[sampler_sha.name] = sampler
model_samplers.append(sampler)
return model_samplers
def parseData():
dashboard_path = pathlib.Path(FLAGS.workspace_dir).absolute()
workspaces = [p for p in dashboard_path.iterdir() if p.is_dir()]
global data
data = {
"workspaces": {
p: {
'name': p.stem,
'path': p,
'corpuses': parseCorpus(p),
} for p in workspaces
},
}
return data
@flask_app.route("/")
def index():
global data
data = parseData()
return flask.render_template(
"dashboard.html", data = data, **GetBaseTemplateArgs()
)
@flask_app.route("/<string:workspace>/corpus/<string:corpus_sha>/")
def corpus(workspace: str, corpus_sha: str):
global data
global cached_corpuses
if data == {}:
data = parseData()
target_sha = crypto.sha256_str(str(workspace) + corpus_sha)
corpus = cached_corpuses[target_sha]
corpus_stats = []
for d in glob.glob(str(corpus['path'] / "*.png")):
png_path = pathlib.Path(d)
dest_file = MEDIA_PATH / workspace / corpus_sha / png_path.name
dest_file.parent.mkdir(exist_ok = True, parents = True)
shutil.copyfile(
png_path,
str(dest_file)
)
corpus_stats.append(
{
'name': png_path.stem,
'plot':
"/" + str(dest_file.relative_to(pathlib.Path(flask_app.static_folder).parent))
}
)
corpus['stats'] = corpus_stats
print(corpus['summary'])
return flask.render_template("corpus.html", data = corpus, **GetBaseTemplateArgs())
@flask_app.route("/<string:workspace>/model/<string:model_sha>/model_specs")
def model_specs(workspace: str, model_sha: str):
global data
global cached_models
if data == {}:
data = parseData()
target_sha = crypto.sha256_str(str(workspace) + model_sha)
current_model = cached_models[target_sha]
spec_data ={
'config': current_model['config']
}
return flask.render_template("model_specs.html", data = spec_data, **GetBaseTemplateArgs())
@flask_app.route("/<string:workspace>/model/<string:model_sha>/dataset")
def dataset(workspace: str, model_sha: str):
global data
global cached_models
if data == {}:
data = parseData()
target_sha = crypto.sha256_str(str(workspace) + model_sha)
current_model = cached_models[target_sha]
datasets = []
for d in glob.glob(str(current_model['path'] / "dataset" / "*.png")):
png_path = pathlib.Path(d)
dest_file = MEDIA_PATH / workspace / model_sha / "dataset" / png_path.name
dest_file.parent.mkdir(exist_ok = True, parents = True)
shutil.copyfile(
png_path,
str(dest_file)
)
datasets.append(
{
'name': png_path.stem,
'plot':
"/" + str(dest_file.relative_to(pathlib.Path(flask_app.static_folder).parent))
}
)
spec_data = {
'summary' : current_model['summary'],
'workspace': workspace,
'model_sha': model_sha,
'datasets' : datasets,
}
return flask.render_template("dataset.html", data = spec_data, **GetBaseTemplateArgs())
@flask_app.route("/<string:workspace>/sampler/<string:sampler_sha>/sampler_specs")
def sampler_specs(workspace: str, sampler_sha: str):
global data
global cached_samplers
if data == {}:
data = parseData()
current_sampler = cached_samplers[sampler_sha]
for i, l in enumerate(current_sampler['config']):
if 'start_text' in l:
current_sampler['config'][i] = current_sampler['config'][i].replace("\\n", "\n")
spec_data ={
'config': current_sampler['config']
}
return flask.render_template("sampler_specs.html", data = spec_data, **GetBaseTemplateArgs())
@flask_app.route("/<string:workspace>/model/<string:model_sha>/validation")
def validation_samples(workspace: str, model_sha: str):
global data
global cached_models
if data == {}:
data = parseData()
target_sha = crypto.sha256_str(str(workspace) + model_sha)
current_model = cached_models[target_sha]
validation = current_model['validation']
if validation['path']:
val_db = validation_database.ValidationDatabase(str(validation['path']), must_exist = True)
with val_db.Session() as session:
validation['val_samples'] = session.query(validation_database.BERTValFile).all()
validation['val_metrics'] = session.query(validation_database.ValResults).all()
# random.shuffle(validation['val_samples'])
for sample in validation['val_samples']:
processed_input_ids = []
if '[HOLE]' in sample.input_ids:
mask_type = '[HOLE]'
elif '[MASK]' in sample.input_ids:
mask_type = '[MASK]'
else:
mask_type = ''
if mask_type == '[HOLE]':
input_ids = sample.input_ids.split(mask_type)
mask_num = sample.num_targets
for i in range(mask_num):
processed_input_ids += [
{
'text': input_ids[i],
'color': 'plain',
'length': len(input_ids[i]),
},
{
'text': mask_type,
'color': 'hole',
'length': int(sample.masked_lm_lengths.split(',')[i]),
},
{
'text': sample.masked_lm_predictions.split('\n')[i].replace(' ', '[ ]').replace('\n', '\\n'),
'color': 'prediction',
'length': 1,
},
{
'text': sample.masked_lm_ids.split('\n')[i].replace(' ', '[ ]').replace('\n', '\\n'),
'color': 'target',
'length': 1,
},
]
while i < len(input_ids) - 1:
i += 1
processed_input_ids.append(
{
'text': input_ids[i],
'color': 'plain',
'length': len(input_ids[i]),
},
)
elif mask_type == '[MASK]':
processed_input_ids = [
{
'text': sample.input_ids,
'color': 'plain',
}
]
sample.input_ids = processed_input_ids
validation['summary'] = current_model['summary']
validation['workspace'] = workspace
validation['model_sha'] = model_sha
return flask.render_template("validation_samples.html", data = validation, **GetBaseTemplateArgs())
@flask_app.route("/<string:workspace>/model/<string:model_sha>/sampling")
def sampling(workspace: str, model_sha: str):
global data
global cached_models
if data == {}:
data = parseData()
target_sha = crypto.sha256_str(str(workspace) + model_sha)
current_model = cached_models[target_sha]
samplers = current_model['samplers']
data = {
'summary' : current_model['summary'],
'workspace': workspace,
'model_sha': model_sha,
'samplers' : samplers,
}
return flask.render_template("sampling.html", data = data, **GetBaseTemplateArgs())
@flask_app.route("/<string:workspace>/model/<string:model_sha>/training")
def training(workspace: str, model_sha: str):
global data
global cached_models
if data == {}:
data = parseData()
data['plots'] = []
target_sha = crypto.sha256_str(str(workspace) + model_sha)
for d in glob.glob(str(cached_models[target_sha]['path'] / "logs" / "*.png")):
png_file = pathlib.Path(d)
dest_file = MEDIA_PATH / workspace / model_sha / "logs" / png_file.name
dest_file.parent.mkdir(exist_ok = True, parents = True)
shutil.copyfile(png_file, dest_file)
data['plots'].append(
"/" + str(dest_file.relative_to(pathlib.Path(flask_app.static_folder).parent))
)
data['summary'] = cached_models[target_sha]['summary']
data['workspace'] = workspace
data['model_sha'] = model_sha
return flask.render_template("training.html", data = data, **GetBaseTemplateArgs())
@flask_app.route("/<string:workspace>/model/<string:model_sha>/sampler/<string:sampler_sha>/<string:sample_db>")
def sample_files(workspace: str, model_sha: str, sampler_sha: str, sample_db: str):
global data
global cached_models
if data == {}:
data = parseData()
current_sampler = {}
target_sha = crypto.sha256_str(str(workspace) + model_sha)
for sampler in cached_models[target_sha]['samplers']:
if sampler['sha'] == sampler_sha:
current_sampler = sampler
break
db_file = current_sampler['path'] / "{}.db".format(sample_db)
samples_db = samples_database.SamplesDatabase("sqlite:///{}".format(db_file), must_exist = True)
with samples_db.Session() as session:
sample_files = session.query(samples_database.Sample).all()
for sample in sample_files:
processed_feed = []
processed_indices = []
if '[HOLE]' in sample.sample_feed:
mask_type = '[HOLE]'
elif '[MASK]' in sample.sample_feed:
mask_type = '[MASK]'
else:
mask_type = ''
sample_feed = sample.sample_feed.split(mask_type)
sample_indices = sample.sample_indices.split('\n')
assert len(sample_feed) - 1 == len(sample_indices), ("sample hole length/generation mismatch: {}, {}"
.format(
len(sample_feed),
len(sample_indices),
)
)
prediction = sample.text
for i in range(len(sample_feed) - 1):
processed_feed += [
{
'text' : sample_feed[i],
'color': 'plain',
},
{
'text' : mask_type,
'color': 'mask',
},
]
processed_indices += [
{
'text' : sample_feed[i],
'color': 'plain',
},
{
'text' : mask_type,
'color': 'mask',
},
{
'text' : sample_indices[i].replace("\\n", "\n"),
'color': 'prediction',
},
]
while i < len(sample_feed) - 1:
i += 1
processed_indices.append(
{
'text': sample_feed[i],
'color': 'plain',
},
)
processed_feed.append(
{
'text': sample_feed[i],
'color': 'plain'
}
)
sample.sample_indices = processed_indices
sample.sample_feed = processed_feed
sample_specs = {
'summary' : cached_models[target_sha]['summary'],
'workspace' : workspace,
'model_sha' : model_sha,
'samples' : sample_files,
}
return flask.render_template("sample_files.html", data = sample_specs, **GetBaseTemplateArgs())
@flask_app.route("/corpus/<int:corpus_id>/encoded/random/")
def random_encoded_contentfile(corpus_id: int):
l.logger().debug("deeplearning.clgen.dashboard.random_encoded_contentfile()")
(encoded_url,) = (
db.session.query(dashboard_db.Corpus.encoded_url)
.filter(dashboard_db.Corpus.id == corpus_id)
.one()
)
encoded_db = encoded.EncodedContentFiles(encoded_url, must_exist=True)
with encoded_db.Session() as session:
(random_id,) = (
session.query(encoded.EncodedContentFile.id)
.order_by(encoded_db.Random())
.limit(1)
.one()
)
return flask.redirect(f"/corpus/{corpus_id}/encoded/{random_id}/", code=302)
def Launch(host: str = "0.0.0.0",
debug: bool = False,
):
l.logger().debug("deeplearning.clgen.dashboard.Launch()")
"""Launch dashboard in a separate thread."""
port = FLAGS.clgen_dashboard_port or portpicker.pick_unused_port()
l.logger().info("Launching BenchPress dashboard on http://{}:{}".format(host, port))
kwargs = {
"port": port,
# Debugging must be disabled when run in a separate thread.
"debug": debug,
"host": host,
}
db.create_all()
if debug:
flask_app.run(**kwargs)
else:
thread = threading.Thread(
target = flask_app.run, kwargs = kwargs
)
thread.setDaemon(True)
thread.start()
return thread
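# Minimal sketch added for illustration (not part of the original file): starting the
# dashboard from a script. Flags must already be parsed; with debug disabled, Launch()
# returns the daemon thread running the Flask app.
def _example_launch_dashboard() -> threading.Thread:
  return Launch(host = "127.0.0.1", debug = False)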
| 19,834 | 32.113523 | 139 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/dashboard/dashboard_db.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import sqlalchemy as sql
from sqlalchemy.dialects import mysql
from absl import flags
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
Base = sqlutil.Base()
class DashboardDatabase(sqlutil.Database):
def __init__(self, url: str, must_exist: bool):
super(DashboardDatabase, self).__init__(url, Base, must_exist=must_exist)
class Corpus(Base):
__tablename__ = "corpuses"
id: int = sql.Column(sql.Integer, primary_key=True)
config_proto_sha1: str = sql.Column(sql.String(40), nullable=False)
config_proto: str = sql.Column(
sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable=False
)
preprocessed_url: str = sql.Column(sql.String(256), nullable=False)
encoded_url: str = sql.Column(sql.String(256), nullable=False)
summary: str = sql.Column(sql.String(256), nullable=False)
__table_args__ = (
sql.UniqueConstraint(
"config_proto_sha1",
"preprocessed_url",
"encoded_url",
name="unique_corpus",
),
)
class Model(Base):
__tablename__ = "models"
id: int = sql.Column(sql.Integer, primary_key=True)
corpus_id: int = sql.Column(
sql.Integer, sql.ForeignKey("corpuses.id"), nullable=False,
)
config_proto_sha1: str = sql.Column(sql.String(40), nullable=False)
config_proto: str = sql.Column(
sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable=False
)
cache_path: str = sql.Column(sql.String(256), nullable=False)
summary: str = sql.Column(sql.String(256), nullable=False)
corpus: Corpus = sql.orm.relationship("Corpus")
__table_args__ = (
sql.UniqueConstraint(
"corpus_id", "config_proto_sha1", "cache_path", name="unique_model"
),
)
class TrainingTelemetry(Base):
__tablename__ = "training_telemetry"
id: int = sql.Column(sql.Integer, primary_key=True)
model_id: int = sql.Column(
sql.Integer, sql.ForeignKey("models.id"), nullable=False,
)
timestamp: datetime.datetime = sql.Column(
sql.DateTime().with_variant(mysql.DATETIME(fsp=3), "mysql"),
nullable=False,
default=lambda x: datetime.datetime.utcnow(),
)
epoch: int = sql.Column(sql.Integer, nullable=False)
step: int = sql.Column(sql.Integer, nullable=False)
training_loss: float = sql.Column(sql.Float, nullable=False)
learning_rate: float = sql.Column(sql.Float, nullable=False)
ns_per_batch: int = sql.Column(sql.Integer, nullable=False)
pending: bool = sql.Column(sql.Boolean, nullable=False, default=True)
model: Model = sql.orm.relationship("Model")
__table_args__ = (
sql.UniqueConstraint("model_id", "epoch", "step", name="unique_telemetry"),
)
class TrainingSample(Base):
__tablename__ = "training_samples"
id: int = sql.Column(sql.Integer, primary_key=True)
model_id: int = sql.Column(
sql.Integer, sql.ForeignKey("models.id"), nullable=False,
)
epoch: int = sql.Column(sql.Integer, nullable=False)
step: int = sql.Column(sql.Integer, nullable=False)
token_count: int = sql.Column(sql.Integer, nullable=False)
sample_time: int = sql.Column(sql.Integer, nullable=False)
sample: str = sql.Column(
sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable=False
)
model: Model = sql.orm.relationship("Model")
def GetDatabase() -> DashboardDatabase:
db: DashboardDatabase = DashboardDatabase(
url = "sqlite:///{}/dashboard.db".format(os.path.abspath(FLAGS.workspace_dir)), must_exist = False
)
l.logger().info("Created dashboard database {}".format(db.url))
return db
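# Minimal sketch added for illustration (not part of the original module): opening the
# dashboard database and inserting a Corpus row with made-up values.
def _example_add_corpus_row() -> None:
  db = GetDatabase()
  with db.Session() as session:
    session.add(
      Corpus(
        config_proto_sha1 = "0" * 40,
        config_proto = "contentfiles: 'example'",
        preprocessed_url = "sqlite:///example_preprocessed.db",
        encoded_url = "sqlite:///example_encoded.db",
        summary = "example corpus",
      )
    )
    session.commit()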
| 4,165 | 32.869919 | 108 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/corpuses/corpuses.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the logic and training corpuses.
A training corpus is a set of one or more "contentfiles", where each contentfile
is a file containing text to train over.
"""
import os
import pathlib
import random
import subprocess
import tempfile
import time
import typing
import json
import gdown
import humanize
import checksumdir
import numpy as np
from sqlalchemy.sql.expression import func
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import commit
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.corpuses import preprocessed
from deeplearning.benchpress.preprocessors import preprocessors
from deeplearning.benchpress.proto import corpus_pb2
from absl import flags
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_string(
"benchpress_local_path_prefix",
None,
"An optional prefix to use when resolving the path to a local directory "
"or archive. For example, given a corpus which is configured for a "
'local_directory with value "foo/bar" and a --benchpress_local_path_prefix of '
'"/tmp/", the absolute path of the corpus will resolve to "/tmp/foo/bar". '
"If the --benchpress_local_path_prefix is a directory, the trailing slash must "
"not be omitted.",
)
def AssertConfigIsValid(config: typing.Union[corpus_pb2.Corpus, corpus_pb2.PreTrainCorpus]
) -> typing.Union[corpus_pb2.Corpus, corpus_pb2.PreTrainCorpus]:
"""Assert that config proto is valid.
Args:
config: A Corpus proto.
Returns:
The Corpus proto.
Raises:
UserError: If the config is invalid.
"""
try:
# Early-exit to support corpuses derived from databases of pre-encoded
# content files.
# TODO(github.com/ChrisCummins/clgen/issues/130): Refactor after splitting
# Corpus class.
if config.HasField("pre_encoded_corpus_url"):
return config
pbutil.AssertFieldIsSet(config, "contentfiles")
if isinstance(config, corpus_pb2.Corpus):
pbutil.AssertFieldIsSet(config, "tokenizer")
pbutil.AssertFieldIsSet(config.tokenizer, "token_type")
pbutil.AssertFieldConstraint(config.tokenizer,
"token_type",
lambda x: x == "character" or x == "word" or x == "ast" or x == "incoder-1b" or x == "incoder-6b",
"tokenizer is either character or word based."
)
if config.tokenizer.token_type == "word":
pbutil.AssertFieldConstraint(config.tokenizer,
"token_list",
lambda x: os.path.isfile(str(ExpandConfigPath(x, path_prefix=FLAGS.benchpress_local_path_prefix))),
"Invalid token_list file"
)
else:
if config.HasField("tokenizer"):
raise ValueError("Pre-train corpus cannot have a distinct tokenizer.")
pbutil.AssertFieldIsSet(config, "contentfile_separator")
# Check that the preprocessor pipeline resolves to preprocessor functions.
[preprocessors.GetPreprocessorFunction(p) for p in config.preprocessor]
return config
except pbutil.ProtoValueError as e:
raise e
class Corpus(object):
"""Representation of a training corpus.
Please note corpus instances should be treated as immutable. Upon
instantiation, a corpus's properties are used to determine its hash. If you
modify a property after instantiation, the hash will be out of date, which
can lead to bad things happening.
"""
def __init__(self, config: typing.Union[corpus_pb2.Corpus, corpus_pb2.PreTrainCorpus]):
"""Instantiate a corpus from a proto config.
If this is a new corpus, a number of files will be created, which may
take some time.
Args:
config: A Corpus message.
Raises:
TypeError: If the config argument is not a Sampler proto.
UserError: In case the corpus is not found, or config contains invalid
options.
EmptyCorpusException: In case the corpus contains no data.
"""
if not isinstance(config, corpus_pb2.Corpus) and not isinstance(config, corpus_pb2.PreTrainCorpus):
raise TypeError(f"Config must be a Corpus proto. Received: '{type(config).__name__}'")
# Make a local copy of the configuration.
if isinstance(config, corpus_pb2.Corpus):
self.config = corpus_pb2.Corpus()
self.pre_train = False
else:
self.config = corpus_pb2.PreTrainCorpus()
self.pre_train = True
self.config.CopyFrom(AssertConfigIsValid(config))
self._tokenizer = None
self._created = False
# An in-memory cache of the encoded contentfiles indices arrays.
# Set and used in GetTrainingData().
self._indices_arrays: typing.Optional[typing.List[np.array]] = None
if environment.WORLD_RANK == 0:
cache.cachepath("corpus").mkdir(parents=True, exist_ok=True)
distrib.barrier()
self.content_id = ResolveContentId(self.config)
# Database of pre-processed files.
preprocessed_id = ResolvePreprocessedId(self.content_id, self.config)
if environment.WORLD_RANK == 0:
cache.cachepath("corpus", "preprocessed", preprocessed_id).mkdir(exist_ok=True, parents=True)
distrib.barrier()
preprocessed_db_path = cache.cachepath("corpus", "preprocessed",
preprocessed_id, "preprocessed.db")
if self.config.HasField("content_id") and not preprocessed_db_path.is_file():
raise ValueError(f"Content ID not found: '{self.content_id}'")
self.preprocessed = preprocessed.PreprocessedContentFiles(
f"sqlite:///{preprocessed_db_path}"
)
# Create symlink to contentfiles.
if environment.WORLD_RANK == 0:
symlink = (pathlib.Path(self.preprocessed.url[len("sqlite:///") :]).parent / "contentfiles")
if not symlink.is_symlink():
if config.HasField("local_directory"):
os.symlink(
str(ExpandConfigPath(config.local_directory, path_prefix=FLAGS.benchpress_local_path_prefix)),
symlink,
)
elif config.HasField("local_tar_archive"):
os.symlink(
str(ExpandConfigPath(config.local_tar_archive, path_prefix=FLAGS.benchpress_local_path_prefix)),
symlink,
)
elif config.HasField("bq_database"):
os.symlink(
str(ExpandConfigPath(config.bq_database, path_prefix=FLAGS.benchpress_local_path_prefix)),
symlink,
)
# elif config.HasField("fetch_github"):
# os.symlink(
# str(ExpandConfigPath(config.fetch_github, path_prefix=FLAGS.benchpress_local_path_prefix)),
# symlink,
# )
distrib.barrier()
# Data of encoded pre-preprocessed files.
encoded_id = ResolveEncodedId(self.content_id, self.config)
if environment.WORLD_RANK == 0:
cache.cachepath("corpus", "encoded", encoded_id).mkdir(exist_ok=True, parents=True)
distrib.barrier()
db_path = cache.cachepath("corpus", "encoded", encoded_id, "encoded.db")
if self.config.HasField("pre_encoded_corpus_url"):
self.encoded = encoded.EncodedContentFiles(config.pre_encoded_corpus_url, self.pre_train)
else:
self.encoded = encoded.EncodedContentFiles(f"sqlite:///{db_path}", self.pre_train)
self.tokenizer_path = cache.cachepath(
"corpus", "encoded", encoded_id, "tokenizer.pkl"
)
if environment.WORLD_RANK == 0 and not self.config.HasField("pre_encoded_corpus_url"):
symlink = (pathlib.Path(self.encoded.url[len("sqlite:///") :]).parent / "preprocessed")
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
pathlib.Path(self.preprocessed.url[len("sqlite:///") :]).parent,
pathlib.Path(self.encoded.url[len("sqlite:///") :]).parent,
),
symlink,
)
self.hash = encoded_id
self.cache = cache.mkcache("corpus", "encoded", encoded_id)
if environment.WORLD_RANK == 0:
commit.saveCommit(self.cache.path)
commit.saveCommit(self.cache.path.parent.parent / "preprocessed" / preprocessed_id)
distrib.barrier()
l.logger().info("Initialized {}train corpus in {}".format("pre_" if self.pre_train else "", self.cache.path))
return
def GetShortSummary(self) -> str:
try:
corpus_size = humanize.naturalsize(self.encoded.token_count)
return (
f"{corpus_size} token corpus with {self.vocab_size}-element vocabulary"
)
except Exception:
return ""
def Create(self, tokenizer = None) -> None:
"""Create the corpus files.
Args:
tokenizer: In case of pre-training ONLY the tokenizer of fine-tuned corpus,
is provided as-is in the pre-training corpus as they must have the same vocab.
Raises:
EmptyCorpusException: If there are no content files, or no successfully
pre-processed files.
"""
if self.pre_train and not tokenizer:
raise ValueError("Tokenizer must be specified when encoding pre-training corpus.")
self._created = True
self.preprocessed.Create(self.config)
if environment.WORLD_RANK == 0:
if not self.preprocessed.size and not FLAGS.override_preprocessing:
raise ValueError(
f"Pre-processed corpus contains no files: '{self.preprocessed.url}'"
)
l.logger().info("Pre-processed {}train corpus in corpuses/{}".format("pre_" if self.pre_train else "", pathlib.Path(self.preprocessed.url).parent.stem))
start_time = time.time()
self._tokenizer = tokenizer
tokenizer = self.tokenizer
if not self.pre_train:
l.logger().info(
"{}: {} tokens".format(
type(tokenizer).__name__,
humanize.intcomma(tokenizer.vocab_size),
)
)
self.encoded.Create(
self.preprocessed, tokenizer, self.config.contentfile_separator
)
if environment.WORLD_RANK == 0:
l.logger().info("Encoded {}train corpus in corpuses/{}".format("pre_" if self.pre_train else "", pathlib.Path(self.encoded.url).parent.stem))
return
def GetTextCorpus(self, shuffle: bool) -> str:
"""Concatenate the entire corpus into a string.
Args:
shuffle: If true, randomize order of contentfiles.
Returns:
A concatenated corpus string.
"""
with self.preprocessed.Session() as session:
query = session.query(preprocessed.PreprocessedContentFile.text).filter(
preprocessed.PreprocessedContentFile.preprocessing_succeeded == True
)
if shuffle:
query = query.order_by(func.random())
return self.config.contentfile_separator.join([x[0] for x in query])
def GetTrainingDataGenerator(self, offset: int = None) -> typing.Generator:
with self.encoded.get_session() as session:
if offset is None:
for x in session.query(encoded.EncodedContentFile).yield_per(1000000):
yield list(x.indices_array)
else:
for x in session.query(encoded.EncodedContentFile).offset(offset).yield_per(1000000):
yield list(x.indices_array)
return
def GetTrainingDataFeaturesGenerator(self, offset: int = None) -> typing.Generator:
with self.encoded.get_session() as session:
if offset is None:
for x in session.query(encoded.EncodedContentFile).yield_per(1000000):
yield list(x.indices_array), x.features
else:
for x in session.query(encoded.EncodedContentFile).offset(offset).yield_per(1000000):
yield list(x.indices_array), x.features
return
def GetTrainingData(self,
shuffle: bool = False,
                      sequence_length: int = None,
) -> np.ndarray:
"""Concatenate the entire encoded corpus into an array.
Args:
shuffle: If true, randomize order of encoded contentfiles.
sequence_length: If set, query is optimized to bring only fitting sequences.
Returns:
The encoded corpus.
"""
if self._indices_arrays is None:
with self.encoded.get_session() as session:
if sequence_length:
query = session.query(encoded.EncodedContentFile).filter(encoded.EncodedContentFile.tokencount <= sequence_length).yield_per(1000000)
else:
query = session.query(encoded.EncodedContentFile).yield_per(1000000)
self._indices_arrays = np.array([x.indices_array for x in query])
if shuffle:
random.shuffle(self._indices_arrays)
return self._indices_arrays
def GetTrainingDataWFeatures(self,
shuffle: bool = False,
                               sequence_length: int = None,
) -> np.ndarray:
"""Concatenate the entire encoded corpus into an array.
Args:
shuffle: If true, randomize order of encoded contentfiles.
sequence_length: If set, query is optimized to bring only fitting sequences.
Returns:
The encoded corpus.
"""
with self.encoded.get_session() as session:
if sequence_length:
data = [[x.indices_array, x.features] for x in session.query(encoded.EncodedContentFile).filter(encoded.EncodedContentFile.tokencount <= sequence_length).yield_per(1000000)]
else:
data = [[x.indices_array, x.features] for x in session.query(encoded.EncodedContentFile).yield_per(1000000)]
if shuffle:
random.shuffle(data)
return data
def GetTrainingFeatures(self, sequence_length: int = None) -> typing.List[typing.Dict[str, typing.Dict[str, float]]]:
"""
Get feature vectors of training instances within the specified sequence length.
"""
with self.encoded.get_session() as session:
if sequence_length:
query = session.query(encoded.EncodedContentFile).filter(encoded.EncodedContentFile.tokencount <= sequence_length)
else:
query = session.query(encoded.EncodedContentFile)
return [x.features for x in query]
def getFeaturesContents(self, sequence_length: int = None) -> typing.List[typing.Tuple[np.array, typing.Dict[str, float]]]:
"""
Get tuple of contents accompanied by feature vectors.
"""
with self.encoded.get_session() as session:
if sequence_length:
query = session.query(encoded.EncodedContentFile).filter(encoded.EncodedContentFile.tokencount <= sequence_length)
else:
query = session.query(encoded.EncodedContentFile)
return [(self.tokenizer.ArrayToCode(x.indices_array, with_formatting = False), x.features) for x in query]
def GetNumContentFiles(self) -> int:
"""Get the number of contentfiles which were pre-processed."""
with self.preprocessed.Session() as session:
return session.query(preprocessed.PreprocessedContentFile).count()
def GetNumPreprocessedFiles(self) -> int:
"""The number of succesfully pre-processed content files."""
with self.preprocessed.Session() as session:
return (
session.query(preprocessed.PreprocessedContentFile.text)
.filter(
preprocessed.PreprocessedContentFile.preprocessing_succeeded == True
)
.count()
)
@property
def tokenizer(self) -> tokenizers.TokenizerBase:
"""Must call Create() first."""
if not self._created:
raise ValueError("Must call Create() before accessing tokenizer property.")
if self._tokenizer is None:
if self.tokenizer_path.is_file():
self._tokenizer = tokenizers.TokenizerBase.FromFile(self.tokenizer_path)
else:
if environment.WORLD_RANK == 0:
self._tokenizer = self._CreateTokenizer()
l.logger().warn("Created tokenizer.")
# Just wait for the NFS to do its job.
kill_counter = 0
while not self.tokenizer_path.is_file():
time.sleep(5)
kill_counter += 1
if kill_counter > 200:
raise TimeoutError("NFS just won't write the tokenizer file in time! I waited for {} seconds!".format(humanize.intcomma(kill_counter * 5)))
distrib.barrier()
if environment.WORLD_RANK != 0:
self._tokenizer = tokenizers.TokenizerBase.FromFile(self.tokenizer_path)
return self._tokenizer
def _CreateTokenizer(self) -> tokenizers.TokenizerBase:
"""Creates and caches an tokenizer."""
corpus_txt = self.GetTextCorpus(shuffle=False)
if self.config.HasField("pre_encoded_corpus_url"):
encoded_db = encoded.EncodedContentFiles(
self.config.pre_encoded_corpus_url, self.pre_train
)
tokenizer = WordTokenizerFromEncodedDb(self.config.tokenizer, encoded_db)
else:
tokenizer = tokenizers.FromText(self.config.tokenizer, self.config.contentfile_separator, corpus_txt)
tokenizer.ToFile(self.tokenizer_path)
return tokenizer
@property
def vocab_size(self) -> int:
"""Get the number of elements in the corpus vocabulary."""
return self.tokenizer.vocab_size
@property
def size(self) -> int:
"""Return the size of the atomized corpus."""
with self.encoded.get_session() as session:
return session.query(
sql.func.sum(encoded.EncodedContentFile.tokencount)
      ).scalar()
def __eq__(self, rhs) -> bool:
if not isinstance(rhs, Corpus):
return False
return rhs.hash == self.hash
def __ne__(self, rhs) -> bool:
return not self.__eq__(rhs)
def GetVocabFromMetaTable(session: sqlutil.Session) -> typing.Dict[str, int]:
"""Read a vocabulary dictionary from the 'Meta' table of a database."""
return encoded.EncodedContentFiles.GetVocabFromMetaTable(session)
def StoreVocabInMetaTable(
session: sqlutil.Session, vocabulary: typing.Dict[str, int]
) -> None:
"""Store a vocabulary dictionary in the 'Meta' table of a database."""
return encoded.EncodedContentFiles.StoreVocabInMetaTable(session, vocabulary)
def WordTokenizerFromEncodedDb(config, encoded_db: encoded.EncodedContentFiles):
  """Create a greedy tokenizer for the vocabulary of a given encoded_db."""
  raise NotImplementedError
  # TODO(github.com/ChrisCummins/clgen/issues/130): This should be a method of
  # a concrete `DatabaseCorpus` class.
  with encoded_db.Session() as s:
    vocab = GetVocabFromMetaTable(s)
  l.logger().info("Loaded vocabulary of {} tokens from meta table".format(len(vocab)))
  return tokenizers.WordTokenizer(vocab)
def ExpandConfigPath(path: str, path_prefix: str = None) -> pathlib.Path:
"""Resolve an absolute path from a config proto string field.
This performs shell-style expansion of $VARS, and prefixes the
--benchpress_local_path_prefix flag value, if it is set.
Args:
path: The string value as it appears in the proto.
path_prefix: An optional string to prepend to the resolved path.
Returns:
An absolute path.
"""
# Set a useful variable for expansion.
if "HOME" not in os.environ:
os.environ["HOME"] = str(pathlib.Path("~").expanduser())
return (
pathlib.Path(os.path.expandvars((path_prefix or "") + path))
.expanduser()
.absolute()
)
def ResolveContentId(config: typing.Union[corpus_pb2.Corpus, corpus_pb2.PreTrainCorpus]) -> str:
"""Compute the hash of the input contentfiles.
This function resolves the unique sha1 checksum of a set of content files.
Args:
config: The corpus config proto.
Returns:
A hex encoded sha1 string.
"""
# We can take a massive shortcut if the content ID is already set in the
# config proto.
if config.HasField("content_id"):
# TODO(github.com/ChrisCummins/clgen/issues/130): Refactor this after splitting
# out Corpus class.
return config.content_id
elif config.HasField("pre_encoded_corpus_url"):
# TODO(github.com/ChrisCummins/clgen/issues/130): Refactor this after splitting
# out Corpus class.
return crypto.sha1_str(config.pre_encoded_corpus_url)
start_time = time.time()
if config.HasField("local_directory"):
local_directory = ExpandConfigPath(
config.local_directory, path_prefix=FLAGS.benchpress_local_path_prefix
)
# After the first time we compute the hash of a directory, we write it into
# a file. This is a shortcut to work around the fact that computing the
# directory checksum is O(n) with respect to the number of files in the
# directory (even if the directory is already cached by the hash cache).
# This means that it is the responsibility of the user to delete this cached
# file if the directory is changed.
hash_file_path = pathlib.Path(str(local_directory) + ".sha1.txt")
if hash_file_path.is_file():
l.logger().info("Reading directory hash: '{}'.".format(hash_file_path))
with open(hash_file_path) as f:
content_id = f.read().rstrip()
else:
# No hash file, so compute the directory hash and create it.
try:
# content_id = hc.GetHash(local_directory)
content_id = crypto.sha256_str(str(local_directory))
except FileNotFoundError as e:
raise ValueError(e)
# Create the hash file in the directory so that next time we don't need
# to reference the hash cache.
with open(hash_file_path, "w") as f:
print(content_id, file=f)
l.logger().info("Wrote directory hash: '{}'.".format(hash_file_path))
elif config.HasField("local_tar_archive"):
    # This is not an efficient means of getting the hash, as it requires always
    # unpacking the archive and reading the entire contents. It would be nicer
    # to maintain a cache which maps the mtime of tarballs to their content ID,
    # similar to how local_directory is implemented.
content_id = GetHashOfArchiveContents(
ExpandConfigPath(config.local_tar_archive, path_prefix=FLAGS.benchpress_local_path_prefix)
)
elif config.HasField("bq_database"):
content_id = crypto.sha256_str(str(config.bq_database))
# elif config.HasField("fetch_github"):
# gitfile_path = ExpandConfigPath(
# config.fetch_github, path_prefix=FLAGS.benchpress_local_path_prefix
# )
# gitfile_path.mkdir(exist_ok=True, parents=True)
# github_fetcher = github.GithubFetcher(gitfile_path)
# github_fetcher.fetch()
# hash_file_path = pathlib.Path(str(gitfile_path) + ".sha1.txt")
# if hash_file_path.is_file():
# l.logger().info("Reading directory hash: '{}'.".format(hash_file_path))
# with open(hash_file_path) as f:
# content_id = f.read().rstrip()
# else:
# # No hash file, so compute the directory hash and create it.
# try:
# content_id = hc.GetHash(gitfile_path)
# except FileNotFoundError as e:
# raise ValueError(e)
# # Create the hash file in the directory so that next time we don't need
# # to reference the hash cache.
# with open(hash_file_path, "w") as f:
# print(content_id, file=f)
# l.logger().info("Wrote directory hash: '{}'.".format(hash_file_path))
else:
raise NotImplementedError("Unsupported Corpus.contentfiles field value")
return content_id
def ResolvePreprocessedId(content_id: str,
config: typing.Union[corpus_pb2.Corpus, corpus_pb2.PreTrainCorpus]
) -> str:
"""Compute the hash of a corpus of preprocessed contentfiles.
The hash is computed from the ID of the input files and the serialized
representation of the preprocessor pipeline.
"""
# TODO(github.com/ChrisCummins/clgen/issues/130): Refactor this after splitting
# out Corpus class.
if config.pre_encoded_corpus_url:
return "null"
return crypto.sha1_list(content_id, *config.preprocessor)
def ResolveEncodedId(content_id: str,
config: typing.Union[corpus_pb2.Corpus, corpus_pb2.PreTrainCorpus]
) -> str:
"""Compute the hash of a corpus of preprocessed and encoded contentfiles.
The hash is computed from the ID of the input files and the serialized
representation of the config proto.
"""
if isinstance(config, corpus_pb2.Corpus):
config_without_contentfiles = corpus_pb2.Corpus()
else:
config_without_contentfiles = corpus_pb2.PreTrainCorpus()
config_without_contentfiles.CopyFrom(config)
# Clear the contentfiles field, since we use the content_id to uniquely
# identify the input files. This means that corpuses with the same content
# files delivered through different means (e.g. two separate but identical
# directories) have the same hash.
config_without_contentfiles.ClearField("contentfiles")
return crypto.sha1_list(
content_id, config_without_contentfiles.SerializeToString()
)
def GetHashOfArchiveContents(archive: pathlib.Path) -> str:
"""Compute the checksum of the contents of a directory.
Args:
archive: Path of the archive.
Returns:
Checksum of the archive.
Raises:
UserError: If the requested archive does not exist, or cannot be unpacked.
"""
if not (archive.parent / "corpus_registry.json").exists():
return crypto.sha256_str("temporary_unknown_corpus")
with open(archive.parent / "corpus_registry.json", 'r') as js:
reg = json.load(js)
if archive.name not in reg:
raise FileNotFoundError("Corpus {} is not registered in corpus_registry".format(archive.name))
if not archive.is_file():
l.logger().info("Corpus found in registry. Downloading from Google Drive...")
if environment.WORLD_RANK == 0:
gdown.download("https://drive.google.com/uc?id={}".format(reg[archive.name]['url']), str(archive))
distrib.barrier()
if 'hash' in reg[archive.name]:
return reg[archive.name]['hash']
else:
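    # No hash registered for this archive: fall back to unpacking it into a
    # temporary directory and hashing the extracted file tree.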
with tempfile.TemporaryDirectory(prefix="clgen_corpus_", dir = FLAGS.local_filesystem) as d:
pv = ["pv", str(archive)]
tar = ["tar", "xfj", "-", "-C", d]
try:
pv_proc = subprocess.Popen(pv, stdout = subprocess.PIPE)
subprocess.check_call(tar, stdin = pv_proc.stdout)
except subprocess.CalledProcessError:
raise ValueError(f"Archive unpack failed: '{archive}'")
return checksumdir.dirhash(d, "sha1")
| 27,021 | 38.914328 | 181 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/corpuses/benchmarks.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for benchmarks suite pre-processing, encoding and feature extraction.
"""
import typing
import tempfile
import contextlib
import pathlib
import gdown
import json
import tqdm
import subprocess
# import multiprocessing
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import feature_sampler
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.preprocessors import c
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import environment
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"filter_benchmarks_by_git",
False,
"Select to filter out benchmarks for which github-seq len has 0 distanced samples."
)
targets = {
'rodinia' : './model_zoo/benchmarks/rodinia_3.1.tar.bz2',
'BabelStream' : './model_zoo/benchmarks/BabelStream.tar.bz2',
'cf4ocl' : './model_zoo/benchmarks/cf4ocl.tar.bz2',
'CHO' : './model_zoo/benchmarks/cho.tar.bz2',
'FinanceBench' : './model_zoo/benchmarks/FinanceBench.tar.bz2',
'HeteroMark' : './model_zoo/benchmarks/HeteroMark.tar.bz2',
'mixbench' : './model_zoo/benchmarks/mixbench.tar.bz2',
'OpenDwarfs' : './model_zoo/benchmarks/OpenDwarfs.tar.bz2',
'parboil' : './model_zoo/benchmarks/parboil.tar.bz2',
'polybench' : './model_zoo/benchmarks/polybench.tar.bz2',
'grid_walk' : '',
}
class Benchmark(typing.NamedTuple):
path : pathlib.Path
name : str
contents : str
features : typing.Dict[str, float]
runtime_features : typing.Dict[str, float]
def preprocessor_worker(contentfile_batch):
kernel_batch = []
p, cf = contentfile_batch
try:
ks = opencl.ExtractSingleKernelsHeaders(
opencl.InvertKernelSpecifier(
opencl.StripDoubleUnderscorePrefixes(
opencl.ClangPreprocessWithShim(
c.StripIncludes(cf)))))
for k, h in ks:
kernel_batch.append((p, k, h))
except ValueError:
pass
return kernel_batch
def benchmark_worker(benchmark, feature_space, reduced_git_corpus = None):
p, k, h = benchmark
features = extractor.ExtractFeatures(
k,
[feature_space],
header_file = h,
use_aux_headers = False
)
if reduced_git_corpus and FLAGS.filter_benchmarks_by_git:
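    # Keep the benchmark only if its closest GitHub contentfile in this feature
    # space is at a non-zero distance, i.e. the benchmark is not already covered
    # exactly by the training corpus.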
closest_git = sorted([(cf, feature_sampler.calculate_distance(fts, features[feature_space], feature_space)) for cf, fts in reduced_git_corpus], key = lambda x: x[1])[0]
if features[feature_space] and closest_git[1] > 0:
return Benchmark(p, p.name, k, features[feature_space], {})
else:
if features[feature_space]:
return Benchmark(p, p.name, k, features[feature_space], {})
@contextlib.contextmanager
def GetContentFileRoot(path: pathlib.Path) -> typing.Iterator[pathlib.Path]:
"""
Extract tar archive of benchmarks and yield the root path of all files.
If benchmarks don't exist, download from google drive.
Yields:
The path of a directory containing content files.
"""
if not (path.parent / "benchmarks_registry.json").exists():
l.logger().warn("benchmarks_registry.json file not found. Assuming provided path is the benchmarks root path.")
yield pathlib.Path(path)
return
with open(path.parent / "benchmarks_registry.json", 'r') as js:
reg = json.load(js)
if path.name not in reg:
raise FileNotFoundError("Corpus {} is not registered in benchmarks_registry".format(path.name))
if not path.is_file():
l.logger().info("Benchmark found in registry. Downloading from Google Drive...")
gdown.download("https://drive.google.com/uc?id={}".format(reg[path.name]['url']), str(path))
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
with tempfile.TemporaryDirectory(prefix=path.stem, dir = tdir) as d:
cmd = [
"tar",
"-xf",
str(path),
"-C",
d,
]
subprocess.check_call(cmd)
l.logger().info("Unpacked benchmark suite {}".format(str(d)))
yield pathlib.Path(d)
def iter_cl_files(path: pathlib.Path) -> typing.List[typing.Tuple[pathlib.Path, str]]:
"""
Iterate base path and yield the contents of all .cl files.
"""
contentfiles = []
with GetContentFileRoot(path) as root:
file_queue = [p for p in root.iterdir()]
while file_queue:
c = file_queue.pop(0)
if c.is_symlink():
continue
elif c.is_dir():
file_queue += [p for p in c.iterdir()]
elif c.is_file() and c.suffix == ".cl":
try:
with open(c, 'r') as inf:
contentfiles.append((c, inf.read()))
except UnicodeDecodeError:
continue
l.logger().info("Scanned \'.cl\' files in {}".format(str(path)))
return contentfiles
def yield_cl_kernels(path: pathlib.Path) -> typing.List[typing.Tuple[pathlib.Path, str, str]]:
"""
  Fetch all .cl files from the base path, then atomize and preprocess
  the kernels into single instances.
Original benchmarks extracted from suites, go through a series of pre-processors:
1. Include statements are removed.
2. Code is preprocessed with shim (macro expansion).
3. Double underscores are removed.
4. void kernel -> kernel void
5. Translation units are split to tuples of (kernel, utility/global space)
"""
contentfiles = iter_cl_files(path)
kernels = []
# pool = multiprocessing.Pool()
# if environment.WORLD_RANK == 0:
# it = tqdm.tqdm(pool.map(preprocessor_worker, contentfiles), total = len(contentfiles), desc = "Yield {} benchmarks".format(path.stem))
# else:
# it = pool.map(preprocessor_worker, contentfiles)
for batch in contentfiles:
kernel_batch = preprocessor_worker(batch)
kernels += kernel_batch
l.logger().info("Pre-processed {} OpenCL benchmarks".format(len(kernels)))
# pool.close()
return kernels
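# A minimal usage sketch of the extraction pipeline above. The feature-space
# name is an illustrative assumption, not something this module defines:
#
#   kernels = yield_cl_kernels(pathlib.Path(targets['rodinia']))
#   benchmarks = [b for b in (benchmark_worker(k, "GreweFeatures") for k in kernels) if b]
#   benchmarks = resolve_benchmark_names(benchmarks)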
def resolve_benchmark_names(benchmarks: typing.List["Benchmark"]) -> typing.List["Benchmark"]:
"""
Resolves duplicate benchmark names. e.g. X, X, X -> X-1, X-2, X-3.
"""
renaming = {}
for benchmark in benchmarks:
if benchmark.name not in renaming:
renaming[benchmark.name] = [0, 0]
else:
renaming[benchmark.name][1] += 1
for idx, benchmark in enumerate(benchmarks):
if renaming[benchmark.name][1] != 0:
renaming[benchmark.name][0] += 1
benchmarks[idx] = benchmarks[idx]._replace(
name = "{}-{}".format(benchmark.name, renaming[benchmark.name][0])
)
return sorted(benchmarks, key = lambda x: x.name)
| 7,078 | 33.871921 | 172 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/corpuses/preprocessed.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines a database for pre-preprocessed content files."""
import contextlib
import datetime
import hashlib
import json
import multiprocessing
import os
import glob
import pathlib
import subprocess
import sys
import tempfile
import shutil
import time
import typing
import functools
import humanize
import tqdm
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from sqlalchemy.sql import func
from deeplearning.benchpress.preprocessors import preprocessors
from deeplearning.benchpress.proto import corpus_pb2
from deeplearning.benchpress.proto import internal_pb2
from deeplearning.benchpress.github import bigQuery_database as bqdb
from deeplearning.benchpress.util import fs
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import logging as l
from eupy.hermes import client
from absl import app, flags
FLAGS = flags.FLAGS
# flags.DEFINE_list(
# "local_dir_file_ext",
# None,
# "If local_directory corpus has been selected and only specific file types are required,"
# "pass a list of acceptable extensions here.",
# )
flags.DEFINE_boolean(
"override_preprocessing",
False,
"Set to override incomplete pre-processing. Does not set DB value to 'done'"
)
flags.DEFINE_string(
"preprocessed_databases",
None,
"Comma-separated list of paths for input preprocessed databases."
)
flags.DEFINE_string(
"merged_preprocessed_database",
None,
"Path for merged output preprocessed database"
)
Base = declarative.declarative_base()
class Meta(Base):
__tablename__ = "meta"
key: str = sql.Column(sql.String(1024), primary_key=True)
value: str = sql.Column(sql.String(1024), nullable=False)
class PreprocessedContentFile(Base):
__tablename__ = "preprocessed_contentfiles"
id: int = sql.Column(sql.Integer, primary_key=True)
# Relative path of the input file within the content files.
input_relpath: str = sql.Column(sql.String(3072), nullable=False, unique=False)
# Checksum of the input file.
input_sha256: str = sql.Column(sql.String(64), nullable=False)
input_charcount = sql.Column(sql.Integer, nullable=False)
input_linecount = sql.Column(sql.Integer, nullable=False)
# Checksum of the preprocessed file.
sha256: str = sql.Column(sql.String(64), nullable=False, index=True)
charcount = sql.Column(sql.Integer, nullable=False)
linecount = sql.Column(sql.Integer, nullable=False)
text: str = sql.Column(
sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable=False
)
# True if pre-processing succeeded, else False.
preprocessing_succeeded: bool = sql.Column(sql.Boolean, nullable=False)
# The number of milliseconds pre-preprocessing took.
preprocess_time_ms: int = sql.Column(sql.Integer, nullable=False)
# Pre-processing is parallelizable, so the actual wall time of pre-processing
# may be much less than the sum of all preprocess_time_ms. This column
# counts the effective number of "real" milliseconds during pre-processing
# between the last pre-processed result and this result coming in. The idea
# is that summing this column provides an accurate total of the actual time
# spent pre-processing an entire corpus. Will be <= preprocess_time_ms.
wall_time_ms: int = sql.Column(sql.Integer, nullable=False)
date_added: datetime.datetime = sql.Column(
sql.DateTime, nullable=False, default=datetime.datetime.utcnow
)
@classmethod
def FromPreprocessedContentFile(
cls,
preprocessed_file: "PreprocessedContentFile",
idx: int = None,
) -> "PreprocessedContentFile":
"""
Replicate PreprocessedContentFile
"""
return PreprocessedContentFile(
id = idx if idx else preprocessed_file.id,
input_relpath = preprocessed_file.input_relpath,
input_sha256 = preprocessed_file.input_sha256,
input_charcount = preprocessed_file.input_charcount,
input_linecount = preprocessed_file.input_linecount,
sha256 = preprocessed_file.sha256,
charcount = preprocessed_file.charcount,
linecount = preprocessed_file.linecount,
text = preprocessed_file.text,
preprocessing_succeeded = preprocessed_file.preprocessing_succeeded,
preprocess_time_ms = preprocessed_file.preprocess_time_ms,
wall_time_ms = preprocessed_file.wall_time_ms,
date_added = datetime.datetime.utcnow(),
)
@classmethod
def FromContentFile(
cls,
contentfile_root: pathlib.Path,
relpath: pathlib.Path,
preprocessors_: typing.List[str],
) -> "PreprocessedContentFile":
"""Instantiate a PreprocessedContentFile."""
start_time = time.time()
input_text = ""
preprocessing_succeeded = False
try:
with open(contentfile_root / relpath) as f:
try:
input_text = f.read()
except UnicodeDecodeError:
input_text = "/*corrupted file format*/"
except UnicodeError:
input_text = "/*corrupted file format*/"
except Exception:
input_text = "/*corrupted file format*/"
text_generator = preprocessors.Preprocess(input_text, preprocessors_)
# preprocessing_succeeded = True
except Exception as e:
raise("Unexpected exception: {}".format(e))
end_time = time.time()
preprocess_time_ms = int((end_time - start_time) * 1000)
input_text_stripped = input_text.strip()
return [ cls(
input_relpath = relpath,
input_sha256 = GetFileSha256(contentfile_root / (relpath)),
input_charcount = len(input_text_stripped),
input_linecount = len(input_text_stripped.split("\n")),
sha256 = hashlib.sha256(text.encode("utf-8")).hexdigest(),
charcount = len(text),
linecount = len(text.split("\n")),
text = text,
preprocessing_succeeded = success,
preprocess_time_ms = preprocess_time_ms,
wall_time_ms = preprocess_time_ms, # The outer-loop may change this.
date_added = datetime.datetime.utcnow(),
) for (text, success) in text_generator ]
@classmethod
def FromBQFile(
cls,
file: bqdb.bqMainFile,
preprocessors_: typing.List[str],
) -> "PreprocessedContentFile":
"""Instantiate a PreprocessedContentFile."""
start_time = time.time()
preprocessing_succeeded = False
try:
if file.size is not None and int(str(file.size)) < (10**6):
input_text = file.content
text_generator = preprocessors.Preprocess(input_text, preprocessors_)
# preprocessing_succeeded = True
else:
input_text = "<Redacted due to massive size>"
text_generator = [("File is exceptionally large", 0)]
except Exception as e:
l.logger().warn("Unexpected exception: {}".format(e), ddp_nodes = True)
return []
end_time = time.time()
preprocess_time_ms = int((end_time - start_time) * 1000)
input_text_stripped = input_text.strip()
return [ cls(
input_relpath = "main_files/{}".format(file.id),
input_sha256 = file.id,
input_charcount = len(input_text_stripped),
input_linecount = len(input_text_stripped.split("\n")),
sha256 = hashlib.sha256(text.encode("utf-8")).hexdigest(),
charcount = len(text),
linecount = len(text.split("\n")),
text = text,
preprocessing_succeeded = success,
preprocess_time_ms = preprocess_time_ms,
wall_time_ms = preprocess_time_ms, # The outer-loop may change this.
date_added = datetime.datetime.utcnow(),
) for (text, success) in text_generator ]
def PreprocessorWorker(job: str,
contentfile_root: pathlib.Path,
preprocessors: typing.List[str]
) -> PreprocessedContentFile:
"""The inner loop of a parallelizable pre-processing job."""
return PreprocessedContentFile.FromContentFile(
contentfile_root, job, preprocessors
)
def BQPreprocessorWorker(file: bqdb.bqMainFile, preprocessors: typing.List[str]) -> PreprocessedContentFile:
"""The inner loop of a parallelizable pre-processing job."""
ret = PreprocessedContentFile.FromBQFile(file, preprocessors)
return ret
class PreprocessedContentFiles(sqlutil.Database):
"""A database of pre-processed contentfiles."""
def __init__(self, url: str, must_exist: bool = False, is_replica = False):
if environment.WORLD_RANK == 0 or is_replica:
super(PreprocessedContentFiles, self).__init__(
url, Base, must_exist=must_exist
)
if environment.WORLD_SIZE > 1 and not is_replica:
# Conduct engine connections to replicated preprocessed chunks.
self.base_path = pathlib.Path(url.replace("sqlite:///", "")).resolve().parent
hash_id = self.base_path.name
try:
tdir = pathlib.Path(FLAGS.local_filesystem).resolve() / hash_id / "node_preprocessed"
except Exception:
tdir = pathlib.Path("/tmp").resolve() / hash_id / "node_preprocessed"
try:
tdir.mkdir(parents = True, exist_ok = True)
except Exception:
pass
self.replicated_path = tdir / "preprocessed_{}.db".format(environment.WORLD_RANK)
if environment.WORLD_RANK == 0:
with self.Session() as s:
if self.IsDone(s):
self.RechunkDB()
distrib.barrier()
self.replicated = PreprocessedContentFiles(
url = "sqlite:///{}".format(str(self.replicated_path)),
must_exist = must_exist,
is_replica = True
)
distrib.barrier()
return
def Create(self, config: corpus_pb2.Corpus):
"""
Create preprocessed database of raw corpus.
"""
## Check if main preprocessed.db is done.
if environment.WORLD_SIZE > 1:
if environment.WORLD_RANK == 0:
## If done, broadcast true message and return.
with self.Session() as session:
status = self.IsDone(session)
_ = distrib.broadcast(str(status))
if status:
return
else:
status = distrib.broadcast()
if status == "True":
return
if status != "False":
raise OSError("Broken distributed message: '{}'".format(status))
## For DDP only: If main DB not done, preprocess the replicas here.
sessmaker = self.Session if environment.WORLD_SIZE == 1 else self.replicated.Session
with sessmaker() as session:
if not self.IsDone(session):
self.Import(session, config)
self.SetDone(session)
session.commit()
## For DDP only: Merge replicas into main DB.
if environment.WORLD_SIZE > 1:
self.MergeReplicas()
# Logging output.
# num_input_files = session.query(PreprocessedContentFile).count()
# num_files = (
# session.query(PreprocessedContentFile)
# .filter(PreprocessedContentFile.preprocessing_succeeded == True)
# .count()
# )
# input_chars, input_lines, total_walltime, total_time, = session.query(
# func.sum(PreprocessedContentFile.charcount),
# func.sum(PreprocessedContentFile.linecount),
# func.sum(PreprocessedContentFile.wall_time_ms),
# func.sum(PreprocessedContentFile.preprocess_time_ms),
# ).first()
# char_count, line_count = (
# session.query(
# func.sum(PreprocessedContentFile.charcount),
# func.sum(PreprocessedContentFile.linecount),
# )
# .filter(PreprocessedContentFile.preprocessing_succeeded == True)
# .first()
# )
# set_mail = "Content files: {} chars, {} lines, {} files.\n".format(
# humanize.intcomma(input_chars),
# humanize.intcomma(input_lines),
# humanize.intcomma(num_input_files),
# )
# l.logger().info(
# "Content files: {} chars, {} lines, {} files.".format(
# humanize.intcomma(input_chars),
# humanize.intcomma(input_lines),
# humanize.intcomma(num_input_files),
# ), mail_level = 4
# )
# set_mail += "Pre-processed {} files in {} ({:.2f}x speedup).\n".format(
# humanize.intcomma(num_input_files),
# humanize.naturaldelta((total_walltime or 0) / 1000),
# (total_time or 1) / (total_walltime or 1),
# )
# l.logger().info(
# "Pre-processed {} files in {} ({:.2f}x speedup).".format(
# humanize.intcomma(num_input_files),
# humanize.naturaldelta((total_walltime or 0) / 1000),
# (total_time or 1) / (total_walltime or 1),
# ), mail_level = 4
# )
# set_mail += "Pre-processing discard rate: {:.1f}% ({} files).\n".format(
# (1 - (num_files / max(num_input_files, 1))) * 100,
# humanize.intcomma(num_input_files - num_files),
# )
# l.logger().info(
# "Pre-processing discard rate: {:.1f}% ({} files).".format(
# (1 - (num_files / max(num_input_files, 1))) * 100,
# humanize.intcomma(num_input_files - num_files),
# ), mail_level = 4
# )
# set_mail += "Pre-processed corpus: {} chars, {} lines, {} files.\n".format(
# humanize.intcomma(char_count),
# humanize.intcomma(line_count),
# humanize.intcomma(num_files),
# )
# l.logger().info(
# "Pre-processed corpus: {} chars, {} lines, {} files.".format(
# humanize.intcomma(char_count),
# humanize.intcomma(line_count),
# humanize.intcomma(num_files),
# ), mail_level = 4
# )
# if FLAGS.notify_me:
# client.getClient().send_message("clgen:preprocessed", set_mail)
return
def IsDone(self, session: sqlutil.Session):
if session.query(Meta).filter(Meta.key == "done").first():
return True
elif FLAGS.override_preprocessing:
return True
else:
return False
def SetDone(self, session: sqlutil.Session):
session.add(Meta(key="done", value="yes"))
return
def Import(self, session: sqlutil.Session, config: corpus_pb2.Corpus) -> None:
with self.GetContentFileRoot(config) as contentfile_root:
if not config.HasField("bq_database"):
if environment.WORLD_RANK == 0:
relpaths = set(self.GetImportRelpaths(contentfile_root))
done = set(
[x[0] for x in session.query(PreprocessedContentFile.input_relpath)]
)
todo = relpaths - done
l.logger().info(
"Preprocessing {} of {} content files".format(
humanize.intcomma(len(todo)),
humanize.intcomma(len(relpaths)),
)
)
chunk_size = 100000
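          # Split the pending relpaths into batches of at most `chunk_size` files,
          # so that each multiprocessing pool invocation works on a bounded chunk.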
jobs, total = [], 0
for idx, t in enumerate(todo):
if idx % chunk_size == 0:
jobs.append([t])
else:
jobs[-1].append(t)
total += 1
bar = tqdm.tqdm(total = total, desc = "Preprocessing", leave = True)
c = 0
last_commit = time.time()
wall_time_start = time.time()
for job_chunk in jobs:
try:
pool = multiprocessing.Pool()
for preprocessed_list in pool.imap_unordered(
functools.partial(
PreprocessorWorker,
contentfile_root = contentfile_root,
preprocessors = list(config.preprocessor)
),
job_chunk
):
for preprocessed_cf in preprocessed_list:
wall_time_end = time.time()
preprocessed_cf.wall_time_ms = int(
(wall_time_end - wall_time_start) * 1000
)
wall_time_start = wall_time_end
session.add(preprocessed_cf)
if wall_time_end - last_commit > 10:
session.commit()
last_commit = wall_time_end
c += 1
bar.update(1)
pool.close()
except KeyboardInterrupt as e:
pool.terminate()
raise e
except Exception as e:
pool.terminate()
raise e
session.commit()
else:
db = bqdb.bqDatabase("sqlite:///{}".format(contentfile_root), must_exist = True)
total = db.mainfile_count # Total number of files in BQ database.
total_per_node = total // environment.WORLD_SIZE # In distributed nodes, this is the total files to be processed per node.
if total == 0:
raise ValueError("Input BQ database {} is empty!".format(contentfile_root))
# Set of IDs that have been completed.
done = set(
[x[0].replace("main_files/", "") for x in session.query(PreprocessedContentFile.input_relpath)]
)
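        # Each node processes a contiguous [idx, limit) slice of the BQ main-file
        # table; the last node also absorbs the remainder of the integer division.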
chunk, idx = min(total_per_node, 10**8), environment.WORLD_RANK * total_per_node
limit = (environment.WORLD_RANK + 1) * total_per_node + (total % total_per_node if environment.WORLD_RANK == environment.WORLD_SIZE - 1 else 0)
if environment.WORLD_SIZE > 1:
bar = distrib.ProgressBar(total = total, offset = idx, desc = "Preprocessing DB")
else:
bar = tqdm.tqdm(total = total, desc = "Preprocessing DB", leave = True)
last_commit = time.time()
wall_time_start = time.time()
pool = multiprocessing.Pool(maxtasksperchild = 8192)
try:
while idx < limit:
chunk = min(chunk, limit - idx) # This is equivalent to l447/l448 but needed for last node that gets a bit more.
batch = db.main_files_batch(chunk, idx, exclude_id = done)
idx += chunk - len(batch) # This difference will be the number of already done files.
for preprocessed_list in pool.imap_unordered(
functools.partial(
BQPreprocessorWorker,
preprocessors = list(config.preprocessor)
), batch):
for preprocessed_cf in preprocessed_list:
wall_time_end = time.time()
preprocessed_cf.wall_time_ms = int(
(wall_time_end - wall_time_start) * 1000
)
wall_time_start = wall_time_end
session.add(preprocessed_cf)
if wall_time_end - last_commit > 1000:
session.commit()
last_commit = wall_time_end
idx += 1
bar.update(idx - bar.n)
pool.close()
except KeyboardInterrupt as e:
pool.terminate()
raise e
except Exception as e:
l.logger().error(e, ddp_nodes = True)
pool.terminate()
raise e
session.commit()
if environment.WORLD_SIZE > 1:
bar.finalize(idx)
return
def MergeReplicas(self) -> None:
"""
When distributed nodes work for the same preprocessed DB
this function moves finalized preprocessed chunks back into the AFS
and master node merges them into the final preprocessed.db
"""
shutil.copy(
self.replicated_path, self.base_path / "preprocessed_{}.db".format(environment.WORLD_RANK)
)
distrib.barrier()
if environment.WORLD_RANK == 0:
db_chunks = glob.glob(str(self.base_path / "preprocessed_*.db"))
dbs = [PreprocessedContentFiles(url = "sqlite:///{}".format(p), must_exist = True, is_replica = True) for p in db_chunks]
merge_db(dbs, self)
for p in db_chunks:
os.remove(p)
distrib.barrier()
# Once merging has been complete, cleanup the mess left at local clusters' filesystems.
if (self.replicated_path.parent / "bq_database_replica_{}.db".format(environment.WORLD_RANK)).exists():
os.remove(self.replicated_path.parent / "bq_database_replica_{}.db".format(environment.WORLD_RANK))
else:
l.logger().warn("I didn't find my local BQ replica at {}".format(self.replicated_path.parent / "bq_database_replica_{}.db".format(environment.WORLD_RANK)), ddp_nodes = True)
distrib.barrier()
return
@contextlib.contextmanager
def GetContentFileRoot(self, config: corpus_pb2.Corpus) -> pathlib.Path:
"""Get the path of the directory containing content files.
If the corpus is a local directory, this simply returns the path. Otherwise,
this method creates a temporary copy of the files which can be used within
the scope of this context.
Args:
config: The corpus config proto.
Returns:
The path of a directory containing content files.
"""
if config.HasField("local_directory"):
yield pathlib.Path(ExpandConfigPath(config.local_directory))
elif config.HasField("local_tar_archive"):
with tempfile.TemporaryDirectory(prefix="clgen_corpus_", dir = FLAGS.local_filesystem) as d:
l.logger().info("Unpacking {}...".format(ExpandConfigPath(config.local_tar_archive).name))
start_time = time.time()
if environment.WORLD_RANK == 0:
cmd = [
"tar",
"-xf",
str(ExpandConfigPath(config.local_tar_archive)),
"-C",
d,
]
subprocess.check_call(cmd)
distrib.barrier()
l.logger().info(
"Unpacked {} in {} ms".format(
ExpandConfigPath(config.local_tar_archive).name,
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
yield pathlib.Path(d)
elif config.HasField("bq_database"):
input_bq = pathlib.Path(ExpandConfigPath(config.bq_database))
if environment.WORLD_SIZE > 1:
target_bq = self.replicated_path.parent / "bq_database_replica_{}.db".format(environment.WORLD_RANK)
if not target_bq.exists():
shutil.copy(input_bq, target_bq)
yield target_bq
else:
yield input_bq
else:
raise NotImplementedError
@property
def size(self) -> int:
"""Return the total number of files in the pre-processed corpus.
This excludes contentfiles which did not pre-process successfully.
"""
with self.Session() as session:
return (
session.query(PreprocessedContentFile)
.filter(PreprocessedContentFile.preprocessing_succeeded == True)
.count()
)
@property
def input_size(self) -> int:
"""Return the total number of files in the pre-processed corpus.
This *includes* contentfiles which did not pre-process successfully.
"""
with self.Session() as session:
return session.query(PreprocessedContentFile).count()
@property
def char_count(self) -> int:
"""Get the total number of characters in the pre-processed corpus.
This excludes contentfiles which did not pre-process successfully.
"""
with self.Session() as session:
return (
session.query(func.sum(PreprocessedContentFile.charcount))
.filter(PreprocessedContentFile.preprocessing_succeeded == True)
.scalar()
)
@property
def line_count(self) -> int:
"""Get the total number of lines in the pre-processed corpus.
This excludes contentfiles which did not pre-process successfully.
"""
with self.Session() as session:
return (
session.query(func.sum(PreprocessedContentFile.linecount))
.filter(PreprocessedContentFile.preprocessing_succeeded == True)
.scalar()
)
@property
def input_char_count(self) -> int:
"""Get the total number of characters in the input content files."""
with self.Session() as session:
return session.query(
func.sum(PreprocessedContentFile.input_charcount)
).scalar()
@property
def input_line_count(self) -> int:
"""Get the total number of characters in the input content files."""
with self.Session() as session:
return session.query(
func.sum(PreprocessedContentFile.input_linecount)
).scalar()
def GetImportRelpaths(
self, contentfile_root: pathlib.Path
) -> typing.List[str]:
"""Get relative paths to all files in the content files directory.
Args:
contentfile_root: The root of the content files directory.
Returns:
A list of paths relative to the content files root.
Raises:
EmptyCorpusException: If the content files directory is empty.
"""
find_output = []
queue = [contentfile_root]
cpus = os.cpu_count()
multi_thr = min(cpus**2, 1600)
while queue:
if len(queue) >= multi_thr:
break
cur = queue.pop(0)
try:
for f in cur.iterdir():
if f.is_symlink():
continue
elif f.is_file():
if f.suffix in {'.c', '.cl'}:
find_output.append(str(f))
elif f.is_dir():
queue.append(f)
else:
continue
except PermissionError:
pass
except NotADirectoryError:
pass
except FileNotFoundError:
pass
except OSError:
pass
if queue:
p = multiprocessing.Pool(cpus)
for batch in p.imap_unordered(path_worker, queue):
find_output += batch
return find_output
def RechunkDB(self) -> None:
"""
When encoder is initialized after pre-processed DB has been created,
re-chunk preprocessed DB to replicas.
"""
## Get all data from main DB.
with self.Session() as session:
query = session.query(PreprocessedContentFile).filter(
PreprocessedContentFile.preprocessing_succeeded == True,
)
total = query.count()
data = query.all()
## Calculate chunk size
chunk_size = total // environment.WORLD_SIZE
for db_idx in tqdm.tqdm(range(environment.WORLD_SIZE), total = environment.WORLD_SIZE, desc = "Re-chunk DBs"):
replica = PreprocessedContentFiles(
url = "sqlite:///{}".format(self.replicated_path.parent / "preprocessed_{}.db".format(str(db_idx))),
must_exist = False,
is_replica = True
)
## If replica already is already done, skip.
with replica.Session() as s:
if self.IsDone(s):
continue
with replica.Session(commit = True) as s:
lb = chunk_size * db_idx
        ub = chunk_size * (db_idx + 1) if db_idx < environment.WORLD_SIZE - 1 else total
for dp in data[lb: ub]:
s.add(PreprocessedContentFile.FromPreprocessedContentFile(dp))
s.commit()
self.SetDone(s)
return
def path_worker(base_path) -> typing.List[str]:
paths = []
queue = [base_path]
while queue:
cur = queue.pop(0)
try:
for f in cur.iterdir():
if f.is_symlink():
continue
elif f.is_file():
if f.suffix in {'.c', '.cl'}:
paths.append(str(f))
elif f.is_dir():
queue.append(f)
else:
continue
except PermissionError:
pass
except NotADirectoryError:
pass
except FileNotFoundError:
pass
except OSError:
pass
return paths
def ExpandConfigPath(path: str) -> pathlib.Path:
return pathlib.Path(os.path.expandvars(path)).expanduser().absolute()
def GetFileSha256(path: pathlib.Path) -> str:
with open(path, "rb") as f:
return hashlib.sha256(f.read()).hexdigest()
def merge_db(dbs: typing.List[PreprocessedContentFiles], out_db: typing.List[PreprocessedContentFiles]) -> None:
"""
Collect data from a list of preprocessed databases and merge them.
"""
for db in dbs:
l.logger().info("Loading {}...".format(db.url))
pkey = out_db.input_size
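    # Offset primary keys by the current size of the output DB so that rows
    # merged from different replica databases never collide.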
with db.Session() as ses:
data = ses.query(PreprocessedContentFile).all()
with out_db.Session() as ses:
bar = tqdm.tqdm(total = len(data), desc = "DB Merging", leave = True)
for df in data:
ses.add(PreprocessedContentFile.FromPreprocessedContentFile(df, idx = pkey + df.id))
bar.update(1)
ses.commit()
with out_db.Session() as ses:
out_db.SetDone(ses)
ses.commit()
return
def compiling_text_to_huggingface_json(db_path: str, json_out: str) -> None:
"""
Converts preprocessed.db into json file with compiling samples
that can be read by huggingface Datasets.
"""
out_data = []
p = pathlib.Path(db_path).resolve()
out_p = pathlib.Path(json_out).resolve()
if not p.exists():
raise FileNotFoundError("{} does not exist!".format(db_path))
db = PreprocessedContentFiles(url = "sqlite:///{}".format(str(p)), must_exist = True)
with db.Session() as s:
data = [x for x in s.query(PreprocessedContentFile).all() if x.preprocessing_succeeded == True]
with open(str(out_p), 'w') as outf:
for dp in data:
json_el = json.dumps({
'file_name' : "{}.cl".format(dp.sha256),
'github_id' : str(dp.sha256),
'id' : dp.id,
'license' : "mit",
'path' : dp.input_relpath,
'repo_and_filename' : "",
'repo_name' : "",
'signature' : "",
'size' : dp.charcount,
'text' : dp.text
})
outf.write(json_el)
outf.write('\n')
return
def initMain(*args, **kwargs):
"""
Setup module's operations.
"""
  l.initLogger(name = "preprocessed")
if not FLAGS.preprocessed_databases:
raise ValueError("Please input preprocessed databases to merge as a comma separated list.")
db_paths = [pathlib.Path(p).absolute() for p in FLAGS.preprocessed_databases.replace(" ", "").split(",")]
for p in db_paths:
if not p.exists():
raise FileNotFoundError(p)
dbs = [PreprocessedContentFiles(url = "sqlite:///{}".format(str(p)), must_exist = True) for p in db_paths]
if not FLAGS.merged_preprocessed_database:
raise ValueError("You must set a path for merged_preprocessed_database")
out_db_path = pathlib.Path(FLAGS.merged_preprocessed_database).resolve()
out_db_path.parent.mkdir(exist_ok = True, parents = True)
out_db = PreprocessedContentFiles(url = "sqlite:///{}".format(str(out_db_path)), must_exist = False)
merge_db(dbs, out_db)
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0)
| 31,556 | 36.702509 | 179 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/corpuses/structs.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module handling Structs Database.
This database captures and handles the struct/class/union/enum
dependencies found during raw corpus preprocessing.
"""
import datetime
import time
import tempfile
import sqlite3
import pathlib
import typing
import multiprocessing
import hashlib
import tqdm
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from deeplearning.benchpress.preprocessors import clang
from deeplearning.benchpress.preprocessors import c
from deeplearning.benchpress.github import bigQuery_database as bqdb
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import logging as l
from absl import app, flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"datatypes_bq_db",
None,
"Set path for BQ database to parse datatypes."
)
flags.DEFINE_string(
"datatypes_db",
None,
"Set path for output datatypes database."
)
Base = declarative.declarative_base()
class Data(Base):
__tablename__ = "data"
"""
DB Table holding struct meta-data and stats.
"""
key : str = sql.Column(sql.String(1024), primary_key=True)
results : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
class Struct(Base, sqlutil.ProtoBackedMixin):
"""
A database row representing a parsed struct.
"""
__tablename__ = "structs"
# entry id.
id : int = sql.Column(sql.Integer, primary_key = True)
# Relative path of original bq entry.
input_relpath : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Sha256 of input bq entry
input_sha256 : str = sql.Column(sql.String(64), nullable = False)
# unique, indexable content has of struct.
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# Struct contents.
contents : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Struct name.
name : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Struct fields.
fields : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Number of fields a struct has.
num_fields : int = sql.Column(sql.Integer, nullable = False)
# Flag indicating if compilation works on this struct.
preprocessing_succeeded : int = sql.Column(sql.Integer, nullable = False)
# Repo name where struct was found.
repo_name : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Repo ref.
ref : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Wall time
wall_time_ms : int = sql.Column(sql.Integer, nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
id : int,
contents : str,
num_fields : int,
repo_name : str,
ref : str,
) -> 'Struct':
    return cls(id = id, contents = contents, num_fields = num_fields,
               repo_name = repo_name, ref = ref)
class DatatypeDB(sqlutil.Database):
"""A database directory of C/OpenCL composite types and functions."""
def __init__(self, url: str, must_exist: bool = False):
super(DatatypeDB, self).__init__(url, Base, must_exist = must_exist)
return
def FromBQ(entry: bqdb.bqMainFile):
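  """
  Extract struct/union/enum definitions from a single BigQuery content file and
  record, for each one, whether the isolated definition compiles on its own.
  """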
start_time = time.time()
try:
structs = entry.content
preprocessors_ = [
c.StripIncludes,
c.ClangPreprocess,
c.ExtractStructs,
]
for p in preprocessors_:
try:
structs = p(structs)
except ValueError:
return []
except Exception as e:
raise("Unexpected exception: {}".format(e))
structs_code = []
for struct in structs:
try:
_ = c.Compile(' '.join(struct['text']))
structs_code.append(
(True,
struct)
)
except ValueError as e:
structs_code.append(
(False,
struct)
)
end_time = time.time()
preprocess_time_ms = int((end_time - start_time) * 1000)
return [ Struct(
input_relpath = "main_files/{}".format(entry.id),
input_sha256 = entry.id,
sha256 = hashlib.sha256(''.join(struct['text']).encode("utf-8")).hexdigest(),
contents = c.ClangFormat(' '.join(struct['text'])),
name = struct['name'],
fields = '\n'.join([','.join(field) for field in struct['fields']]),
num_fields = len(struct['fields']),
preprocessing_succeeded = success,
repo_name = entry.repo_name,
ref = entry.ref,
wall_time_ms = preprocess_time_ms,
date_added = datetime.datetime.utcnow(),
) for (success, struct) in structs_code]
def CollectStructsBQ(db, session):
total = db.mainfile_count # Total number of files in BQ database.
total_per_node = total // environment.WORLD_SIZE # In distributed nodes, this is the total files to be processed per node.
if total == 0:
raise ValueError("Input BQ database {} is empty!".format(contentfile_root))
# Set of IDs that have been completed.
done = set(
[x[0].replace("main_files/", "") for x in session.query(Struct.input_relpath)]
)
chunk, idx = min(total_per_node, 100000), environment.WORLD_RANK * total_per_node
limit = (environment.WORLD_RANK + 1) * total_per_node + (total % total_per_node if environment.WORLD_RANK == environment.WORLD_SIZE - 1 else 0)
if environment.WORLD_SIZE > 1:
    bar = distrib.ProgressBar(total = total, offset = idx, desc = "Preprocessing DB")
else:
bar = tqdm.tqdm(total = total, desc = "Preprocessing DB", leave = True)
last_commit = time.time()
wall_time_start = time.time()
while idx < limit:
try:
chunk = min(chunk, limit - idx)
batch = db.main_files_batch(chunk, idx, exclude_id = done)
idx += chunk - len(batch) # This difference will be the number of already done files.
flush_queue = set()
pool = multiprocessing.Pool()
for structs_list in pool.imap_unordered(FromBQ, batch):
for struct in structs_list:
wall_time_end = time.time()
struct.wall_time_ms = int(
(wall_time_end - wall_time_start) * 1000
)
wall_time_start = wall_time_end
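          # Skip structs that already exist in the DB, as well as duplicates
          # queued in the current uncommitted batch (tracked via flush_queue).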
exists = session.query(Struct).filter(Struct.sha256 == struct.sha256).first()
if not exists and struct.sha256 not in flush_queue:
session.add(struct)
flush_queue.add(struct.sha256)
if wall_time_end - last_commit > 100:
session.commit()
flush_queue = set()
last_commit = wall_time_end
idx += 1
bar.update(idx - bar.n)
pool.close()
except KeyboardInterrupt as e:
pool.terminate()
raise e
except Exception as e:
l.logger().error(e, ddp_nodes = True)
pool.terminate()
raise e
session.commit()
flush_queue = set()
if environment.WORLD_SIZE > 1:
bar.finalize(idx)
def main(*args, **kwargs):
try:
tdir = FLAGS.local_filesystem
except Exception:
tdir = None
if FLAGS.datatypes_bq_db is None:
raise ValueError("Set path for BQ database.")
if FLAGS.datatypes_db is None:
raise ValueError("Set path for output datatypes database.")
with tempfile.TemporaryDirectory(prefix="locks_", dir = tdir) as d:
distrib.init(str(d))
db = bqdb.bqDatabase("sqlite:///{}".format(str(pathlib.Path(FLAGS.datatypes_bq_db).resolve())), must_exist = True)
structs_db = DatatypeDB("sqlite:///{}".format(str(pathlib.Path(FLAGS.datatypes_db).resolve())), must_exist = False)
with structs_db.Session(commit = True) as session:
CollectStructsBQ(db, session)
return
if __name__ == "__main__":
app.run(main)
exit()
| 8,554 | 33.918367 | 145 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/corpuses/tokenizers.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the definition of tokenizers.
A tokenizer converts a block of text into a sequence of vocabulary tokens.
"""
import pathlib
import os
import pickle
import typing
import transformers
import json
import multiprocessing
import progressbar
import functools
import numpy as np
from collections import Counter
from absl import flags
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.preprocessors import opencl
FLAGS = flags.FLAGS
def FromText(config, contentfile_separator: str, corpus_txt: str):
mask_tokens = False if config.mask_tokens is None else config.mask_tokens
if config.token_type == "character":
if config.token_list:
l.logger().warning("token list in character-based tokenization is going to be ignored.")
return AsciiCharacterTokenizer.FromText(corpus_txt, mask_tokens)
elif config.token_type == "word":
with open(config.token_list, 'r') as f:
token_set = json.load(f)
token_set = set(token_set['opencl']['tokens'])
wpc_tok = False if config.wordpiece_tokenization is None else config.wordpiece_tokenization
return WordTokenizer.FromText(corpus_txt,
token_set,
mask_tokens,
wpc_tok
)
elif config.token_type == "ast":
if config.token_list:
with open(config.token_list, 'r') as f:
token_set = json.load(f)
token_set = set(token_set['opencl']['tokens'])
else:
token_set = set()
return ASTokenizer.FromText(corpus_txt, token_set, contentfile_separator, mask_tokens)
elif config.token_type == "incoder-1b":
return IncoderTokenizer("facebook/incoder-1B")
elif config.token_type == "incoder-6b":
return IncoderTokenizer("facebook/incoder-6B")
else:
raise NotImplementedError
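# Illustrative sketch (added for exposition, not part of the original module): the shape of
# the corpus config that FromText() above dispatches on. The field names are taken from the
# accesses in FromText(); the concrete config class is assumed to come from the corpus proto.
#
#   config.token_type              -> "character" | "word" | "ast" | "incoder-1b" | "incoder-6b"
#   config.token_list              -> path to a JSON file shaped like {"opencl": {"tokens": [...]}}
#   config.mask_tokens             -> bool or None, adds [START]/[END]/[PAD]/[MASK]/[HOLE]/[ENDHOLE]
#   config.wordpiece_tokenization  -> bool or None, only read for the "word" token type
#
# e.g. tokenizer = FromText(config, contentfile_separator = "\n\n", corpus_txt = raw_corpus)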
class TokenizerBase(object):
"""The base class for implementing tokenizers."""
@property
def atoms(self) -> typing.List[str]:
"""A list of atoms in the vocabulary."""
return list(sorted(self.vocab.keys()))
@property
def indices(self) -> typing.List[int]:
"""A list of vocabulary indices."""
return list(sorted(self.vocab.values()))
@classmethod
def FromText(cls, text: str) -> "TokenizerBase":
"""Instantiate and specialize an tokenizer from a corpus text.
Args:
text: Text corpus
Returns:
A tokenizer instance.
"""
raise NotImplementedError("abstract class")
@classmethod
def FromFile(cls, path: pathlib.Path) -> "TokenizerBase":
"""Load an tokenizer from file."""
try:
with open(path, "rb") as infile:
return pickle.load(infile)
except ModuleNotFoundError:
l.logger().warn("Outdated path tokenizer found. Will create an alias to unpickle it.")
import sys
import deeplearning
import deeplearning.benchpress
sys.modules['deeplearning.clgen'] = deeplearning.benchpress
with open(path, "rb") as infile:
return pickle.load(infile)
def __init__(self,
vocab: typing.Dict[str, int],
metaTokens: typing.Dict[str, str],
):
"""Instantiate an tokenizer.
Args:
vocab: A dictionary of mappings from character sequences (atoms) into
indices.
metaTokens: A dictionary mapping the metaTokens needed for tokenization.
(Used when masking is selected)
Raises:
TypeError: If vocab is not a dictionary.
ValueError: If the dictionary of mappings includes any duplicate values.
"""
self.vocab = vocab
self.metaTokens = metaTokens
self._UpdateVocabulary()
self.metaTokenValues = set(value for key, value in self.__dict__.items() if key in self.metaTokens)
self.requires_mask = True
def __len__(self):
"""
Intrinsic function to return length of vocab.
"""
return len(self.vocab)
def _UpdateVocabulary(self) -> None:
"""Private method which must be called if vocab is modified."""
if not isinstance(self.vocab, dict):
raise TypeError("vocabulary must be a dict")
# Each atom and index must be unique to ensure deterministic encoding.
if len(set(self.vocab.keys())) != len(self.vocab):
raise ValueError("all atoms must be unique")
if len(set(self.vocab.values())) != len(self.vocab):
raise ValueError("all indices must be unique")
self.vocab_size = len(self.vocab)
self.decoder = {val: key for key, val in self.vocab.items()}
# Set arbitrary object properties for meta tokens.
self.__dict__.update({x: self.vocab[y] for x, y in self.metaTokens.items()})
def ToFile(self, path: pathlib.Path) -> None:
"""Save an tokenizer to file."""
with open(path, "wb") as f:
pickle.dump(self, f)
def TokenizeString(self, text: str) -> np.array:
"""Tokenize a text into an array of vocabulary indices.
Args:
text: Input text.
Returns:
An array of indices into vocabulary for all atoms in text.
Raises:
VocabError: If the input text contains elements not in the vocabulary.
"""
raise NotImplementedError("abstract class")
def AtomizeString(self, text: str) -> typing.List[str]:
"""Split the text into atoms, but do not encode to indices.
Args:
text: Input text.
Returns:
A list of tokens.
"""
indices = self.TokenizeString(text)
return list(map(lambda x: self.decoder[x], indices))
def tokensToString(self,
encoded: np.array,
ignore_token: int = None,
with_formatting: bool = False,
):
"""Translate atomized code back into a string.
Args:
encoded: An nparray of encoded vocabulary indices.
ignore_token: A specific token to ignore from the text string (e.g. exclude pads)
with_formatting: Bool flag used to run clang format on stringified kernel. Used only in AST tokenizer.
Returns:
The decoded text.
Returns string if nparray is one-dimensional.
Else returns list for each extra dimension of strings.
"""
try:
if np.ndim(encoded) > 1:
return [ self.tokensToString(x, ignore_token) for x in encoded ]
elif np.ndim(encoded) == 1:
return "".join(list(map(lambda x: self.decoder[x] if x != ignore_token else '', encoded)))
else:
raise ValueError("Wrong encoded array specified")
except KeyError:
raise KeyError("Out of vocab: {}".format(encoded))
def ArrayToCode(self,
encoded: np.array,
with_formatting: bool = False,
) -> str:
"""
Convert encoded array to compilable code.
Removes meta tokens and converts to string.
Args:
encoded: nparray of encoded vocabulary indices.
Returns:
Code in string format.
"""
return self.tokensToString(
[x for x in encoded if x not in self.metaTokenValues],
with_formatting = with_formatting
)
def StringArrToCode(self,
text: typing.List[str],
with_formatting: bool = False,
) -> str:
"""
Convert string array to compilable code.
Removes meta tokens.
Args:
text: String representation of encoded array. (May contain metaTokens)
with_formatting: Select to run code through clang-format. Only usable in ASTokenizer
Returns:
Code in string format.
"""
mtstr = set(self.metaTokens.values())
return ''.join([x for x in text if x not in mtstr])
def SrcLocationToIndex(self,
encoded: np.array,
locations: typing.List[typing.Tuple[int, int]],
) -> typing.List[int]:
"""
Maps line-column src location to corresponding token of encoded array.
Args:
encoded: np array encoded representation of a kernel.
locations: list of tuples representing line-column locations in str-formatted code.
Returns:
List of indices pointing to mapped tokens in the sequence.
"""
indices = []
str_atoms = [self.tokensToString([token]) for token in encoded]
lidx, cidx = 1, 1
locit = iter(locations)
try:
l, c = next(locit)
except StopIteration:
return indices
for i, token in enumerate(str_atoms):
if token in self.metaTokens.values():
pass
elif token == "\n\n":
lidx += 2
cidx = 1
elif token == "\n":
lidx += 1
cidx = 1
else:
cidx += len(token)
if lidx == l and cidx > c:
indices.append(i)
try:
l, c = next(locit)
except StopIteration:
break
return indices
def __eq__(self, rhs: 'TokenizerBase') -> bool:
return self.vocab == rhs.vocab
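# Illustrative sketch (added for exposition, not part of the original module): tokenizers are
# persisted with pickle, so a save/load round trip through the base-class helpers looks like:
#
#   tokenizer.ToFile(pathlib.Path("corpus/tokenizer.pkl"))
#   restored = TokenizerBase.FromFile(pathlib.Path("corpus/tokenizer.pkl"))
#   assert restored == tokenizer   # __eq__ above compares the vocabularies
#
# The file name is a placeholder; any path works as long as the parent directory exists.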
class AsciiCharacterTokenizer(TokenizerBase):
"""An tokenizer for character-level syntactic modelling."""
@classmethod
def FromText(cls, text: str, mask_tokens: bool) -> "AsciiCharacterTokenizer":
"""Instantiate and an tokenizer from a corpus text.
Args:
text: Text corpus.
Returns:
A tokenizer instance.
"""
if mask_tokens:
metaTokens = {
'startToken' : '[START]',
'endToken' : '[END]',
'padToken' : '[PAD]',
'maskToken' : '[MASK]',
'holeToken' : '[HOLE]',
'endholeToken' : '[ENDHOLE]',
}
else:
metaTokens = {}
counter = Counter(text)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
atoms, _ = zip(*count_pairs)
atoms = tuple(metaTokens.values()) + atoms
vocab = dict(zip(atoms, range(len(atoms))))
return AsciiCharacterTokenizer(vocab, metaTokens)
def __repr__(self) -> str:
return f"AsciiCharacterTokenizer[{self.vocab_size} chars]"
def TokenizeString(self, text: str) -> np.array:
"""Tokenize a text into an array of vocabulary indices.
Args:
text: Input text.
Returns:
An array of indices into vocabulary for all atoms in text.
"""
try:
if not self.metaTokens:
return np.array(list(map(lambda x: self.vocab[x], text)), dtype=np.int32)
else:
encoded = []
skipNext = 0
for idx, char in enumerate(text):
if skipNext > 0:
skipNext -= 1
continue
if char == '[':
for meta in self.metaTokens.values():
if text[idx: idx + len(meta)] == meta:
encoded.append(self.vocab[meta])
skipNext = len(meta) - 1
break
if skipNext == 0:
encoded.append(self.vocab[char])
return np.array(encoded, dtype = np.int32)
except KeyError:
raise ValueError("OoV index in string tokenizing.")
class WordTokenizer(TokenizerBase):
"""A greedy tokenizer supports multi-character tokens."""
@classmethod
def FromText(cls,
text: str,
token_list: typing.Set[str],
mask_tokens: bool,
wordpiece: bool,
) -> "WordTokenizer":
"""Instantiate and an tokenizer from a corpus text.
Args:
text: Text corpus
token_list: A list of multi-character token_list.
Returns:
An tokenizer instance.
"""
if not token_list:
raise ValueError("No tokens specified")
if wordpiece:
raise NotImplementedError
if mask_tokens:
metaTokens = {
'startToken' : '[START]',
'endToken' : '[END]',
'padToken' : '[PAD]',
'maskToken' : '[MASK]',
'holeToken' : '[HOLE]',
'endholeToken' : '[ENDHOLE]',
}
else:
metaTokens = {}
# Add meta token_list to token set
for mt in metaTokens.values():
token_list.add(mt)
# Instantiate a greedy tokenizer using the full vocabulary.
full_vocab = dict(zip(token_list, range(len(token_list))))
c = WordTokenizer(full_vocab, metaTokens, determine_chars=True)
# Derive the subset of the vocabulary required to encode the given text.
tokens = [mt for mt in metaTokens.values()] + sorted(list(set(c.AtomizeString(text))))
vocab_subset = dict(zip(tokens, range(len(tokens))))
# Return a new tokenizer using the subset vocabulary.
return WordTokenizer(vocab_subset, metaTokens)
def __init__(self,
vocab: typing.Dict[str, int],
metaTokens: typing.Dict[str, str],
determine_chars = False
):
super(WordTokenizer, self).__init__(vocab, metaTokens)
self.determine_chars = determine_chars
multichars = set(k for k in self.atoms if len(k) > 1)
first_chars = set(a[0] for a in multichars)
self.lookup = dict(
(c, [a for a in multichars if a[0] == c]) for c in first_chars
)
def __repr__(self) -> str:
return f"WordTokenizer[{self.vocab_size} tokens]"
def TokenizeString(self, text: str) -> np.array:
"""Tokenize a text into an array of vocabulary indices.
Args:
text: Input text.
Returns:
An array of indices into vocabulary for all atoms in text.
"""
def _AddToVocab(token: str) -> int:
"""Add a token to the vocabulary and return its index."""
if self.determine_chars and token not in self.vocab:
max_index = max(self.vocab.values())
self.vocab[token] = max_index + 1
return self.vocab[token]
indices = []
i = 0
j = 2
try:
while i < len(text):
if self.lookup.get(text[i]):
if j <= len(text) and any(
x.startswith(text[i:j]) for x in self.lookup[text[i]]
):
j += 1
else:
while j > i + 1:
if any(x == text[i:j] for x in self.lookup[text[i]]):
indices.append(self.vocab[text[i:j]])
i = j
j += 2
break
else:
j -= 1
else:
indices.append(_AddToVocab(text[i]))
i += 1
j += 2
else:
indices.append(_AddToVocab(text[i]))
i += 1
j += 2
except KeyError:
raise ValueError
if self.determine_chars:
self._UpdateVocabulary()
return np.array(indices, dtype=np.int32)
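# Illustrative sketch (added for exposition, not part of the original module): the greedy
# lookup above always prefers the longest multi-character token that matches. With a
# hypothetical vocabulary containing both "in" and "int", the text "int x" atomizes to
# ["int", " ", "x"] rather than ["in", "t", " ", "x"], because the scan keeps extending
# the window while any known token still starts with the current slice, then backtracks
# to the longest exact match.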
class ASTokenizer(TokenizerBase):
"""A Clang AST tokenizer fully supports language grammar."""
@classmethod
def FromText(cls,
text: str,
token_set: typing.Set[str],
contentfile_separator: str,
mask_tokens: bool,
) -> "ASTokenizer":
"""Instantiate an AST tokenizer from a corpus text.
Args:
text: Text corpus
token_set: Pre-defined grammar keywords of target language.
Returns:
A tokenizer instance.
"""
if mask_tokens:
metaTokens = {
'startToken' : '[START]',
'endToken' : '[END]',
'padToken' : '[PAD]',
'maskToken' : '[MASK]',
'holeToken' : '[HOLE]',
'endholeToken' : '[ENDHOLE]',
}
else:
metaTokens = {}
token_list, source_tokens = set(), {}
chunked_text = text.split(contentfile_separator)
bar = progressbar.ProgressBar(max_value=len(chunked_text))
pool = multiprocessing.Pool()
try:
for tl in bar(pool.imap_unordered(functools.partial(opencl.DeriveSourceVocab, token_list = token_set), chunked_text)):
source_tokens.update(tl)
pool.close()
except Exception as e:
pool.terminate()
raise e
except KeyboardInterrupt as e:
pool.terminate()
raise e
# source_tokens = opencl.DeriveSourceVocab(text, token_set)
for mt in metaTokens.values():
source_tokens[mt] = ''
token_list.add(mt)
token_list.update(source_tokens.keys())
# Create full vocab and initialize AST tokenizer.
full_vocab = dict(zip(token_list, range(len(token_list))))
return ASTokenizer(full_vocab, metaTokens, source_tokens)
def __init__(self,
vocab: typing.Dict[str, int],
metaTokens: typing.Dict[str, str],
token_del: typing.Dict[str, str],
):
super(ASTokenizer, self).__init__(vocab, metaTokens)
self.decoder_with_delim = {
self.vocab[k]: "{}{}".format(k.replace('-char-based', '').replace('\\', '\\\\'), v)
for k, v in token_del.items()
}
"""
A little legend...
self.vocab : raw_string -> encoded_value. e.g. 0-char-based: 1243
self.decoder : encoded_value -> raw_string. e.g. 1243: 0-char-based
token_del : raw_string -> delimiter. e.g. 0-char-based: ''
self.decoder_with_delim : encoded_value -> pretty_string. e.g. 1243: '0'
"""
return
def ManualTokenizeString(self, text: str) -> np.array:
"""Tokenize a text into an array of vocabulary indices.
!!! This is a manual parser, which is now deprecated.
Use regular TokenizeString below.
Args:
text: Input text.
Returns:
An array of indices into vocabulary for all atoms in text.
"""
indices = []
l_idx, r_idx = 0, 1
inside_string, inside_char = False, False
cb_del = "-char-based"
while l_idx < len(text):
# for idx, ch in enumerate(text):
if text[l_idx] == '"' and not inside_char:# and (l_idx == 0 or text[l_idx - 1] != '\\'):
inside_string ^= True
try:
indices.append(self.vocab["{}{}".format(text[l_idx], cb_del)])
except KeyError:
raise ValueError("-{}- char out of vocabulary.".format(text[l_idx]))
l_idx += 1
elif text[l_idx] == "'" and not inside_string:# and (l_idx == 0 or text[l_idx - 1] != '\\'):
inside_char ^= True
try:
indices.append(self.vocab["{}{}".format(text[l_idx], cb_del)])
except KeyError:
raise ValueError("-{}- char out of vocabulary.".format(text[l_idx]))
l_idx += 1
elif text[l_idx] == '\n':
if inside_string or inside_char:
indices.append(self.vocab["n-char-based"])
l_idx += 1
elif text[l_idx] == '\t':
if inside_string or inside_char:
indices.append(self.vocab["t-char-based"])
l_idx += 1
elif text[l_idx] == ' ' or text[l_idx] == '\\':
if inside_string or inside_char:
try:
indices.append(self.vocab["{}{}".format(text[l_idx], cb_del)])
except KeyError:
raise ValueError("-{}- char out of vocabulary.".format(text[l_idx]))
l_idx += 1
else:
r_idx = l_idx
while not inside_char and not inside_string and r_idx < len(text) and text[r_idx] not in {' ', '\n', '(', ')', '{', '}', '[', ']', ';'}:
# Some basic tokens that mean we can't have a unified token from l_idx->r_idx.
# Also, no word-tokenization in string literals.
r_idx += 1
while r_idx > l_idx and text[l_idx:r_idx+1] not in self.vocab:
r_idx -= 1
if r_idx == l_idx:
# Char-based vs word-based has to be evaluated.
if (inside_char or inside_string) or text[l_idx] not in self.vocab:
# That is definitely a string literal or there just not is a word entry in vocab.
try:
indices.append(self.vocab["{}{}".format(text[l_idx], cb_del)])
except KeyError:
raise ValueError("Inside string but out of vocab: -{}-\n{}".format(text[l_idx], text))
elif ("{}{}".format(text[l_idx], cb_del) not in self.vocab) or r_idx + 1 >= len(text):
# End of file for some reason, or just there is no char-based entry.
try:
indices.append(self.vocab[text[l_idx]])
except KeyError:
raise ValueError("End of file, out of vocab: -{}-".format(text[l_idx]))
elif text[l_idx].isalnum():
# Char-based special chars can only be in strings.
# Any other char-based token found must be alphanumerical.
if text[l_idx+1].isalnum():
try:
indices.append(self.vocab["{}{}".format(text[l_idx], cb_del)])
except KeyError:
raise ValueError("Alnum out of vocab: -{}-".format(text[l_idx]))
# print("This should def be a char-based.Why ? If current is char and next is char and should be word, why did you end up rejecting the curr+1 ?")
elif text[l_idx - 1].isalnum() and text[l_idx] == 'e' and text[l_idx + 1] == '-':
try:
indices.append(self.vocab["{}{}".format(text[l_idx], cb_del)])
indices.append(self.vocab["{}{}".format(text[l_idx+1], cb_del)])
l_idx += 1
except KeyError:
raise ValueError("Floating exponent out of vocab: -{}-{}-".format(text[l_idx], text[l_idx+1]))
elif text[l_idx+1] == '(' or text[l_idx+1] == '_':
try:
indices.append(self.vocab["{}{}".format(text[l_idx], cb_del)])
except KeyError:
raise ValueError("Alnum, next is '(' but is out of vocab: -{}-".format(text[l_idx]))
else:
try:
indices.append(self.vocab[text[l_idx]])
except KeyError:
raise ValueError("Single-alnum word based out of vocab: -{}-".format(text[l_idx]))
else:
try:
indices.append(self.vocab[text[l_idx]])
except KeyError:
raise ValueError("Single special char word-based out of vocab: -{}-".format(text[l_idx]))
l_idx += 1
else:
if r_idx + 1 >= len(text):
try:
indices.append(self.vocab[text[l_idx:r_idx+1]])
except KeyError:
raise ValueError("String word in EOF not in vocab: -{}-".format(text[l_idx:r_idx+1]))
elif (not text[r_idx].isalnum() and text[r_idx] != '_') or (not text[r_idx+1].isalnum() and text[r_idx+1] != '_'):
try:
indices.append(self.vocab[text[l_idx:r_idx+1]])
except KeyError:
raise ValueError("String word not in vocab: -{}-".format(text[l_idx:r_idx+1]))
else:
# a) we have space, next is alphanumerical or underscore
# This is to catch a function call named intgetter() or int_getter().
while r_idx + 1 < len(text) and text[r_idx+1].isalnum():
r_idx += 1
for i in range(l_idx, r_idx + 1):
try:
indices.append(self.vocab["{}{}".format(text[i], cb_del)])
except KeyError:
raise ValueError("Extended word {} to char-based letters out of vocab: -{}-".format(text[l_idx:r_idx+1], text[i]))
l_idx = r_idx + 1
return np.array(indices, dtype=np.int32)
def TokenizeString(self, text: str) -> np.array:
"""Tokenize a text into an array of vocabulary indices.
Args:
text: Input text.
Returns:
An array of indices into vocabulary for all atoms in text.
"""
return np.array([self.vocab[t] for t in self.AtomizeString(text)], dtype=np.int32)
def AtomizeString(self, text: str) -> typing.List[str]:
"""Split the text into atoms, but do not encode to indices.
Args:
text: Input text.
Returns:
A list of tokens.
Raises:
ValueError: When a string atom does not belong in the vocabulary.
"""
try:
return [self.decoder[self.vocab[t]] for t in opencl.AtomizeSource(text, set(self.vocab.keys()))]
except KeyError:
raise ValueError("String index out of vocabulary: \n{}".format(text))
def tokensToString(self,
encoded: np.array,
ignore_token: int = None,
with_formatting: bool = False,
):
"""Translate atomized code back into a string.
Args:
encoded: An nparray of encoded vocabulary indices.
ignore_token: A specific token to ignore from the text string (e.g. exclude pads)
Returns:
The decoded text.
Returns string if nparray is one-dimensional.
Else returns list for each extra dimension of strings.
"""
try:
if np.ndim(encoded) > 1:
return [ self.tokensToString(x, ignore_token = ignore_token, with_formatting = with_formatting)
for x in encoded ]
elif np.ndim(encoded) == 1:
if not with_formatting:
src = []
for idx in range(len(encoded)):
if encoded[idx] == ignore_token:
continue
else:
ct = self.decoder_with_delim[encoded[idx]]
try:
if (encoded[idx] in {self.vocab['e-char-based'], self.vocab['E-char-based']}
and encoded[idx+1] in {self.vocab['+'], self.vocab['-']}):
src.append(ct + " ")
else:
src.append(ct)
except IndexError:
src.append(ct)
src = "".join(src)
else:
src = "".join(list(map(lambda x: self.decoder_with_delim[x] if x != ignore_token else '', encoded)))
try:
src = opencl.ClangFormat(src)
except ValueError:
pass
return src
else:
raise ValueError("Wrong encoded array specified")
except KeyError:
raise KeyError("Out of vocab: {}".format(encoded))
def StringArrToCode(self,
text: typing.List[str],
with_formatting: bool = False,
) -> str:
"""
Convert string array to compilable code.
Removes meta tokens.
Args:
text: String representation of encoded array. (May contain metaTokens)
with_formatting: Select to run code through clang-format. Only usable in ASTokenizer
Returns:
Code in string format.
"""
mtstr = set(self.metaTokens.values())
if with_formatting:
return opencl.ClangFormat(''.join([self.decoder_with_delim[self.vocab[x]] for x in text if x not in mtstr]))
else:
return ''.join([self.decoder_with_delim[self.vocab[x]] for x in text if x not in mtstr])
def SrcLocationToIndex(self,
encoded: np.array,
locations: typing.List[typing.Tuple[int, int]],
) -> typing.List[int]:
raise NotImplementedError("TODO")
class FeatureTokenizer(TokenizerBase):
"""
A numerical value tokenizer used to represent
integer numerical values of Grewe, InstCount and Autophase Features.
"""
@classmethod
def FromArgs(cls,
singular_threshold : int,
max_value : int,
threshold_range : int,
) -> "FeatureTokenizer":
"""Instantiate an AST tokenizer from a corpus text.
Args:
feature corpus:
A corpus of all features for all different feature spaces.
Each key holds a list of vectors.
singular_threshold:
This threshold is config-defined and defines how many int values will be
1-1 represented in the tokenizer as tokens. After this threshold,
next values will be included in ranges. This prevents the vocabulary from
exploding when some cornercases have a value of 30k or similar.
exponential threshold:
Choose the upper bound of feature values that will be represented.
threshold_range:
After surpassing singular_threshold, feature values are groupped in 'threshold_range' windows.
Returns:
An tokenizer instance.
"""
metaTokens = {
'padToken' : '[PAD]',
}
token_list = [str(x) for x in range(singular_threshold)]
lb, rb = singular_threshold, singular_threshold + threshold_range
while rb < max_value:
token_list.append("[{}->{}]".format(lb, rb))
lb, rb = rb, rb + threshold_range
token_list.append("[{}->inf]".format(lb))
token_list += list(metaTokens.values())
# Create full vocab and initialize Feature Tokenizer.
vocab = dict(zip(token_list, range(len(token_list))))
return FeatureTokenizer(vocab, metaTokens, singular_threshold, max_value, threshold_range)
def __init__(self,
vocab: typing.Dict[str, int],
metaTokens: typing.Dict[str, str],
st : int,
max_val : int,
th_range : int,
):
super(FeatureTokenizer, self).__init__(vocab, metaTokens)
self.singular_threshold = st
self.max_value = max_val
self.threshold_range = th_range
return
def TokenizeFeature(self, value: int) -> int:
if value < self.singular_threshold:
return self.vocab[str(value)]
else:
lb, rb = self.singular_threshold, self.singular_threshold + self.threshold_range
while rb < self.max_value:
# token_list.append("[{}->{}]".format(lb, rb))
if value >= lb and value < rb:
return self.vocab["[{}->{}]".format(lb, rb)]
lb, rb = rb, rb + self.threshold_range
return self.vocab["[{}->inf]".format(lb)]
def TokenizeFeatureVector(self, fv: typing.Dict[str, float], fspace: str, seq_len: int) -> np.array:
"""
Sort feature space keys, exclude derivative feature and float values
and return np array of encoded feature tensor.
"""
f_len = {
"GreweFeatures": 6,
"AutophaseFeatures": 56,
"InstCountFeatures": 70,
}
assert seq_len > sum(list(f_len.values())), "Feature sequence length is not large enough to fit concatenation of feature spaces: {}.".format(sum(list(f_len.values())))
pad_len = seq_len - sum(list(f_len.values()))
fv = sorted([[x, y] for x, y in fv.items()], key = lambda x: x[0])
vals = [self.TokenizeFeature(int(x)) for n, x in fv if fspace != "GreweFeatures" or n not in {"F2:coalesced/mem", "F4:comp/mem"}]
if fspace == "GreweFeatures":
lp = []
rp = [self.padToken] * (f_len["AutophaseFeatures"] + f_len["InstCountFeatures"] + pad_len)
elif fspace == "AutophaseFeatures":
lp = [self.padToken] * f_len["GreweFeatures"]
rp = [self.padToken] * (f_len["InstCountFeatures"] + pad_len)
elif fspace == "InstCountFeatures":
lp = [self.padToken] * (f_len["GreweFeatures"] + f_len["AutophaseFeatures"])
rp = [self.padToken] * pad_len
encoded = np.array(lp + vals + rp)
assert len(encoded) == seq_len, "Encoded length mismatch with sequence length: {}/{}".format(len(encoded), seq_len)
return encoded
def tokensToString(self,
encoded: np.array,
ignore_token: int = None,
with_formatting: bool = False,
):
"""Translate atomized features back into a string.
Args:
encoded: An nparray of encoded vocabulary indices.
ignore_token: A specific token to ignore from the text string (e.g. exclude pads)
with_formatting: Bool flag used to run clang format on stringified kernel. Used only in AST tokenizer.
Returns:
The decoded text.
Returns string if nparray is one-dimensional.
Else returns list for each extra dimension of strings.
"""
try:
if np.ndim(encoded) > 1:
return [ self.tokensToString(x, ignore_token) for x in encoded ]
elif np.ndim(encoded) == 1:
return ",".join(list(map(lambda x: self.decoder[x] if x != ignore_token else '', encoded)))
else:
raise ValueError("Wrong encoded array specified")
except KeyError:
raise KeyError("Out of vocab: {}".format(encoded))
def TokenizeString(self, text: str) -> np.array:
raise TypeError("Operation not supported for FeatureTokenizer")
def AtomizeString(self, text: str) -> typing.List[str]:
raise TypeError("Operation not supported for FeatureTokenizer")
def ArrayToCode(self,
encoded: np.array,
with_formatting: bool = False,
) -> str:
raise TypeError("Operation not supported for FeatureTokenizer")
def StringArrToCode(self,
text: typing.List[str],
with_formatting: bool = False,
) -> str:
raise TypeError("Operation not supported for FeatureTokenizer")
def SrcLocationToIndex(self,
encoded: np.array,
locations: typing.List[typing.Tuple[int, int]],
) -> typing.List[int]:
raise TypeError("Operation not supported for FeatureTokenizer")
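# Illustrative sketch (added for exposition, not part of the original module): how the
# FeatureTokenizer above buckets integer feature values. Assuming singular_threshold = 10,
# threshold_range = 5 and max_value = 20, the vocabulary contains the singular tokens
# "0" ... "9", one range token "[10->15]", the overflow token "[15->inf]" and the [PAD]
# meta token. TokenizeFeature(7) maps to the id of "7", TokenizeFeature(12) to "[10->15]",
# and TokenizeFeature(17) or TokenizeFeature(250) both map to "[15->inf]".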
class IncoderTokenizer(TokenizerBase):
"""
Wrapper representation of Incoder's huggingface tokenizer.
"""
def __init__(self, incoder: str):
self._tokenizer = transformers.AutoTokenizer.from_pretrained(incoder)
self.vocab_size = self._tokenizer.vocab_size
self.vocab = self._tokenizer.vocab
self.decoder = {value: key for key, value in self.vocab.items()}
self.startToken = self._tokenizer.convert_tokens_to_ids("<|endoftext|>")
self.endToken = self._tokenizer.convert_tokens_to_ids("<|mask:0|>")
self.padToken = 1 # self._tokenizer.convert_tokens_to_ids("<|endoftext|>")
self.holeToken = self._tokenizer.convert_tokens_to_ids("<|mask:0|>")
self.maskToken = self._tokenizer.convert_tokens_to_ids("<|mask:0|>")
self.endholeToken = self._tokenizer.convert_tokens_to_ids("<|endofmask|>")
self.requires_mask = False
return
def get_hf_tokenizer(self) -> 'transformers.AutoTokenizer':
"""
Getter for Hugging-Face AutoTokenizer.
"""
return self._tokenizer
def tokensToString(self, encoded: np.array, ignore_token: int = None, **unused_kwargs) -> str:
return self._tokenizer.decode([x for x in encoded if x != ignore_token])
def ArrayToCode(self, encoded: np.array, **unused_kwargs) -> str:
return self.tokensToString([x for x in encoded if x != self.padToken])
def TokenizeString(self, text: str) -> np.array:
return [self._tokenizer.convert_tokens_to_ids(x) for x in self.AtomizeString(text)]
def AtomizeString(self, text: str) -> typing.List[str]:
return self._tokenizer.tokenize(text)
| 35,032 | 35.379024 | 171 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/corpuses/encoded.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines a database for encoded content files."""
import datetime
import functools
import multiprocessing
import pickle
import os
import sys
import time
import shutil
import typing
import pathlib
import glob
import numpy as np
import tqdm
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from sqlalchemy.sql import func
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import preprocessed
from deeplearning.benchpress.proto import internal_pb2
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.features import extractor
from absl import app, flags
import humanize
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
Base = declarative.declarative_base()
flags.DEFINE_boolean(
"override_encoding",
False,
"Set to override incomplete encoding. Does not set DB value to 'done'"
)
flags.DEFINE_string(
"encoded_databases",
None,
"Comma-separated list of paths for input encoded databases."
)
flags.DEFINE_string(
"merged_encoded_database",
None,
"Path for merged output encoded database"
)
class Meta(Base):
"""Meta table for encoded content files database."""
__tablename__ = "meta"
key: str = sql.Column(sql.String(1024), primary_key = True)
value: str = sql.Column(sql.String(1024), nullable = False)
class EncodedContentFileStats(Base):
"""Stats table for encoded content files."""
__tablename__ = "encoded_contentfiles_stats"
# Total number of files.
file_count : int = sql.Column(sql.Integer, primary_key = True)
# Average feature vector of contentfiles.
corpus_features : str = sql.Column(sql.String(1024), nullable = False)
# Token length distribution of contentfiles.
corpus_lengths : str = sql.Column(sql.String(1024), nullable = False)
class EncodedContentFile(Base):
"""A single encoded content file."""
__tablename__ = "encoded_contentfiles"
# The ID of the PreprocessedContentFile.
id: int = sql.Column(sql.Integer, primary_key=True)
# We store the vocabulary indices array as a string of period-separated
# integers, e.g. '0.1.2.0.1'. To access the values as an array of integers,
# use EncodedContentFile.indices_array.
data: str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable=False)
# Number of tokens in sequence
tokencount: int = sql.Column(sql.Integer, nullable=False)
# Sequence features extracted.
feature_vector: str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# The number of milliseconds encoding took.
encoding_time_ms: int = sql.Column(sql.Integer, nullable=False)
# Encoding is parallelizable, so the actual wall time of encoding may be much
# less than the sum of all encoding_time_ms. This column counts the effective
# number of "real" milliseconds during encoding between the last encoded
# result and this result coming in. The idea is that summing this column
# provides an accurate total of the actual time spent encoding an entire
# corpus. Will be <= encoding_time_ms.
wall_time_ms: int = sql.Column(sql.Integer, nullable=False)
date_added: datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@staticmethod
def DataStringToNumpyArray(data: str) -> np.ndarray:
"""Convert the 'data' string to a numpy array."""
return np.array([int(x) for x in data.split(".")], dtype=np.int32)
@staticmethod
def NumpyArrayToDataString(array: np.ndarray) -> str:
"""Convert the 'data' string to a numpy array."""
return ".".join(str(x) for x in array)
@property
def indices_array(self) -> np.ndarray:
"""The numpy array of the encoded data."""
return self.DataStringToNumpyArray(self.data)
@property
def features(self) -> typing.Dict[str, float]:
return extractor.RawToDictFeats(self.feature_vector)
@classmethod
def FromEncodedContentFile(
cls,
encoded_file: "EncodedContentFile",
idx: int = None,
) -> "EncodedContentFile":
"""
Replicate EncodedContentFile
"""
return EncodedContentFile(
id = idx if idx is not None else encoded_file.id,
data = encoded_file.data,
tokencount = encoded_file.tokencount,
feature_vector = encoded_file.feature_vector,
encoding_time_ms = encoded_file.encoding_time_ms,
wall_time_ms = encoded_file.wall_time_ms,
date_added = encoded_file.date_added,
)
@classmethod
def FromPreprocessed(
cls,
preprocessed_cf: preprocessed.PreprocessedContentFile,
tokenizer: tokenizers.TokenizerBase,
eof: str,
pre_train: bool,
) -> "EncodedContentFile":
"""Instantiate an EncodedContentFile from a preprocessed file.
Args:
preprocessed_cf: A PreprocessedContentFile instance.
tokenizer: The tokenizer to encode using.
eof: An end-of-file marker which is concatenated to the encoded sequence.
Returns:
An EncodedContentFile instance.
"""
start_time = time.time()
try:
data = tokenizer.TokenizeString(preprocessed_cf.text)
except ValueError as e:
l.logger().warn(e, ddp_nodes=True)
return None
####
# TODO kernel analytics
# encoded_length = len(data)
# token_values = data.sorted()
####
encoding_time_ms = int((time.time() - start_time) * 1000)
try:
feature_vector = extractor.ExtractRawFeatures(preprocessed_cf.text)
except Exception as e:
raise e
return EncodedContentFile(
id = preprocessed_cf.id,
# Encode the end-of-file marker separately to ensure that it resolves to
# the correct token. For example if the vocabulary contains 'a', 'b',
# and 'ab', then a content file 'a' with EOF marker 'b' would be encoded
# as 'ab', instead of 'a'+'b'.
data = cls.NumpyArrayToDataString(
np.concatenate((data, tokenizer.TokenizeString(eof)))
),
tokencount = len(data),
feature_vector = feature_vector,
encoding_time_ms = encoding_time_ms,
wall_time_ms = encoding_time_ms, # The outer-loop may change this.
date_added = datetime.datetime.utcnow(),
)
def EncoderWorker(
job: internal_pb2.EncoderWorker,
tokenizer,
contentfile_separator,
is_pre_train,
) -> typing.Optional[EncodedContentFile]:
"""Encode a single content file."""
# TODO(cec): There is a bug in the tokenizer creation logic such that the
# derived tokenizer is not always capable of encoding the preprocessed files.
# Once this has been fixed, there is no need to catch the VocabError here,
# and EncoderWorker can always return an EncodedContentFile instance.
try:
return EncodedContentFile.FromPreprocessed(
preprocessed.PreprocessedContentFile(id=job.id, text=job.text),
tokenizer,
contentfile_separator,
is_pre_train,
)
except Exception as e:
raise e
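# Illustrative sketch (added for exposition, not part of the original module): EncoderWorker is
# meant to be mapped over preprocessed files with multiprocessing, the same way Import() below
# drives it. A stripped-down version of that driver loop, with a hypothetical tokenizer and job
# list, would be:
#
#   worker = functools.partial(EncoderWorker,
#                              tokenizer = tokenizer,
#                              contentfile_separator = "\n\n",
#                              is_pre_train = False)
#   with multiprocessing.Pool() as pool:
#     for encoded_cf in pool.imap_unordered(worker, jobs):
#       if encoded_cf is not None:
#         session.add(encoded_cf)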
class EncodedContentFiles(sqlutil.Database):
"""A database of encoded pre-processed contentfiles."""
def __init__(self, url: str, is_pre_train: bool = False, must_exist: bool = False, is_replica = False):
self.is_pre_train = is_pre_train
if environment.WORLD_RANK == 0 or is_replica:
encoded_path = pathlib.Path(url.replace("sqlite:///", "")).parent
self.length_monitor = monitors.CumulativeHistMonitor(encoded_path, "encoded_kernel_length")
# if not self.is_pre_train:
self.token_monitor = monitors.NormalizedFrequencyMonitor(encoded_path, "token_distribution")
self.feature_monitors = {ftype: monitors.CategoricalDistribMonitor(encoded_path, "{}_distribution".format(ftype)) for ftype in extractor.extractors.keys()}
super(EncodedContentFiles, self).__init__(url, Base, must_exist=must_exist)
if environment.WORLD_SIZE > 1 and not is_replica:
# Conduct engine connections to replicated preprocessed chunks.
self.base_path = pathlib.Path(url.replace("sqlite:///", "")).resolve().parent
hash_id = self.base_path.name
try:
tdir = pathlib.Path(FLAGS.local_filesystem).resolve() / hash_id / "node_encoded"
except Exception:
tdir = pathlib.Path("/tmp").resolve() / hash_id / "node_encoded"
try:
tdir.mkdir(parents = True, exist_ok = True)
except Exception:
pass
self.replicated_path = tdir / "encoded_{}.db".format(environment.WORLD_RANK)
self.replicated = EncodedContentFiles(
url = "sqlite:///{}".format(str(self.replicated_path)),
is_pre_train = is_pre_train,
must_exist = must_exist,
is_replica = True
)
self.length_monitor = self.replicated.length_monitor
if not self.is_pre_train:
self.token_monitor = self.replicated.token_monitor
self.feature_monitors = self.replicated.feature_monitors
distrib.barrier()
return
def Create(
self,
p: preprocessed.PreprocessedContentFiles,
tokenizer: tokenizers.TokenizerBase,
contentfile_separator: str,
) -> bool:
"""Populate the encoded contentfiles database.
Args:
p: A PreprocessedContentFiles database.
tokenizer: An TokenizerBase instance.
contentfile_separator: The contentfile separator.
Returns:
True if work was done, else False.
Raises:
EmptyCorpusException: If the PreprocessedContentFiles database has
no files.
"""
if environment.WORLD_SIZE > 1:
if environment.WORLD_RANK == 0:
with self.get_session() as session:
status = self.IsDone(session)
_ = distrib.broadcast(str(status))
if status:
return
else:
status = distrib.broadcast()
if status == "True":
return
if status != "False":
raise OSError("Broken distributed message: '{}'".format(status))
sessmaker = self.Session if environment.WORLD_SIZE == 1 else self.replicated.Session
with sessmaker() as session:
if not self.IsDone(session):
self.Import(session, p, tokenizer, contentfile_separator)
self.SetStats(session)
self.SetDone(session)
session.commit()
if environment.WORLD_SIZE > 1:
self.MergeReplicas(p)
# Logging output.
# num_files = session.query(EncodedContentFile).count()
# token_count, total_walltime, total_time, = session.query(
# func.sum(EncodedContentFile.tokencount),
# func.sum(EncodedContentFile.wall_time_ms),
# func.sum(EncodedContentFile.encoding_time_ms),
# ).first()
# l.logger().info("Encoded {} files in {} ms ({:.2f}x speedup)"
# .format(
# humanize.intcomma(num_files),
# humanize.intcomma(total_walltime),
# total_time / total_walltime,
# ), mail_level = 4
# )
# l.logger().info("Encoded corpus: {} tokens, {} files."
# .format(
# humanize.intcomma(token_count),
# humanize.intcomma(num_files),
# ), mail_level = 4
# )
return
@property
def get_session(self):
"""
get proper DB session.
"""
if environment.WORLD_SIZE == 1 or environment.WORLD_RANK == 0:
return self.Session
else:
return self.replicated.Session
@property
def size(self):
"""Return the total number of files in the encoded corpus."""
with self.get_session() as session:
if session.query(EncodedContentFileStats).first():
stats = session.query(EncodedContentFileStats).first()
return stats.file_count
else:
l.logger().warn("Stats table not found. Inserting stats...")
self.SetStats(session)
return session.query(EncodedContentFileStats).first().file_count
@property
def token_count(self) -> int:
"""Return the total number of tokens in the encoded corpus.
This excludes the EOF markers which are appended to each encoded text.
"""
with self.get_session() as session:
return session.query(func.sum(EncodedContentFile.tokencount)).scalar()
def IsDone(self, session: sqlutil.Session):
if session.query(Meta).filter(Meta.key == "done").first():
return True
elif FLAGS.override_encoding:
l.logger().warn("Overriding incomplete encoded DB.")
return True
else:
return False
def SetDone(self, session: sqlutil.Session):
session.add(Meta(key="done", value="yes"))
def SetStats(self, session: sqlutil.Session) -> None:
"""Write corpus stats to DB"""
file_count = session.query(EncodedContentFile.id).count()
if not self.is_pre_train:
corpus_features = '\n\n'.join([ftype + ":\n" + mon.getStrData() for ftype, mon in self.feature_monitors.items()])
else:
corpus_features = ""
corpus_lengths = self.length_monitor.getStrData()
if session.query(EncodedContentFileStats).first():
stats = session.query(EncodedContentFileStats).first()
stats.file_count = file_count
stats.corpus_features = corpus_features
stats.corpus_lengths = corpus_lengths
else:
session.add(
EncodedContentFileStats(
file_count = file_count,
corpus_features = corpus_features,
corpus_lengths = corpus_lengths,
)
)
session.commit()
return
def Import(
self,
session: sqlutil.Session,
preprocessed_db: preprocessed.PreprocessedContentFiles,
tokenizer: tokenizers.TokenizerBase,
contentfile_separator: str,
) -> None:
# if environment.WORLD_RANK == 0:
if environment.WORLD_SIZE > 1:
preprocessed_db = preprocessed_db.replicated
with preprocessed_db.Session() as p_session:
query = p_session.query(preprocessed.PreprocessedContentFile).filter(
preprocessed.PreprocessedContentFile.preprocessing_succeeded == True,
)
done = set([int(x.id) for x in session.query(EncodedContentFile).all()])
total_jobs = query.count() # - len(done)
l.logger().info("Encoding {} of {} preprocessed files"
.format(
humanize.intcomma(total_jobs),
humanize.intcomma(
p_session.query(preprocessed.PreprocessedContentFile)
.filter(preprocessed.PreprocessedContentFile.preprocessing_succeeded == True)
.count()
)
)
)
chunk, idx = 2000000, 0
if environment.WORLD_SIZE > 1:
bar = distrib.ProgressBar(total = total_jobs, offset = idx, desc = "Encoding DB")
else:
bar = tqdm.tqdm(total = total_jobs, desc = "Encoding DB", leave = True)
wall_time_start = time.time()
while idx < total_jobs:
try:
if done:
batch = []
for f in query.limit(chunk).offset(idx).all():
if f.id not in done:
batch.append(f)
else:
idx += 1
# done.remove(f.id)
else:
batch = query.limit(chunk).offset(idx).all()
pool = multiprocessing.Pool()
last_commit = time.time()
for encoded_cf in pool.imap_unordered(
functools.partial(EncoderWorker,
tokenizer = tokenizer,
contentfile_separator = contentfile_separator,
is_pre_train = self.is_pre_train,
),
batch
):
wall_time_end = time.time()
if encoded_cf:
encoded_cf.wall_time_ms = int(
(wall_time_end - wall_time_start) * 1000
)
session.add(encoded_cf)
self.length_monitor.register(encoded_cf.tokencount)
# if not self.is_pre_train:
self.token_monitor.register([tokenizer.decoder[int(x)] for x in encoded_cf.data.split('.')])
dict_features = extractor.RawToDictFeats(encoded_cf.feature_vector)
if dict_features:
for key, value in dict_features.items():
self.feature_monitors[key].register(value)
wall_time_start = wall_time_end
if wall_time_end - last_commit > 1000:
session.commit()
last_commit = wall_time_end
idx += 1
bar.update(idx - bar.n)
pool.close()
except KeyboardInterrupt as e:
pool.terminate()
self.length_monitor.plot()
# if not self.is_pre_train:
self.token_monitor.plot()
for m in self.feature_monitors.values():
m.plot()
raise e
except Exception as e:
l.logger().error(e, ddp_nodes = True)
pool.terminate()
self.length_monitor.plot()
# if not self.is_pre_train:
self.token_monitor.plot()
for m in self.feature_monitors.values():
m.plot()
raise e
self.length_monitor.plot()
# if not self.is_pre_train:
self.token_monitor.plot()
for m in self.feature_monitors.values():
m.plot()
session.commit()
if environment.WORLD_SIZE > 1:
bar.finalize(idx)
return
def MergeReplicas(self, preprocessed_db: preprocessed.PreprocessedContentFiles) -> None:
"""
When distributed nodes work for the same encoded DB
this function moves finalized encoded chunks back into the AFS
and master node merges them into the final encodeddb
"""
shutil.copy(
self.replicated_path, self.base_path / "encoded_{}.db".format(environment.WORLD_RANK)
)
distrib.barrier()
if environment.WORLD_RANK == 0:
db_chunks = glob.glob(str(self.base_path / "encoded_*.db"))
dbs = [EncodedContentFiles(url = "sqlite:///{}".format(p), must_exist = True, is_replica = True) for p in db_chunks]
merge_db(dbs, self)
for p in db_chunks:
os.remove(p)
# Cleanup the local mess inside the local temp filesystem.
if (self.replicated_path).exists():
os.remove(self.replicated_path)
else:
l.logger().warn("I didn't find my local encoded DB at {}".format(self.replicated_path), ddp_nodes = True)
if preprocessed_db.replicated_path.exists():
os.remove(preprocessed_db.replicated_path)
else:
l.logger().warn("I didn't find my local preprocessed DB at {}".format(preprocessed_db.replicated_path), ddp_nodes = True)
distrib.barrier()
return
@staticmethod
def GetVocabFromMetaTable(session) -> typing.Dict[str, int]:
"""Read a vocabulary dictionary from the 'Meta' table of a database."""
q = session.query(Meta.value).filter(Meta.key == "vocab_size")
if not q.first():
return {}
vocab_size = int(q.one()[0])
q = session.query(Meta.value)
return {
q.filter(Meta.key == f"vocab_{i}").one()[0]: i for i in range(vocab_size)
}
@staticmethod
def StoreVocabInMetaTable(
session: sqlutil.Session, vocabulary: typing.Dict[str, int]
) -> None:
"""Store a vocabulary dictionary in the 'Meta' table of a database."""
q = session.query(Meta).filter(Meta.key.like("vocab_%"))
q.delete(synchronize_session=False)
session.add(Meta(key="vocab_size", value=str(len(vocabulary))))
session.add_all(
[Meta(key=f"vocab_{v}", value=k) for k, v in vocabulary.items()]
)
def get_data(self, sequence_length: int = None) -> typing.List[np.array]:
"""
Get the indices array of encoded contentfiles.
"""
with self.get_session() as session:
if sequence_length:
return [x.indices_array for x in session.query(EncodedContentFile).filter(EncodedContentFile.tokencount <= sequence_length).all()]
else:
return [x.indices_array for x in session.query(EncodedContentFile).all()]
def get_features(self, sequence_length: int = None) -> typing.List[str]:
"""
Get feature vectors of training instances within the specified sequence length.
"""
with self.get_session() as session:
if sequence_length:
return [x.feature_vector for x in session.query(EncodedContentFile).filter(EncodedContentFile.tokencount <= sequence_length).all()]
else:
return [x.feature_vector for x in session.query(EncodedContentFile).all()]
def get_data_features(self, tokenizer, sequence_length: int = None) -> typing.List[typing.Tuple[str, str]]:
"""
Collect list of source with features
"""
with self.get_session() as session:
if sequence_length:
return [(tokenizer.ArrayToCode(x.indices_array, with_formatting = False), x.feature_vector) for x in session.query(EncodedContentFile).filter(EncodedContentFile.tokencount <= sequence_length).all()]
else:
return [(tokenizer.ArrayToCode(x.indices_array, with_formatting = False), x.feature_vector) for x in session.query(EncodedContentFile).all()]
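# Illustrative sketch (added for exposition, not part of the original module): typical read-side
# use of the database class above, assuming an existing encoded.db produced by Create().
#
#   db = EncodedContentFiles(url = "sqlite:///corpus/encoded.db", must_exist = True)
#   arrays = db.get_data(sequence_length = 768)    # indices arrays no longer than 768 tokens
#   pairs  = db.get_data_features(tokenizer)       # (source code, feature vector) tuples
#
# The path and sequence length are placeholders; get_data() without a sequence_length returns
# every encoded file.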
def merge_db(dbs: typing.List[EncodedContentFiles], out_db: typing.List[EncodedContentFiles]) -> None:
"""
Collect data from a list of preprocessed databases and merge them.
"""
for db in dbs:
l.logger().info("Loading {}...".format(db.url))
chunk, idx = 2000000, 0
bar = tqdm.trange(db.size, desc = "Encoded DB merge", leave = True)
pkey = out_db.size
while idx < db.size:
with db.Session() as ses:
data = ses.query(EncodedContentFile).limit(chunk).offset(idx).all()
with out_db.Session() as ses:
for df in data:
f = EncodedContentFile.FromEncodedContentFile(df, idx = pkey + idx)
ses.add(f)
idx += 1
bar.update(idx)
ses.commit()
with out_db.Session() as ses:
out_db.SetDone(ses)
ses.commit()
## TODO: Merge File stats.
return
def ContentHash_worker(contentfile: EncodedContentFile, tokenizer) -> typing.Tuple[str, EncodedContentFile]:
"""
Return new contentfile along with content hash of code.
"""
try:
return opencl.ContentHash(tokenizer.ArrayToCode(contentfile.indices_array, with_formatting = False)), contentfile
except Exception as e:
l.logger().warn(e)
return None
def to_unique_samples(db: EncodedContentFiles, out_db: EncodedContentFiles, tokenizer) -> None:
"""
Read input database, pass through deterministic re-writer and keep only unique samples.
"""
pool = multiprocessing.Pool()
visited = set()
data = []
f = functools.partial(ContentHash_worker, tokenizer = tokenizer)
with db.Session() as s:
inp_data = [x for x in s.query(EncodedContentFile).all()]
try:
for sha, cfile in tqdm.tqdm(pool.imap_unordered(f, inp_data), total = len(inp_data), desc = "Unique-fy encoded database"):
if sha not in visited:
visited.add(sha)
data.append(cfile)
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
pool.close()
with out_db.Session() as s:
idx = 0
for dp in tqdm.tqdm(data, total = len(data), desc = "Adding to DB"):
new_dp = EncodedContentFile.FromEncodedContentFile(dp, idx = idx)
idx += 1
s.add(new_dp)
s.commit()
return
def initMain(*args, **kwargs):
"""
Setup module's operations.
"""
l.initLogger(name = "bigQuery_database")
if not FLAGS.encoded_databases:
raise ValueError("Please input encoded databases to merge as a comma separated list.")
db_paths = [pathlib.Path(p).absolute() for p in FLAGS.encoded_databases.replace(" ", "").split(",")]
for p in db_paths:
if not p.exists():
raise FileNotFoundError(p)
dbs = [EncodedContentFiles(url = "sqlite:///{}".format(str(p)), must_exist = True) for p in db_paths]
if not FLAGS.merged_encoded_database:
raise ValueError("You must set a path for merged_encoded_database")
out_db_path = pathlib.Path(FLAGS.merged_encoded_database).resolve()
out_db_path.parent.mkdir(exist_ok = True, parents = True)
out_db = EncodedContentFiles(url = "sqlite:///{}".format(str(out_db_path)), must_exist = False)
# merge_db(dbs, out_db)
tokenizer_path = pathlib.Path(FLAGS.tokenizer_path).resolve()
if not tokenizer_path.exists():
raise FileNotFoundError(tokenizer_path)
tokenizer = tokenizers.TokenizerBase.FromFile(tokenizer_path)
to_unique_samples(dbs[0], out_db, tokenizer)
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0)
| 25,602 | 36.376642 | 206 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/sequence_masking.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core algorithm of sequence masking"""
import sys
import typing
import copy
import humanize
import pickle
import numpy as np
import progressbar
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util.tf import tf
from deeplearning.benchpress.util import logging as l
class tfSequence(typing.NamedTuple):
"""
Tuple representation of a single MaskLM Instance.
This is not batch! generateTfDataset applies native batching,
so this class represents a single instance!
"""
seen_in_training : np.int32
original_input : np.array
input_ids : np.array
input_mask : np.array
masked_lm_positions : np.array
masked_lm_ids : np.array
masked_lm_weights : np.array
masked_lm_lengths : np.array
next_sentence_label : np.int32
@staticmethod
def tfTypes():
return (tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32, tf.int32, tf.int32)
@staticmethod
def npTypes():
return (np.int32, np.int32, np.int32, np.int32, np.int32, np.int32, np.float32, np.int32, np.int32)
@staticmethod
def tfShapes(batch_size, sequence_length, max_position_embeddings = None):
return (tf.TensorShape([batch_size, 1]),
tf.TensorShape([batch_size, sequence_length]),
tf.TensorShape([batch_size, sequence_length]),
tf.TensorShape([batch_size, sequence_length]),
tf.TensorShape([batch_size, max_position_embeddings]),
tf.TensorShape([batch_size, max_position_embeddings]),
tf.TensorShape([batch_size, max_position_embeddings]),
tf.TensorShape([batch_size, max_position_embeddings]),
tf.TensorShape([batch_size, 1]),
)
## Tuple representation of mask id/position/hole_length for easy sorting
class MaskedLmInstance():
def __init__(self,
pos_index: int,
token_id: int,
hole_length: int,
extend_left: bool,
):
self.pos_index = pos_index
self.token_id = token_id
self.hole_length = hole_length
self.extend_left = extend_left
def MPHoleSequence(seq: np.array,
train_set: bool,
max_predictions: int,
pickled_distribution: distributions.Distribution,
pickled_tokenizer,
training_opts,
is_torch: bool,
repair_locations: typing.List[int] = None,
) -> typing.Tuple[
typing.Union[typing.Dict[str, np.array], tfSequence],
typing.List[MaskedLmInstance],
]:
"""
Inserts hole tokens to a given sequence.
If repair_locations is set, then algorithm places holes over syntactic errors
for the model to repair them. Default is None, where hole-d indices are randomly
selected.
This function is compatible for multiprocessing. There is an optimized single-core
version below.
"""
assert seq.ndim == 1, "Input for masking must be single-dimension array."
# Unpack tokenizer and sampler
distribution = pickle.loads(pickled_distribution)
tokenizer = pickle.loads(pickled_tokenizer)
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
if use_start_end:
actual_length = np.where(seq == tokenizer.endToken)[0][0]
last_elem = actual_length
elif tokenizer.padToken in seq:
actual_length = np.where(seq == tokenizer.padToken)[0][0]
last_elem = actual_length - 1
else:
actual_length = len(seq)
last_elem = actual_length - 1
# total tokens to add in holes.
# No more than max_predictions_per_seq (or otherwise specified), no less than actual seq length x the probability of hiding a token
holes_to_predict = min(max_predictions,
max(1, int(round(actual_length * training_opts.masked_lm_prob))))
extend_left = True if np.random.RandomState().randint(0, 2) == 1 else False
input_ids = list(np.copy(seq))
# List of (seq_idx, token_id, hole_length) tuples
masked_lms = []
# Offset array. Indices represent elements in the initial array (seq)
# Values of indices represent current offset position in processed array (input_ids).
offset_idxs = np.zeros(len(seq), dtype = np.int64)
# Set with all candidate_indexes that have been holed.
visited_indices = set()
# Total masks placed so far.
total_predictions = 0
while total_predictions < holes_to_predict:
if repair_locations:
pos_index = repair_locations[np.random.RandomState().randint(0, len(repair_locations))]
else:
pos_index = np.random.RandomState().randint(0, actual_length) # Fixed seed doesn't work!
assert pos_index < len(seq), "Candidate index is out of bounds: {} >= {}".format(pos_index, len(seq))
# Element in processed array can be found in its original index +/- offset
input_id_idx = pos_index + offset_idxs[pos_index]
if total_predictions >= holes_to_predict:
break
elif pos_index in visited_indices:
# Do not target an index, already holed
continue
elif input_id_idx > len(seq):
# Do not mask a part of input_ids that is going to be cropped.
continue
elif input_ids[input_id_idx] in {tokenizer.startToken, tokenizer.endToken}:
# Do not target [START] or [END] token
continue
assert input_ids[input_id_idx] == seq[pos_index], (
"Original and offset-ted sequence have misaligned tokens: {}, {}"
.format(seq[pos_index], input_ids[input_id_idx]))
# Sampled number from distribution to represent the actual hole length
hole_length = distribution.sample(actual_length)
# Increase hole length a little bit, if too many empty holes have pushed rightmost elements
# over the edge.
while last_elem + offset_idxs[last_elem] + 1 - hole_length >= len(seq):
hole_length += 1
# Inside range, make sure hole length does not run over input_id_idx bounds
# This may be redundant given the next for loop
if extend_left:
hole_length = min(hole_length, input_id_idx)
else:
hole_length = min(hole_length, (last_elem + offset_idxs[last_elem]) - input_id_idx)
# Confirm there is no conflict with another hole, further down the sequence.
for i in range(hole_length):
if extend_left:
if (input_ids[input_id_idx - i] == tokenizer.holeToken
or input_ids[input_id_idx - i] == tokenizer.startToken
or input_ids[input_id_idx - i] == tokenizer.endToken
# or input_id_idx - i == 0
):
hole_length = i
break
else:
if (input_ids[input_id_idx + i] == tokenizer.holeToken
or input_ids[input_id_idx + i] == tokenizer.startToken
or input_ids[input_id_idx + i] == tokenizer.endToken
# or input_id_idx + i == len(input_ids)
):
hole_length = i
break
if offset_idxs[last_elem] + 1 - hole_length >= len(seq):
# This hole can't help but explode the sequence. Go find a new position.
continue
assert hole_length >= 0, "hole length is negative: {}".format(hole_length)
pos_index -= hole_length - 1 if hole_length != 0 and extend_left else 0
input_id_idx = pos_index + offset_idxs[pos_index]
# Target token for classifier is either the first token of the hole, or endholeToken if hole is empty
target = input_ids[input_id_idx] if hole_length > 0 else tokenizer.endholeToken
input_ids = input_ids[:input_id_idx] + [tokenizer.holeToken] + input_ids[input_id_idx + hole_length:]
# Store position index, and after making all masks, update with updated offset array
masked_lms.append(MaskedLmInstance(
pos_index = pos_index, token_id = target, hole_length = hole_length, extend_left = extend_left
)
)
# Adjust the offset of all affected tokens, from pos_index and after.
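    # A hole of length L is replaced by a single hole token, so every original index
    # after pos_index shifts by (1 - L): left when L > 1, right when the hole is empty (L == 0).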
offset_idxs[pos_index + 1:] += 1 - hole_length
total_predictions += max(1, hole_length)
visited_indices.update(range(pos_index, pos_index + hole_length))
hole_analytics = copy.deepcopy(masked_lms)
# Now update the entries with offset index.
for lm in masked_lms:
prev_index = lm.pos_index
lm.pos_index = lm.pos_index + offset_idxs[lm.pos_index]
assert input_ids[lm.pos_index] == tokenizer.holeToken, "{}".format(lm.hole_length)
while len(input_ids) < len(seq):
input_ids.append(tokenizer.padToken)
masked_lms = sorted(masked_lms, key=lambda x: x.pos_index)
input_mask = np.ones(len(seq), dtype = np.int64)
if tokenizer.padToken in input_ids:
first_pad_index = input_ids.index(tokenizer.padToken)
input_mask[first_pad_index:] = 0
# Check that the pad index is likely correct.
assert input_ids[first_pad_index] == tokenizer.padToken, "{}".format(input_ids)
assert input_ids[first_pad_index - 1] != tokenizer.padToken
"""
Related to next_sentence_labels: Fix it to 0 for now, as no next_sentence prediction
is intended on kernels. In any other case, check bert's create_instances_from_document
to see how next_sentence_labels are calculated.
Setting this to 0 means that next sentence is NOT random.
Note that if next_sentence prediction is to be embedded, [SEP] token has to be added.
"""
if len(masked_lms) == 0:
    l.logger().warn("No HOLE added to datapoint. Increase the probability of a hole occurring.")
if is_torch:
seen_in_training = np.int64([1] if train_set else [0])
next_sentence_labels = np.int64([0])
masked_lm_lengths = np.full(holes_to_predict, -1, dtype = np.int64)
mask_labels = np.full(len(seq), -100, dtype = np.int64)
ind = 0
for p in masked_lms:
if p.pos_index < len(seq):
mask_labels[p.pos_index] = p.token_id
masked_lm_lengths[ind] = p.hole_length
ind += 1
return {
'seen_in_training' : seen_in_training,
'original_input' : seq,
'input_ids' : np.asarray(input_ids[:len(seq)], dtype = np.int64),
'input_mask' : input_mask,
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels': next_sentence_labels,
}, hole_analytics
else: # TF 1.X, 2.[0-2]
seen_in_training = np.int32(1 if train_set else 0)
next_sentence_label = np.int32(0)
masked_lm_positions, masked_lm_ids, masked_lm_weights, masked_lm_lengths = [], [], [], []
for p in masked_lms:
if p.pos_index < len(seq):
"""
        Adding holes can increase or decrease the length of the original sequence.
        In the end, the input sequence must remain compatible with the model's
        sequence length, i.e. len(seq). Any mask found beyond that point has to
        be rejected.
"""
masked_lm_positions.append(p.pos_index)
masked_lm_ids.append(p.token_id)
masked_lm_weights.append(1.0)
masked_lm_lengths.append(p.hole_length)
num_holes = len(masked_lm_positions)
while len(masked_lm_positions) < training_opts.max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(tokenizer.padToken)
masked_lm_weights.append(0.0)
masked_lm_lengths.append(-1)
    assert input_ids[:len(seq)].count(tokenizer.holeToken) == num_holes, (
      "Number of targets {} does not correspond to hole number in final input sequence: {}"
      .format(num_holes, input_ids[:len(seq)].count(tokenizer.holeToken))
    )
return tfSequence(seen_in_training, seq,
np.asarray(input_ids[:len(seq)]), input_mask,
np.asarray(masked_lm_positions), np.asarray(masked_lm_ids),
np.asarray(masked_lm_weights), np.asarray(masked_lm_lengths),
next_sentence_label
), hole_analytics
def MPMaskSequence(seq: np.array,
train_set: bool,
max_predictions: int,
pickled_tokenizer,
training_opts,
config,
is_torch: bool,
) -> typing.Dict:
"""
  Inserts masks into a given sequence.
  This function is compatible with multiprocessing. There is an optimized
  single-core version below.
"""
assert seq.ndim == 1, "Input for masking must be single-dimension array."
## Tuple representation of mask id/position for easy sorting
class MaskedLmInstance(typing.NamedTuple):
pos_index: int
token_id: int
# Unpack tokenizer
tokenizer = pickle.loads(pickled_tokenizer)
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
if use_start_end:
actual_length = np.where(seq == tokenizer.endToken)[0][0]
elif tokenizer.padToken in seq:
actual_length = np.where(seq == tokenizer.padToken)[0][0]
else:
actual_length = len(seq)
candidate_indexes = np.arange(actual_length)
np.random.RandomState().shuffle(candidate_indexes)
masks_to_predict = min(max_predictions,
max(1, int(round(actual_length * training_opts.masked_lm_prob))))
input_ids = list(np.copy(seq))
masked_lms = []
for pos_index in candidate_indexes:
if len(masked_lms) >= masks_to_predict:
break
if config.mask.random_placed_mask:
# 80% of the time, replace with [MASK]
if np.random.RandomState().random() < 0.8:
input_ids[pos_index] = tokenizer.maskToken
else:
# 10% of the time, keep original
if np.random.RandomState().random() < 0.5:
pass
# 10% of the time, replace with random word
else:
random_token = np.random.RandomState().randint(0, tokenizer.vocab_size)
while any(tokenizer.vocab[t] == random_token for (idx, t) in tokenizer.metaTokens.items()):
random_token = np.random.RandomState().randint(0, tokenizer.vocab_size)
          input_ids[pos_index] = random_token
else:
if np.random.RandomState().random() < 0.8:
input_ids[pos_index] = tokenizer.maskToken
masked_lms.append(MaskedLmInstance(pos_index=pos_index, token_id=seq[pos_index]))
assert len(masked_lms) <= masks_to_predict
masked_lms = sorted(masked_lms, key=lambda x: x.pos_index)
input_mask = np.ones(len(seq), dtype = np.int64)
if tokenizer.padToken in input_ids:
input_mask[input_ids.index(tokenizer.padToken):] = 0
## Related to next_sentence_labels: Fix it to 0 for now, as no next_sentence prediction
## is intended on kernels. In any other case, check bert's create_instances_from_document
## to see how next_sentence_labels are calculated.
## Setting this to 0 means that next sentence is NOT random.
## Note that if next_sentence prediction is to be embedded, [SEP] token has to be added.
if is_torch:
seen_in_training = np.int64([1] if train_set else [0])
next_sentence_labels = np.int64([0])
masked_lm_lengths = np.full(masks_to_predict, -1, dtype = np.int64)
mask_labels = np.full(len(seq), -100, dtype = np.int64)
ind = 0
for p in masked_lms:
if p.pos_index < len(seq):
mask_labels[p.pos_index] = p.token_id
masked_lm_lengths[ind] = 1
ind += 1
return ({
'seen_in_training' : seen_in_training,
'original_input' : seq,
'input_ids' : np.asarray(input_ids[:len(seq)], dtype = np.int64),
'input_mask' : input_mask,
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels': next_sentence_labels,
}, [])
else: # TF 1.X, 2.[0-2]
masked_lm_positions, masked_lm_ids, masked_lm_weights, masked_lm_lengths = [], [], [], []
seen_in_training = np.int32(1 if train_set else 0)
next_sentence_label = np.int32(0)
for p in masked_lms:
masked_lm_positions.append(p.pos_index)
masked_lm_ids.append(p.token_id)
masked_lm_weights.append(1.0)
masked_lm_lengths.append(1)
while len(masked_lm_positions) < training_opts.max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(tokenizer.padToken)
masked_lm_weights.append(0.0)
masked_lm_lengths.append(-1)
return tfSequence(seen_in_training, seq,
np.asarray(input_ids), input_mask,
np.asarray(masked_lm_positions), np.asarray(masked_lm_ids),
np.asarray(masked_lm_weights), np.asarray(masked_lm_lengths),
next_sentence_label
), [], []
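# Hedged usage sketch for MPMaskSequence (illustration only, not part of the original
# module). The tokenizer, training_opts and config objects below are minimal
# SimpleNamespace stand-ins that only carry the attributes this function reads; the
# real objects come from BenchPress's tokenizer and proto configs.
#
#   from types import SimpleNamespace
#   import pickle
#   import numpy as np
#
#   tok = SimpleNamespace(
#     startToken = 0, endToken = 1, padToken = 2, maskToken = 3, holeToken = 4,
#     vocab_size = 10, metaTokens = {"startToken": "[START]"}, vocab = {"[START]": 0},
#   )
#   opts = SimpleNamespace(masked_lm_prob = 0.15, max_predictions_per_seq = 5)
#   cfg  = SimpleNamespace(mask = SimpleNamespace(random_placed_mask = True))
#   seq  = np.array([0, 7, 8, 9, 7, 8, 1, 2, 2, 2], dtype = np.int64)
#   datapoint, _ = MPMaskSequence(seq, True, 5, pickle.dumps(tok), opts, cfg, is_torch = True)
#   # datapoint['input_ids'] now carries [MASK] tokens at the selected positions.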
def HoleSequence(seq: np.array,
train_set: bool,
max_predictions: int,
masked_lm_prob: int,
distribution: distributions.Distribution,
tokenizer,
) -> typing.Dict[str, np.array]:
"""
  Inserts hole tokens into a given sequence.
Used for online training.
"""
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
if use_start_end:
actual_length = np.where(seq == tokenizer.endToken)[0][0]
last_elem = actual_length
elif tokenizer.padToken in seq:
actual_length = np.where(seq == tokenizer.padToken)[0][0]
last_elem = actual_length - 1
else:
actual_length = len(seq)
last_elem = actual_length - 1
# total tokens to add in holes.
# No more than max_predictions_per_seq (or otherwise specified), no less than actual seq length x the probability of hiding a token
holes_to_predict = min(max_predictions,
max(1, int(round(actual_length * masked_lm_prob))))
extend_left = True if np.random.RandomState().randint(0, 2) == 1 else False
input_ids = list(np.copy(seq))
# List of (seq_idx, token_id, hole_length) tuples
masked_lms = []
# Offset array. Indices represent elements in the initial array (seq)
# Values of indices represent current offset position in processed array (input_ids).
offset_idxs = np.zeros(len(seq), dtype = np.int64)
# Set with all candidate_indexes that have been holed.
visited_indices = set()
# Total masks placed so far.
total_predictions = 0
while total_predictions < holes_to_predict:
try:
pos_index = np.random.RandomState().randint(0, actual_length) # Fixed seed doesn't work!
except ValueError as e:
l.logger().error(actual_length)
l.logger().error(tokenizer.tokensToString(seq))
raise e
# Element in processed array can be found in its original index +/- offset
input_id_idx = pos_index + offset_idxs[pos_index]
if total_predictions >= holes_to_predict:
break
elif pos_index in visited_indices:
# Do not target an index, already holed
continue
    elif input_id_idx >= len(seq):
# Do not mask a part of input_ids that is going to be cropped.
continue
elif input_ids[input_id_idx] in {tokenizer.startToken, tokenizer.endToken}:
# Do not target [START] or [END] token
continue
# Sampled number from distribution to represent the actual hole length
hole_length = distribution.sample(actual_length)
# Increase hole length a little bit, if too many empty holes have pushed rightmost elements
# over the edge.
while last_elem + offset_idxs[last_elem] + 1 - hole_length >= len(seq):
hole_length += 1
# Inside range, make sure hole length does not run over input_id_idx bounds
# This may be redundant given the next for loop
if extend_left:
hole_length = min(hole_length, input_id_idx)
else:
hole_length = min(hole_length, (last_elem + offset_idxs[last_elem]) - input_id_idx)
# Confirm there is no conflict with another hole, further down the sequence.
for i in range(hole_length):
if extend_left:
if (input_ids[input_id_idx - i] == tokenizer.holeToken
or input_ids[input_id_idx - i] == tokenizer.startToken
or input_ids[input_id_idx - i] == tokenizer.endToken
# or input_id_idx - i == 0
):
hole_length = i
break
else:
if (input_ids[input_id_idx + i] == tokenizer.holeToken
or input_ids[input_id_idx + i] == tokenizer.startToken
or input_ids[input_id_idx + i] == tokenizer.endToken
# or input_id_idx + i == len(input_ids)
):
hole_length = i
break
if offset_idxs[last_elem] + 1 - hole_length >= len(seq):
# This hole can't help but explode the sequence. Go find a new position.
continue
assert hole_length >= 0, "hole length is negative: {}".format(hole_length)
pos_index -= hole_length - 1 if hole_length != 0 and extend_left else 0
input_id_idx = pos_index + offset_idxs[pos_index]
# Target token for classifier is either the first token of the hole, or endholeToken if hole is empty
target = input_ids[input_id_idx] if hole_length > 0 else tokenizer.endholeToken
input_ids = input_ids[:input_id_idx] + [tokenizer.holeToken] + input_ids[input_id_idx + hole_length:]
# Store position index, and after making all masks, update with updated offset array
masked_lms.append(MaskedLmInstance(
pos_index = pos_index, token_id = target, hole_length = hole_length, extend_left = extend_left
)
)
# Adjust the offset of all affected tokens, from pos_index and after.
offset_idxs[pos_index + 1:] += 1 - hole_length
total_predictions += max(1, hole_length)
visited_indices.update(range(pos_index, pos_index + hole_length))
# Now update the entries with offset index.
for lm in masked_lms:
prev_index = lm.pos_index
lm.pos_index = lm.pos_index + offset_idxs[lm.pos_index]
while len(input_ids) < len(seq):
input_ids.append(tokenizer.padToken)
masked_lms = sorted(masked_lms, key=lambda x: x.pos_index)
input_mask = np.ones(len(seq), dtype = np.int64)
if tokenizer.padToken in input_ids:
first_pad_index = input_ids.index(tokenizer.padToken)
input_mask[first_pad_index:] = 0
seen_in_training = np.int64([1] if train_set else [0])
next_sentence_labels = np.int64([0])
masked_lm_lengths = np.full(holes_to_predict, -1, dtype = np.int64)
mask_labels = np.full(len(seq), -100, dtype = np.int64)
ind = 0
for p in masked_lms:
if p.pos_index < len(seq):
mask_labels[p.pos_index] = p.token_id
masked_lm_lengths[ind] = p.hole_length
ind += 1
return {
'seen_in_training' : seen_in_training,
'original_input' : seq,
'input_ids' : np.asarray(input_ids[:len(seq)], dtype = np.int64),
'input_mask' : input_mask,
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels': next_sentence_labels,
}
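# Hedged usage sketch for HoleSequence (illustration only, not part of the original
# module). The distribution stand-in only needs a .sample(length) method and the
# tokenizer stand-in only the special-token ids this function reads; both are
# assumptions made for the sake of the example.
#
#   from types import SimpleNamespace
#   import numpy as np
#
#   dist = SimpleNamespace(sample = lambda length: 2)   # always cut holes of two tokens
#   tok  = SimpleNamespace(startToken = 0, endToken = 1, padToken = 2,
#                          holeToken = 4, endholeToken = 5)
#   seq  = np.array([0, 7, 8, 9, 7, 8, 9, 7, 1, 2, 2, 2], dtype = np.int64)
#   datapoint = HoleSequence(seq, True, 3, 0.15, dist, tok)
#   # datapoint['input_ids'] holds the holed sequence; datapoint['mask_labels'] holds
#   # the target token at each hole position and -100 everywhere else.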
def HoleSequenceSeqMasks(seq: np.array,
train_set: bool,
max_predictions: int,
masked_lm_prob: int,
distribution: distributions.Distribution,
tokenizer,
) -> typing.Dict[str, np.array]:
"""
  Instead of a single hole token, keep the left context on the leftmost part of
  the sequence and the right context on the rightmost part; all remaining
  positions in the middle become masks, and those masks correspond to the actual
  span to be predicted.
This is PLDI Reviewer B's idea.
"""
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
if use_start_end:
actual_length = np.where(seq == tokenizer.endToken)[0][0]
last_elem = actual_length
elif tokenizer.padToken in seq:
actual_length = np.where(seq == tokenizer.padToken)[0][0]
last_elem = actual_length - 1
else:
actual_length = len(seq)
last_elem = actual_length - 1
# total tokens to add in holes.
# No more than max_predictions_per_seq (or otherwise specified), no less than actual seq length x the probability of hiding a token
holes_to_predict = min(max_predictions,
max(1, int(round(actual_length * masked_lm_prob))))
assert holes_to_predict == 1, "This mode only supports a single hole."
extend_left = True if np.random.RandomState().randint(0, 2) == 1 else False
input_ids = list(np.copy(seq))
# List of (seq_idx, token_id, hole_length) tuples
masked_lms = []
# Set with all candidate_indexes that have been holed.
visited_indices = set()
# Total masks placed so far.
total_predictions = 0
while total_predictions < holes_to_predict:
pos_index = np.random.RandomState().randint(0, actual_length) # Fixed seed doesn't work!
# Element in processed array can be found in its original index +/- offset
if total_predictions >= holes_to_predict:
break
elif pos_index in visited_indices:
# Do not target an index, already holed
continue
elif input_ids[pos_index] in {tokenizer.startToken, tokenizer.endToken}:
# Do not target [START] or [END] token
continue
# Sampled number from distribution to represent the actual hole length
hole_length = distribution.sample(actual_length)
# Increase hole length a little bit, if too many empty holes have pushed rightmost elements
# over the edge.
while last_elem + 1 - hole_length >= len(seq):
hole_length += 1
# Inside range, make sure hole length does not run over pos_index bounds
# This may be redundant given the next for loop
if extend_left:
hole_length = min(hole_length, pos_index)
else:
hole_length = min(hole_length, last_elem - pos_index)
# Confirm there is no conflict with another hole, further down the sequence.
for i in range(hole_length):
if extend_left:
if (input_ids[pos_index - i] == tokenizer.holeToken
or input_ids[pos_index - i] == tokenizer.startToken
or input_ids[pos_index - i] == tokenizer.endToken
# or pos_index - i == 0
):
hole_length = i
break
else:
if (input_ids[pos_index + i] == tokenizer.holeToken
or input_ids[pos_index + i] == tokenizer.startToken
or input_ids[pos_index + i] == tokenizer.endToken
# or pos_index + i == len(input_ids)
):
hole_length = i
break
if 1 - hole_length >= len(seq):
# This hole can't help but explode the sequence. Go find a new position.
continue
assert hole_length >= 0, "hole length is negative: {}".format(hole_length)
pos_index -= hole_length - 1 if hole_length != 0 and extend_left else 0
# Target token for classifier is either the first token of the hole, or endholeToken if hole is empty
targets = input_ids[pos_index: pos_index + hole_length]
lc = input_ids[:pos_index]
rc = input_ids[pos_index + hole_length:actual_length+1]
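    # pad_len is the slack left in the fixed-length sequence after keeping the left
    # context, the right context and the target span; below it is absorbed by extra
    # mask tokens on the input side and endholeToken targets on the label side.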
pad_len = len(seq) - len(lc) - len(rc) - len(targets)
if pad_len == 0:
if len(rc) > 1:
# input_ids = input_ids[:-2] + [input_ids[-1]]
input_ids = lc + [tokenizer.maskToken]*(len(targets) + pad_len + 1) + rc[:-2] + [rc[-1]]
targets += [tokenizer.endholeToken]
else:
targets[-1] = tokenizer.endholeToken
input_ids = lc + [tokenizer.maskToken]*(len(targets) + pad_len) + rc
else:
input_ids = lc + [tokenizer.maskToken]*(len(targets) + pad_len) + rc
targets += [tokenizer.endholeToken] * pad_len
# Store position index, and after making all masks, update with updated offset array
masked_lms.append(MaskedLmInstance(
pos_index = pos_index, token_id = targets, hole_length = hole_length, extend_left = extend_left
)
)
# Adjust the offset of all affected tokens, from pos_index and after.
total_predictions += max(1, hole_length)
visited_indices.update(range(pos_index, pos_index + hole_length))
assert len(input_ids) == len(seq), "Input sequence and sequence length mismatch: {} / {}, {}".format(len(input_ids), len(seq), tokenizer.tokensToString(input_ids))
assert input_ids[0] == tokenizer.startToken, "{}".format(tokenizer.tokensToString(input_ids[0]))
assert input_ids[-1] == tokenizer.endToken, "{}".format(tokenizer.tokensToString(input_ids[-1]))
# Now update the entries with offset index.
masked_lms = sorted(masked_lms, key=lambda x: x.pos_index)
mask_labels = np.full(len(seq), -100, dtype = np.int64)
for p in masked_lms:
if p.pos_index < len(seq):
for idx, tid in enumerate(p.token_id):
mask_labels[p.pos_index + idx] = tid
return {
'seen_in_training' : np.int64([1] if train_set else [0]),
'original_input' : seq,
'input_ids' : np.asarray(input_ids[:len(seq)], dtype = np.int64),
'input_mask' : np.ones(len(seq), dtype = np.int64),
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : np.int64([1]),
'next_sentence_labels': np.int64([0]),
}
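# Illustration (token names only, ids assumed) of the layout HoleSequenceSeqMasks produces:
#   original : [START] a b c d e [END]
#   holed    : [START] a [MASK] [MASK] [MASK] e [END]   (left ctx | masks | right ctx)
#   labels   : the masked positions carry b c d (plus endholeToken padding when needed);
#              every other position stays at -100.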
def MaskedSeqToBlob(enc_text: np.array,
tokenizer,
sequence_length: int,
max_position_embeddings: int,
):
"""
Constructs training/sampling instance from plain input text.
"""
input_sample = enc_text
target_idx = np.where(np.in1d(input_sample, [tokenizer.maskToken, tokenizer.holeToken]))[0]
num_targets = (np.count_nonzero(input_sample == tokenizer.maskToken) +
np.count_nonzero(input_sample == tokenizer.holeToken))
assert np.ndim(input_sample) == 1, "Input samples have to be one-dimensional. {} given.".format(input_sample.shape)
# if tokenizer.requires_mask:
# assert len(target_idx) != 0, "No target prediction in sample text"
seen_in_training = np.zeros([1], dtype = np.int32)
original_input = np.full((1), tokenizer.padToken, dtype = np.int64)
input_ids = np.concatenate([
input_sample, np.array([tokenizer.padToken] * (max_position_embeddings - len(input_sample)), dtype = np.int64)
])[:sequence_length]
input_mask = np.concatenate([
np.ones(len(input_sample), dtype = np.int64),
np.zeros(len(input_ids) - len(input_sample), dtype = np.int64)
])
position_ids = np.arange(sequence_length, dtype = np.int64)
mask_labels = np.full((sequence_length), -100, dtype = np.int64)
masked_lm_lengths = np.full((1), -1, dtype = np.int64)
next_sentence_labels = np.zeros([1], dtype = np.int32)
return {
'seen_in_training' : seen_in_training,
'original_input' : original_input,
'input_ids' : input_ids,
'input_mask' : input_mask,
'position_ids' : position_ids,
'mask_labels' : mask_labels,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels': next_sentence_labels,
}
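# Hedged usage sketch for MaskedSeqToBlob (illustration only; reuses the SimpleNamespace
# tokenizer stand-in from the sketches above, and the token ids are assumptions):
#   enc  = np.array([0, 7, 4, 8, 1], dtype = np.int64)   # 4 stands for the hole token here
#   blob = MaskedSeqToBlob(enc, tok, sequence_length = 16, max_position_embeddings = 16)
#   # blob['input_ids'] is enc padded out to 16 positions; blob['input_mask'] marks the
#   # real tokens, and blob['mask_labels'] stays at -100 since no targets are known yet.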
def ExhaustiveHoleSequence(all_seq: np.array,
train_set: bool,
# max_predictions: int,
# pickled_distribution: distributions.Distribution,
pickled_tokenizer,
# training_opts,
# is_torch: bool,
# repair_locations: typing.List[int] = None,
) -> typing.Generator:
"""
  Placing random holes seems to introduce an overfitting bias in the model:
  it does not learn a good distribution of what should go in a specific hole
  given an index, a left context and a right context. This function may solve
  that, hopefully in a sustainable way.
  No holes are placed randomly. Each index produces many holed sequence
  instances, starting from an empty hole up to hiding everything until the end.
  Given one sequence, yields one instance per (index, hole length) combination.
!!!WARNING: Currently only supported for PyTorch.
"""
with progressbar.ProgressBar(max_value = len(all_seq)) as bar:
for seq in bar(all_seq):
assert seq.ndim == 1, "Input for masking must be single-dimension array."
# Unpack tokenizer
tokenizer = pickle.loads(pickled_tokenizer)
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
start_idx = 0
if use_start_end:
start_idx = 1
end = np.where(seq == tokenizer.endToken)[0][0]
elif tokenizer.padToken in seq:
end = np.where(seq == tokenizer.padToken)[0][0]
else:
end = len(seq)
st_input_ids = list(seq)
for idx in range(start_idx, end):
for hole_len in range(0, end - idx):
if end + 1 - hole_len >= len(seq):
continue
input_ids = st_input_ids[:idx] + [tokenizer.holeToken] + st_input_ids[idx + hole_len:]
input_ids += [tokenizer.padToken] * (len(seq) - len(input_ids))
input_ids = input_ids[:len(seq)]
mask_labels = np.full(len(seq), -100, dtype = np.int64)
target = seq[idx] if hole_len else tokenizer.endholeToken
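          # For an empty hole (hole_len == 0) the label is endholeToken and it is
          # written one position earlier (idx - 1).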
          mask_labels[idx if hole_len else idx - 1] = target
mlm_inst = MaskedLmInstance(
pos_index = idx, token_id = target,
hole_length = hole_len, extend_left = False
)
yield ({
'seen_in_training' : np.int64([1] if train_set else [0]),
'original_input' : seq,
'input_ids' : np.asarray(input_ids, dtype = np.int64),
'input_mask' : (seq != tokenizer.padToken),
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : np.array([hole_len]),
'next_sentence_labels': np.int64([0]),
}, [mlm_inst])
return | 35,295 | 41.019048 | 165 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/lm_database.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Observation database for lm datasets."""
import typing
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from deeplearning.benchpress.util import sqlutil
Base = declarative.declarative_base()
class LMInstance(Base, sqlutil.ProtoBackedMixin):
"""
  A database entry representing a BenchPress masked LM instance.
"""
__tablename__ = "masked_lm_instances"
id : int = sql.Column(sql.Integer, primary_key = True, index = True)
original_input : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
input_ids : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
masked_lm_lengths : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
masked_lm_predictions : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
@classmethod
def FromArgs(cls,
id :int,
original_input :str,
input_ids :str,
masked_lm_lengths :typing.List[int],
masked_lm_predictions :typing.List[str],
) -> typing.Dict[str, typing.Any]:
return {
"id" : id,
"original_input" : original_input,
"input_ids" : input_ids,
"masked_lm_lengths" : ','.join([str(x) for x in masked_lm_lengths if x >= 0]),
"masked_lm_predictions" : ','.join(masked_lm_predictions),
}
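  # Hedged illustration (not part of the original file) of the mapping FromArgs performs;
  # the literal values below are assumptions:
  #   LMInstance.FromArgs(
  #     id = 0, original_input = "...", input_ids = "[START] ...",
  #     masked_lm_lengths = [3, -1], masked_lm_predictions = ["foo", "bar"],
  #   )
  #   returns {"id": 0, ..., "masked_lm_lengths": "3", "masked_lm_predictions": "foo,bar"},
  #   which can be expanded into LMInstance(**kwargs) and added to an LMDatabase session.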
class LMDatabase(sqlutil.Database):
"""A database of BenchPress samples."""
def __init__(self, url: str, must_exist: bool = False):
super(LMDatabase, self).__init__(url, Base, must_exist = must_exist)
@property
def count(self):
with self.Session() as s:
count = s.query(LMInstance).count()
return count | 2,465 | 39.42623 | 104 | py |