repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
ocp | ocp-main/ocpmodels/common/relaxation/optimizers/lbfgs_torch.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
from collections import deque
from pathlib import Path
from typing import Deque, Optional
import ase
import ase.io
import torch
from torch_geometric.data import Batch
from torch_scatter import scatter
from ocpmodels.common.relaxation.ase_utils import batch_to_atoms
from ocpmodels.common.utils import radius_graph_pbc
class LBFGS:
def __init__(
self,
batch: Batch,
model: "TorchCalc",
maxstep: float = 0.01,
memory: int = 100,
damping: float = 0.25,
alpha: float = 100.0,
force_consistent=None,
device: str = "cuda:0",
save_full_traj: bool = True,
traj_dir: Optional[Path] = None,
traj_names=None,
early_stop_batch: bool = False,
) -> None:
self.batch = batch
self.model = model
self.maxstep = maxstep
self.memory = memory
self.damping = damping
self.alpha = alpha
self.H0 = 1.0 / self.alpha
self.force_consistent = force_consistent
self.device = device
self.save_full = save_full_traj
self.traj_dir = traj_dir
self.traj_names = traj_names
self.early_stop_batch = early_stop_batch
self.otf_graph = model.model._unwrapped_model.otf_graph
assert not self.traj_dir or (
traj_dir and len(traj_names)
), "Trajectory names should be specified to save trajectories"
logging.info("Step Fmax(eV/A)")
if not self.otf_graph and "edge_index" not in batch:
self.model.update_graph(self.batch)
def get_energy_and_forces(self, apply_constraint: bool = True):
energy, forces = self.model.get_energy_and_forces(
self.batch, apply_constraint
)
return energy, forces
def set_positions(self, update, update_mask) -> None:
if not self.early_stop_batch:
update = torch.where(update_mask.unsqueeze(1), update, 0.0)
self.batch.pos += update.to(dtype=torch.float32)
if not self.otf_graph:
self.model.update_graph(self.batch)
def check_convergence(self, iteration, forces=None, energy=None):
if forces is None or energy is None:
energy, forces = self.get_energy_and_forces()
forces = forces.to(dtype=torch.float64)
max_forces_ = scatter(
(forces**2).sum(axis=1).sqrt(), self.batch.batch, reduce="max"
)
logging.info(
f"{iteration} "
+ " ".join(f"{x:0.3f}" for x in max_forces_.tolist())
)
# (batch_size) -> (nAtoms)
max_forces = max_forces_[self.batch.batch]
return max_forces.ge(self.fmax), energy, forces
def run(self, fmax, steps):
self.fmax = fmax
self.steps = steps
self.s = deque(maxlen=self.memory)
self.y = deque(maxlen=self.memory)
self.rho = deque(maxlen=self.memory)
self.r0 = self.f0 = None
self.trajectories = None
if self.traj_dir:
self.traj_dir.mkdir(exist_ok=True, parents=True)
self.trajectories = [
ase.io.Trajectory(self.traj_dir / f"{name}.traj_tmp", mode="w")
for name in self.traj_names
]
iteration = 0
converged = False
while iteration < steps and not converged:
update_mask, energy, forces = self.check_convergence(iteration)
converged = torch.all(torch.logical_not(update_mask))
if self.trajectories is not None:
if (
self.save_full
or converged
or iteration == steps - 1
or iteration == 0
):
self.write(energy, forces, update_mask)
if not converged and iteration < steps - 1:
self.step(iteration, forces, update_mask)
iteration += 1
# GPU memory usage as per nvidia-smi seems to gradually build up as
# batches are processed. This releases unoccupied cached memory.
torch.cuda.empty_cache()
if self.trajectories is not None:
for traj in self.trajectories:
traj.close()
for name in self.traj_names:
traj_fl = Path(self.traj_dir / f"{name}.traj_tmp")
traj_fl.rename(traj_fl.with_suffix(".traj"))
self.batch.y, self.batch.force = self.get_energy_and_forces(
apply_constraint=False
)
return self.batch
def step(
self,
iteration: int,
forces: Optional[torch.Tensor],
update_mask: torch.Tensor,
) -> None:
def determine_step(dr):
steplengths = torch.norm(dr, dim=1)
longest_steps = scatter(
steplengths, self.batch.batch, reduce="max"
)
longest_steps = longest_steps[self.batch.batch]
maxstep = longest_steps.new_tensor(self.maxstep)
scale = (longest_steps + 1e-7).reciprocal() * torch.min(
longest_steps, maxstep
)
dr *= scale.unsqueeze(1)
return dr * self.damping
if forces is None:
_, forces = self.get_energy_and_forces()
r = self.batch.pos.clone().to(dtype=torch.float64)
# Update s, y, rho
if iteration > 0:
s0 = (r - self.r0).flatten()
self.s.append(s0)
y0 = -(forces - self.f0).flatten()
self.y.append(y0)
self.rho.append(1.0 / torch.dot(y0, s0))
loopmax = min(self.memory, iteration)
alpha = forces.new_empty(loopmax)
q = -forces.flatten()
for i in range(loopmax - 1, -1, -1):
alpha[i] = self.rho[i] * torch.dot(self.s[i], q) # b
q -= alpha[i] * self.y[i]
z = self.H0 * q
for i in range(loopmax):
beta = self.rho[i] * torch.dot(self.y[i], z)
z += self.s[i] * (alpha[i] - beta)
# descent direction
p = -z.reshape((-1, 3))
dr = determine_step(p)
if torch.abs(dr).max() < 1e-7:
# Same configuration again (maybe a restart):
return
self.set_positions(dr, update_mask)
self.r0 = r
self.f0 = forces
def write(self, energy, forces, update_mask) -> None:
self.batch.y, self.batch.force = energy, forces
atoms_objects = batch_to_atoms(self.batch)
update_mask_ = torch.split(update_mask, self.batch.natoms.tolist())
for atm, traj, mask in zip(
atoms_objects, self.trajectories, update_mask_
):
if mask[0] or not self.save_full:
traj.write(atm)
class TorchCalc:
def __init__(self, model, transform=None) -> None:
self.model = model
self.transform = transform
def get_energy_and_forces(self, atoms, apply_constraint: bool = True):
predictions = self.model.predict(
atoms, per_image=False, disable_tqdm=True
)
energy = predictions["energy"]
forces = predictions["forces"]
if apply_constraint:
fixed_idx = torch.where(atoms.fixed == 1)[0]
forces[fixed_idx] = 0
return energy, forces
def update_graph(self, atoms):
edge_index, cell_offsets, num_neighbors = radius_graph_pbc(
atoms, 6, 50
)
atoms.edge_index = edge_index
atoms.cell_offsets = cell_offsets
atoms.neighbors = num_neighbors
if self.transform is not None:
atoms = self.transform(atoms)
return atoms
| 7,832 | 31.502075 | 79 | py |
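
The `step` method above is the standard two-loop L-BFGS recursion over the stored (s, y, rho) history, applied to all atoms in the batch at once. A minimal standalone sketch of that recursion, illustrative only (the helper name `lbfgs_direction`, the toy gradient, and the default `h0` are assumptions, not repo code):

import torch

def lbfgs_direction(grad, s_hist, y_hist, rho_hist, h0=0.01):
    """Return the quasi-Newton descent direction -H*grad from (s, y) pairs."""
    q = grad.clone()
    alphas = []
    # First loop: newest to oldest curvature pair.
    for s, y, rho in zip(reversed(s_hist), reversed(y_hist), reversed(rho_hist)):
        a = rho * torch.dot(s, q)
        q -= a * y
        alphas.append(a)
    z = h0 * q  # initial Hessian approximation H0 = 1 / alpha
    # Second loop: oldest to newest curvature pair.
    for s, y, rho, a in zip(s_hist, y_hist, rho_hist, reversed(alphas)):
        beta = rho * torch.dot(y, z)
        z += s * (a - beta)
    return -z

# With an empty history this reduces to plain steepest descent scaled by h0.
g = torch.randn(6)
print(lbfgs_direction(g, [], [], []))
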
ocp | ocp-main/ocpmodels/common/relaxation/optimizers/__init__.py | | 0 | 0 | 0 | py |
ocp | ocp-main/ocpmodels/models/base.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import torch
import torch.nn as nn
from torch_geometric.nn import radius_graph
from ocpmodels.common.utils import (
compute_neighbors,
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
class BaseModel(nn.Module):
def __init__(
self, num_atoms=None, bond_feat_dim=None, num_targets=None
) -> None:
super(BaseModel, self).__init__()
self.num_atoms = num_atoms
self.bond_feat_dim = bond_feat_dim
self.num_targets = num_targets
def forward(self, data):
raise NotImplementedError
def generate_graph(
self,
data,
cutoff=None,
max_neighbors=None,
use_pbc=None,
otf_graph=None,
enforce_max_neighbors_strictly=None,
):
cutoff = cutoff or self.cutoff
max_neighbors = max_neighbors or self.max_neighbors
use_pbc = use_pbc or self.use_pbc
otf_graph = otf_graph or self.otf_graph
if enforce_max_neighbors_strictly is not None:
pass
elif hasattr(self, "enforce_max_neighbors_strictly"):
# Not all models will have this attribute
enforce_max_neighbors_strictly = (
self.enforce_max_neighbors_strictly
)
else:
# Default to old behavior
enforce_max_neighbors_strictly = True
if not otf_graph:
try:
edge_index = data.edge_index
if use_pbc:
cell_offsets = data.cell_offsets
neighbors = data.neighbors
except AttributeError:
logging.warning(
"Turning otf_graph=True as required attributes not present in data object"
)
otf_graph = True
if use_pbc:
if otf_graph:
edge_index, cell_offsets, neighbors = radius_graph_pbc(
data,
cutoff,
max_neighbors,
enforce_max_neighbors_strictly,
)
out = get_pbc_distances(
data.pos,
edge_index,
data.cell,
cell_offsets,
neighbors,
return_offsets=True,
return_distance_vec=True,
)
edge_index = out["edge_index"]
edge_dist = out["distances"]
cell_offset_distances = out["offsets"]
distance_vec = out["distance_vec"]
else:
if otf_graph:
edge_index = radius_graph(
data.pos,
r=cutoff,
batch=data.batch,
max_num_neighbors=max_neighbors,
)
j, i = edge_index
distance_vec = data.pos[j] - data.pos[i]
edge_dist = distance_vec.norm(dim=-1)
cell_offsets = torch.zeros(
edge_index.shape[1], 3, device=data.pos.device
)
cell_offset_distances = torch.zeros_like(
cell_offsets, device=data.pos.device
)
neighbors = compute_neighbors(data, edge_index)
return (
edge_index,
edge_dist,
distance_vec,
cell_offsets,
cell_offset_distances,
neighbors,
)
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
| 3,688 | 27.596899 | 94 | py |
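
For the non-PBC, on-the-fly branch of `generate_graph` above, the edge construction boils down to `radius_graph` plus per-edge displacement vectors. A small sketch on a made-up 4-atom image (the toy coordinates and 2.0 cutoff are assumptions for illustration; requires torch_geometric):

import torch
from torch_geometric.nn import radius_graph

pos = torch.tensor([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [0.0, 1.2, 0.0],
                    [3.0, 3.0, 3.0]])
batch = torch.zeros(pos.size(0), dtype=torch.long)  # one image in the batch

edge_index = radius_graph(pos, r=2.0, batch=batch, max_num_neighbors=50)
j, i = edge_index                      # edges point j -> i
distance_vec = pos[j] - pos[i]         # per-edge displacement vectors
edge_dist = distance_vec.norm(dim=-1)  # per-edge distances
print(edge_index.shape, edge_dist)
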
ocp | ocp-main/ocpmodels/models/dimenet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch_geometric.nn import DimeNet, radius_graph
from torch_scatter import scatter
from torch_sparse import SparseTensor
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.models.base import BaseModel
@registry.register_model("dimenet")
class DimeNetWrap(DimeNet, BaseModel):
r"""Wrapper around the directional message passing neural network (DimeNet) from the
`"Directional Message Passing for Molecular Graphs"
<https://arxiv.org/abs/2003.03123>`_ paper.
DimeNet transforms messages based on the angle between them in a
rotation-equivariant fashion.
Args:
num_atoms (int): Unused argument
bond_feat_dim (int): Unused argument
num_targets (int): Number of targets to predict.
use_pbc (bool, optional): If set to :obj:`True`, account for periodic boundary conditions.
(default: :obj:`True`)
regress_forces (bool, optional): If set to :obj:`True`, predict forces by differentiating
energy with respect to positions.
(default: :obj:`True`)
hidden_channels (int, optional): Number of hidden channels.
(default: :obj:`128`)
num_blocks (int, optional): Number of building blocks.
(default: :obj:`6`)
num_bilinear (int, optional): Size of the bilinear layer tensor.
(default: :obj:`8`)
num_spherical (int, optional): Number of spherical harmonics.
(default: :obj:`7`)
num_radial (int, optional): Number of radial basis functions.
(default: :obj:`6`)
otf_graph (bool, optional): If set to :obj:`True`, compute graph edges on the fly.
(default: :obj:`False`)
cutoff (float, optional): Cutoff distance for interatomic interactions.
(default: :obj:`10.0`)
envelope_exponent (int, optional): Shape of the smooth cutoff.
(default: :obj:`5`)
num_before_skip: (int, optional): Number of residual layers in the
interaction blocks before the skip connection. (default: :obj:`1`)
num_after_skip: (int, optional): Number of residual layers in the
interaction blocks after the skip connection. (default: :obj:`2`)
num_output_layers: (int, optional): Number of linear layers for the
output blocks. (default: :obj:`3`)
max_angles_per_image (int, optional): The maximum number of angles used
per image. This can be used to reduce memory usage at the cost of
model performance. (default: :obj:`1e6`)
"""
def __init__(
self,
num_atoms: int,
bond_feat_dim, # not used
num_targets: int,
use_pbc: bool = True,
regress_forces: bool = True,
hidden_channels: int = 128,
num_blocks: int = 6,
num_bilinear: int = 8,
num_spherical: int = 7,
num_radial: int = 6,
otf_graph: bool = False,
cutoff: float = 10.0,
envelope_exponent: int = 5,
num_before_skip: int = 1,
num_after_skip: int = 2,
num_output_layers: int = 3,
max_angles_per_image: int = int(1e6),
) -> None:
self.num_targets = num_targets
self.regress_forces = regress_forces
self.use_pbc = use_pbc
self.cutoff = cutoff
self.otf_graph = otf_graph
self.max_angles_per_image = max_angles_per_image
self.max_neighbors = 50
super(DimeNetWrap, self).__init__(
hidden_channels=hidden_channels,
out_channels=num_targets,
num_blocks=num_blocks,
num_bilinear=num_bilinear,
num_spherical=num_spherical,
num_radial=num_radial,
cutoff=cutoff,
envelope_exponent=envelope_exponent,
num_before_skip=num_before_skip,
num_after_skip=num_after_skip,
num_output_layers=num_output_layers,
)
def triplets(self, edge_index, cell_offsets, num_nodes: int):
row, col = edge_index # j->i
value = torch.arange(row.size(0), device=row.device)
adj_t = SparseTensor(
row=col, col=row, value=value, sparse_sizes=(num_nodes, num_nodes)
)
adj_t_row = adj_t[row]
num_triplets = adj_t_row.set_value(None).sum(dim=1).to(torch.long)
# Node indices (k->j->i) for triplets.
idx_i = col.repeat_interleave(num_triplets)
idx_j = row.repeat_interleave(num_triplets)
idx_k = adj_t_row.storage.col()
# Edge indices (k->j, j->i) for triplets.
idx_kj = adj_t_row.storage.value()
idx_ji = adj_t_row.storage.row()
# Remove self-loop triplets d->b->d
# Check atom as well as cell offset
cell_offset_kji = cell_offsets[idx_kj] + cell_offsets[idx_ji]
mask = (idx_i != idx_k) | torch.any(cell_offset_kji != 0, dim=-1)
idx_i, idx_j, idx_k = idx_i[mask], idx_j[mask], idx_k[mask]
idx_kj, idx_ji = idx_kj[mask], idx_ji[mask]
return col, row, idx_i, idx_j, idx_k, idx_kj, idx_ji
@conditional_grad(torch.enable_grad())
def _forward(self, data):
pos = data.pos
batch = data.batch
(
edge_index,
dist,
_,
cell_offsets,
offsets,
neighbors,
) = self.generate_graph(data)
data.edge_index = edge_index
data.cell_offsets = cell_offsets
data.neighbors = neighbors
j, i = edge_index
_, _, idx_i, idx_j, idx_k, idx_kj, idx_ji = self.triplets(
edge_index,
data.cell_offsets,
num_nodes=data.atomic_numbers.size(0),
)
# Cap no. of triplets during training.
if self.training:
sub_ix = torch.randperm(idx_i.size(0))[
: self.max_angles_per_image * data.natoms.size(0)
]
idx_i, idx_j, idx_k = (
idx_i[sub_ix],
idx_j[sub_ix],
idx_k[sub_ix],
)
idx_kj, idx_ji = idx_kj[sub_ix], idx_ji[sub_ix]
# Calculate angles.
pos_i = pos[idx_i].detach()
pos_j = pos[idx_j].detach()
if self.use_pbc:
pos_ji, pos_kj = (
pos[idx_j].detach() - pos_i + offsets[idx_ji],
pos[idx_k].detach() - pos_j + offsets[idx_kj],
)
else:
pos_ji, pos_kj = (
pos[idx_j].detach() - pos_i,
pos[idx_k].detach() - pos_j,
)
a = (pos_ji * pos_kj).sum(dim=-1)
b = torch.cross(pos_ji, pos_kj).norm(dim=-1)
angle = torch.atan2(b, a)
rbf = self.rbf(dist)
sbf = self.sbf(dist, angle, idx_kj)
# Embedding block.
x = self.emb(data.atomic_numbers.long(), rbf, i, j)
P = self.output_blocks[0](x, rbf, i, num_nodes=pos.size(0))
# Interaction blocks.
for interaction_block, output_block in zip(
self.interaction_blocks, self.output_blocks[1:]
):
x = interaction_block(x, rbf, sbf, idx_kj, idx_ji)
P += output_block(x, rbf, i, num_nodes=pos.size(0))
energy = P.sum(dim=0) if batch is None else scatter(P, batch, dim=0)
return energy
def forward(self, data):
if self.regress_forces:
data.pos.requires_grad_(True)
energy = self._forward(data)
if self.regress_forces:
forces = -1 * (
torch.autograd.grad(
energy,
data.pos,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
)
return energy, forces
else:
return energy
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
| 8,211 | 34.549784 | 98 | py |
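
The angle computation in `_forward` above uses atan2(|v_ji x v_kj|, v_ji . v_kj), which stays well conditioned for angles near 0 and pi (unlike acos of a normalized dot product). A tiny numeric check, not part of the model:

import math
import torch

pos_ji = torch.tensor([[1.0, 0.0, 0.0]])
pos_kj = torch.tensor([[0.0, 1.0, 0.0]])
a = (pos_ji * pos_kj).sum(dim=-1)                      # dot product
b = torch.cross(pos_ji, pos_kj, dim=-1).norm(dim=-1)   # |cross product|
angle = torch.atan2(b, a)
assert math.isclose(angle.item(), math.pi / 2, rel_tol=1e-6)  # 90 degrees
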
ocp | ocp-main/ocpmodels/models/dimenet_plus_plus.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
---
This code borrows heavily from the DimeNet implementation as part of
pytorch-geometric: https://github.com/rusty1s/pytorch_geometric. License:
---
Copyright (c) 2020 Matthias Fey <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from typing import Optional
import torch
from torch import nn
from torch_geometric.nn import radius_graph
from torch_geometric.nn.inits import glorot_orthogonal
from torch_geometric.nn.models.dimenet import (
BesselBasisLayer,
EmbeddingBlock,
Envelope,
ResidualLayer,
SphericalBasisLayer,
)
from torch_geometric.nn.resolver import activation_resolver
from torch_scatter import scatter
from torch_sparse import SparseTensor
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.models.base import BaseModel
try:
import sympy as sym
except ImportError:
sym = None
class InteractionPPBlock(torch.nn.Module):
def __init__(
self,
hidden_channels,
int_emb_size,
basis_emb_size,
num_spherical,
num_radial,
num_before_skip,
num_after_skip,
act="silu",
) -> None:
act = activation_resolver(act)
super(InteractionPPBlock, self).__init__()
self.act = act
# Transformations of Bessel and spherical basis representations.
self.lin_rbf1 = nn.Linear(num_radial, basis_emb_size, bias=False)
self.lin_rbf2 = nn.Linear(basis_emb_size, hidden_channels, bias=False)
self.lin_sbf1 = nn.Linear(
num_spherical * num_radial, basis_emb_size, bias=False
)
self.lin_sbf2 = nn.Linear(basis_emb_size, int_emb_size, bias=False)
# Dense transformations of input messages.
self.lin_kj = nn.Linear(hidden_channels, hidden_channels)
self.lin_ji = nn.Linear(hidden_channels, hidden_channels)
# Embedding projections for interaction triplets.
self.lin_down = nn.Linear(hidden_channels, int_emb_size, bias=False)
self.lin_up = nn.Linear(int_emb_size, hidden_channels, bias=False)
# Residual layers before and after skip connection.
self.layers_before_skip = torch.nn.ModuleList(
[
ResidualLayer(hidden_channels, act)
for _ in range(num_before_skip)
]
)
self.lin = nn.Linear(hidden_channels, hidden_channels)
self.layers_after_skip = torch.nn.ModuleList(
[
ResidualLayer(hidden_channels, act)
for _ in range(num_after_skip)
]
)
self.reset_parameters()
def reset_parameters(self) -> None:
glorot_orthogonal(self.lin_rbf1.weight, scale=2.0)
glorot_orthogonal(self.lin_rbf2.weight, scale=2.0)
glorot_orthogonal(self.lin_sbf1.weight, scale=2.0)
glorot_orthogonal(self.lin_sbf2.weight, scale=2.0)
glorot_orthogonal(self.lin_kj.weight, scale=2.0)
self.lin_kj.bias.data.fill_(0)
glorot_orthogonal(self.lin_ji.weight, scale=2.0)
self.lin_ji.bias.data.fill_(0)
glorot_orthogonal(self.lin_down.weight, scale=2.0)
glorot_orthogonal(self.lin_up.weight, scale=2.0)
for res_layer in self.layers_before_skip:
res_layer.reset_parameters()
glorot_orthogonal(self.lin.weight, scale=2.0)
self.lin.bias.data.fill_(0)
for res_layer in self.layers_after_skip:
res_layer.reset_parameters()
def forward(self, x, rbf, sbf, idx_kj, idx_ji):
# Initial transformations.
x_ji = self.act(self.lin_ji(x))
x_kj = self.act(self.lin_kj(x))
# Transformation via Bessel basis.
rbf = self.lin_rbf1(rbf)
rbf = self.lin_rbf2(rbf)
x_kj = x_kj * rbf
# Down-project embeddings and generate interaction triplet embeddings.
x_kj = self.act(self.lin_down(x_kj))
# Transform via 2D spherical basis.
sbf = self.lin_sbf1(sbf)
sbf = self.lin_sbf2(sbf)
x_kj = x_kj[idx_kj] * sbf
# Aggregate interactions and up-project embeddings.
x_kj = scatter(x_kj, idx_ji, dim=0, dim_size=x.size(0))
x_kj = self.act(self.lin_up(x_kj))
h = x_ji + x_kj
for layer in self.layers_before_skip:
h = layer(h)
h = self.act(self.lin(h)) + x
for layer in self.layers_after_skip:
h = layer(h)
return h
class OutputPPBlock(torch.nn.Module):
def __init__(
self,
num_radial: int,
hidden_channels,
out_emb_channels,
out_channels,
num_layers: int,
act: str = "silu",
) -> None:
act = activation_resolver(act)
super(OutputPPBlock, self).__init__()
self.act = act
self.lin_rbf = nn.Linear(num_radial, hidden_channels, bias=False)
self.lin_up = nn.Linear(hidden_channels, out_emb_channels, bias=True)
self.lins = torch.nn.ModuleList()
for _ in range(num_layers):
self.lins.append(nn.Linear(out_emb_channels, out_emb_channels))
self.lin = nn.Linear(out_emb_channels, out_channels, bias=False)
self.reset_parameters()
def reset_parameters(self) -> None:
glorot_orthogonal(self.lin_rbf.weight, scale=2.0)
glorot_orthogonal(self.lin_up.weight, scale=2.0)
for lin in self.lins:
glorot_orthogonal(lin.weight, scale=2.0)
lin.bias.data.fill_(0)
self.lin.weight.data.fill_(0)
def forward(self, x, rbf, i, num_nodes: Optional[int] = None):
x = self.lin_rbf(rbf) * x
x = scatter(x, i, dim=0, dim_size=num_nodes)
x = self.lin_up(x)
for lin in self.lins:
x = self.act(lin(x))
return self.lin(x)
class DimeNetPlusPlus(torch.nn.Module):
r"""DimeNet++ implementation based on https://github.com/klicperajo/dimenet.
Args:
hidden_channels (int): Hidden embedding size.
out_channels (int): Size of each output sample.
num_blocks (int): Number of building blocks.
int_emb_size (int): Embedding size used for interaction triplets
basis_emb_size (int): Embedding size used in the basis transformation
out_emb_channels(int): Embedding size used for atoms in the output block
num_spherical (int): Number of spherical harmonics.
num_radial (int): Number of radial basis functions.
cutoff: (float, optional): Cutoff distance for interatomic
interactions. (default: :obj:`5.0`)
envelope_exponent (int, optional): Shape of the smooth cutoff.
(default: :obj:`5`)
num_before_skip: (int, optional): Number of residual layers in the
interaction blocks before the skip connection. (default: :obj:`1`)
num_after_skip: (int, optional): Number of residual layers in the
interaction blocks after the skip connection. (default: :obj:`2`)
num_output_layers: (int, optional): Number of linear layers for the
output blocks. (default: :obj:`3`)
act: (function, optional): The activation function.
(default: :obj:`silu`)
"""
url = "https://github.com/klicperajo/dimenet/raw/master/pretrained"
def __init__(
self,
hidden_channels,
out_channels,
num_blocks: int,
int_emb_size: int,
basis_emb_size: int,
out_emb_channels,
num_spherical: int,
num_radial: int,
cutoff: float = 5.0,
envelope_exponent=5,
num_before_skip: int = 1,
num_after_skip: int = 2,
num_output_layers: int = 3,
act: str = "silu",
) -> None:
act = activation_resolver(act)
super(DimeNetPlusPlus, self).__init__()
self.cutoff = cutoff
if sym is None:
raise ImportError("Package `sympy` could not be found.")
self.num_blocks = num_blocks
self.rbf = BesselBasisLayer(num_radial, cutoff, envelope_exponent)
self.sbf = SphericalBasisLayer(
num_spherical, num_radial, cutoff, envelope_exponent
)
self.emb = EmbeddingBlock(num_radial, hidden_channels, act)
self.output_blocks = torch.nn.ModuleList(
[
OutputPPBlock(
num_radial,
hidden_channels,
out_emb_channels,
out_channels,
num_output_layers,
act,
)
for _ in range(num_blocks + 1)
]
)
self.interaction_blocks = torch.nn.ModuleList(
[
InteractionPPBlock(
hidden_channels,
int_emb_size,
basis_emb_size,
num_spherical,
num_radial,
num_before_skip,
num_after_skip,
act,
)
for _ in range(num_blocks)
]
)
self.reset_parameters()
def reset_parameters(self) -> None:
self.rbf.reset_parameters()
self.emb.reset_parameters()
for out in self.output_blocks:
out.reset_parameters()
for interaction in self.interaction_blocks:
interaction.reset_parameters()
def triplets(self, edge_index, cell_offsets, num_nodes: int):
row, col = edge_index # j->i
value = torch.arange(row.size(0), device=row.device)
adj_t = SparseTensor(
row=col, col=row, value=value, sparse_sizes=(num_nodes, num_nodes)
)
adj_t_row = adj_t[row]
num_triplets = adj_t_row.set_value(None).sum(dim=1).to(torch.long)
# Node indices (k->j->i) for triplets.
idx_i = col.repeat_interleave(num_triplets)
idx_j = row.repeat_interleave(num_triplets)
idx_k = adj_t_row.storage.col()
# Edge indices (k->j, j->i) for triplets.
idx_kj = adj_t_row.storage.value()
idx_ji = adj_t_row.storage.row()
# Remove self-loop triplets d->b->d
# Check atom as well as cell offset
cell_offset_kji = cell_offsets[idx_kj] + cell_offsets[idx_ji]
mask = (idx_i != idx_k) | torch.any(cell_offset_kji != 0, dim=-1)
idx_i, idx_j, idx_k = idx_i[mask], idx_j[mask], idx_k[mask]
idx_kj, idx_ji = idx_kj[mask], idx_ji[mask]
return col, row, idx_i, idx_j, idx_k, idx_kj, idx_ji
def forward(self, z, pos, batch=None):
""" """
raise NotImplementedError
@registry.register_model("dimenetplusplus")
class DimeNetPlusPlusWrap(DimeNetPlusPlus, BaseModel):
def __init__(
self,
num_atoms,
bond_feat_dim, # not used
num_targets,
use_pbc=True,
regress_forces=True,
hidden_channels=128,
num_blocks=4,
int_emb_size=64,
basis_emb_size=8,
out_emb_channels=256,
num_spherical=7,
num_radial=6,
otf_graph=False,
cutoff=10.0,
envelope_exponent=5,
num_before_skip=1,
num_after_skip=2,
num_output_layers=3,
) -> None:
self.num_targets = num_targets
self.regress_forces = regress_forces
self.use_pbc = use_pbc
self.cutoff = cutoff
self.otf_graph = otf_graph
self.max_neighbors = 50
super(DimeNetPlusPlusWrap, self).__init__(
hidden_channels=hidden_channels,
out_channels=num_targets,
num_blocks=num_blocks,
int_emb_size=int_emb_size,
basis_emb_size=basis_emb_size,
out_emb_channels=out_emb_channels,
num_spherical=num_spherical,
num_radial=num_radial,
cutoff=cutoff,
envelope_exponent=envelope_exponent,
num_before_skip=num_before_skip,
num_after_skip=num_after_skip,
num_output_layers=num_output_layers,
)
@conditional_grad(torch.enable_grad())
def _forward(self, data):
pos = data.pos
batch = data.batch
(
edge_index,
dist,
_,
cell_offsets,
offsets,
neighbors,
) = self.generate_graph(data)
data.edge_index = edge_index
data.cell_offsets = cell_offsets
data.neighbors = neighbors
j, i = edge_index
_, _, idx_i, idx_j, idx_k, idx_kj, idx_ji = self.triplets(
edge_index,
data.cell_offsets,
num_nodes=data.atomic_numbers.size(0),
)
# Calculate angles.
pos_i = pos[idx_i].detach()
pos_j = pos[idx_j].detach()
if self.use_pbc:
pos_ji, pos_kj = (
pos[idx_j].detach() - pos_i + offsets[idx_ji],
pos[idx_k].detach() - pos_j + offsets[idx_kj],
)
else:
pos_ji, pos_kj = (
pos[idx_j].detach() - pos_i,
pos[idx_k].detach() - pos_j,
)
a = (pos_ji * pos_kj).sum(dim=-1)
b = torch.cross(pos_ji, pos_kj).norm(dim=-1)
angle = torch.atan2(b, a)
rbf = self.rbf(dist)
sbf = self.sbf(dist, angle, idx_kj)
# Embedding block.
x = self.emb(data.atomic_numbers.long(), rbf, i, j)
P = self.output_blocks[0](x, rbf, i, num_nodes=pos.size(0))
# Interaction blocks.
for interaction_block, output_block in zip(
self.interaction_blocks, self.output_blocks[1:]
):
x = interaction_block(x, rbf, sbf, idx_kj, idx_ji)
P += output_block(x, rbf, i, num_nodes=pos.size(0))
energy = P.sum(dim=0) if batch is None else scatter(P, batch, dim=0)
return energy
def forward(self, data):
if self.regress_forces:
data.pos.requires_grad_(True)
energy = self._forward(data)
if self.regress_forces:
forces = -1 * (
torch.autograd.grad(
energy,
data.pos,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
)
return energy, forces
else:
return energy
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
| 15,691 | 32.175476 | 80 | py |
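
`OutputPPBlock.forward` and the energy readout in `_forward` both lean on torch_scatter's `scatter` to sum per-edge (or per-atom) contributions into the owning node (or graph). A toy illustration with made-up values (requires torch_scatter):

import torch
from torch_scatter import scatter

edge_messages = torch.tensor([[1.0], [2.0], [4.0], [8.0]])
target_node = torch.tensor([0, 0, 1, 1])             # index "i" of each edge
per_node = scatter(edge_messages, target_node, dim=0, dim_size=3)
print(per_node.squeeze(-1))                           # tensor([ 3., 12.,  0.])
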
ocp | ocp-main/ocpmodels/models/forcenet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
from math import pi as PI
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from torch_geometric.nn import MessagePassing
from torch_scatter import scatter
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import get_pbc_distances, radius_graph_pbc
from ocpmodels.datasets.embeddings import ATOMIC_RADII, CONTINUOUS_EMBEDDINGS
from ocpmodels.models.base import BaseModel
from ocpmodels.models.utils.activations import Act
from ocpmodels.models.utils.basis import Basis, SphericalSmearing
class FNDecoder(nn.Module):
def __init__(
self, decoder_type, decoder_activation_str, output_dim
) -> None:
super(FNDecoder, self).__init__()
self.decoder_type = decoder_type
self.decoder_activation = Act(decoder_activation_str)
self.output_dim = output_dim
if self.decoder_type == "linear":
self.decoder = nn.Sequential(nn.Linear(self.output_dim, 3))
elif self.decoder_type == "mlp":
self.decoder = nn.Sequential(
nn.Linear(self.output_dim, self.output_dim),
nn.BatchNorm1d(self.output_dim),
self.decoder_activation,
nn.Linear(self.output_dim, 3),
)
else:
raise ValueError(f"Undefined force decoder: {self.decoder_type}")
self.reset_parameters()
def reset_parameters(self) -> None:
for m in self.decoder:
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0)
def forward(self, x):
return self.decoder(x)
class InteractionBlock(MessagePassing):
def __init__(
self,
hidden_channels,
mlp_basis_dim: int,
basis_type,
depth_mlp_edge: int = 2,
depth_mlp_trans: int = 1,
activation_str: str = "ssp",
ablation: str = "none",
) -> None:
super(InteractionBlock, self).__init__(aggr="add")
self.activation = Act(activation_str)
self.ablation = ablation
self.basis_type = basis_type
# basis function assumes input is in the range of [-1,1]
if self.basis_type != "rawcat":
self.lin_basis = torch.nn.Linear(mlp_basis_dim, hidden_channels)
if self.ablation == "nocond":
# the edge filter only depends on edge_attr
in_features = (
mlp_basis_dim
if self.basis_type == "rawcat"
else hidden_channels
)
else:
# edge filter depends on edge_attr and current node embedding
in_features = (
mlp_basis_dim + 2 * hidden_channels
if self.basis_type == "rawcat"
else 3 * hidden_channels
)
if depth_mlp_edge > 0:
mlp_edge = [torch.nn.Linear(in_features, hidden_channels)]
for i in range(depth_mlp_edge):
mlp_edge.append(self.activation)
mlp_edge.append(
torch.nn.Linear(hidden_channels, hidden_channels)
)
else:
## need batch normalization afterwards. Otherwise training is unstable.
mlp_edge = [
torch.nn.Linear(in_features, hidden_channels),
torch.nn.BatchNorm1d(hidden_channels),
]
self.mlp_edge = torch.nn.Sequential(*mlp_edge)
if not self.ablation == "nofilter":
self.lin = torch.nn.Linear(hidden_channels, hidden_channels)
if depth_mlp_trans > 0:
mlp_trans = [torch.nn.Linear(hidden_channels, hidden_channels)]
for i in range(depth_mlp_trans):
mlp_trans.append(torch.nn.BatchNorm1d(hidden_channels))
mlp_trans.append(self.activation)
mlp_trans.append(
torch.nn.Linear(hidden_channels, hidden_channels)
)
else:
# need batch normalization afterwards. Otherwise, becomes NaN
mlp_trans = [
torch.nn.Linear(hidden_channels, hidden_channels),
torch.nn.BatchNorm1d(hidden_channels),
]
self.mlp_trans = torch.nn.Sequential(*mlp_trans)
if not self.ablation == "noself":
self.center_W = torch.nn.Parameter(
torch.Tensor(1, hidden_channels)
)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.basis_type != "rawcat":
torch.nn.init.xavier_uniform_(self.lin_basis.weight)
self.lin_basis.bias.data.fill_(0)
for m in self.mlp_trans:
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0)
for m in self.mlp_edge:
if isinstance(m, torch.nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0)
if not self.ablation == "nofilter":
torch.nn.init.xavier_uniform_(self.lin.weight)
self.lin.bias.data.fill_(0)
if not self.ablation == "noself":
torch.nn.init.xavier_uniform_(self.center_W)
def forward(self, x, edge_index, edge_attr, edge_weight):
if self.basis_type != "rawcat":
edge_emb = self.lin_basis(edge_attr)
else:
# for rawcat, we directly use the raw feature
edge_emb = edge_attr
if self.ablation == "nocond":
emb = edge_emb
else:
emb = torch.cat(
[edge_emb, x[edge_index[0]], x[edge_index[1]]], dim=1
)
W = self.mlp_edge(emb) * edge_weight.view(-1, 1)
if self.ablation == "nofilter":
x = self.propagate(edge_index, x=x, W=W) + self.center_W
else:
x = self.lin(x)
if self.ablation == "noself":
x = self.propagate(edge_index, x=x, W=W)
else:
x = self.propagate(edge_index, x=x, W=W) + self.center_W * x
x = self.mlp_trans(x)
return x
def message(self, x_j, W):
if self.ablation == "nofilter":
return W
else:
return x_j * W
# flake8: noqa: C901
@registry.register_model("forcenet")
class ForceNet(BaseModel):
r"""Implementation of ForceNet architecture.
Args:
num_atoms (int): Unused argument
bond_feat_dim (int): Unused argument
num_targets (int): Unused argument
hidden_channels (int, optional): Number of hidden channels.
(default: :obj:`512`)
num_interactions (int, optional): Number of interaction blocks.
(default: :obj:`5`)
cutoff (float, optional): Cutoff distance for interatomic interactions.
(default: :obj:`6.0`)
feat (str, optional): Input features to be used
(default: :obj:`full`)
num_freqs (int, optional): Number of frequencies for basis function.
(default: :obj:`50`)
max_n (int, optional): Maximum order of spherical harmonics.
(default: :obj:`6`)
basis (str, optional): Basis function to be used.
(default: :obj:`full`)
depth_mlp_edge (int, optional): Depth of MLP for edges in interaction blocks.
(default: :obj:`2`)
depth_mlp_node (int, optional): Depth of MLP for nodes in interaction blocks.
(default: :obj:`1`)
activation_str (str, optional): Activation function used post linear layer in all message passing MLPs.
(default: :obj:`swish`)
ablation (str, optional): Type of ablation to be performed.
(default: :obj:`none`)
decoder_hidden_channels (int, optional): Number of hidden channels in the decoder.
(default: :obj:`512`)
decoder_type (str, optional): Type of decoder: linear or MLP.
(default: :obj:`mlp`)
decoder_activation_str (str, optional): Activation function used post linear layer in decoder.
(default: :obj:`swish`)
training (bool, optional): If set to :obj:`True`, specify training phase.
(default: :obj:`True`)
otf_graph (bool, optional): If set to :obj:`True`, compute graph edges on the fly.
(default: :obj:`False`)
"""
def __init__(
self,
num_atoms, # not used
bond_feat_dim, # not used
num_targets, # not used
hidden_channels=512,
num_interactions=5,
cutoff=6.0,
feat="full",
num_freqs=50,
max_n=3,
basis="sphallmul",
depth_mlp_edge=2,
depth_mlp_node=1,
activation_str="swish",
ablation="none",
decoder_hidden_channels=512,
decoder_type="mlp",
decoder_activation_str="swish",
training=True,
otf_graph=False,
use_pbc=True,
) -> None:
super(ForceNet, self).__init__()
self.training = training
self.ablation = ablation
if self.ablation not in [
"none",
"nofilter",
"nocond",
"nodistlist",
"onlydist",
"nodelinear",
"edgelinear",
"noself",
]:
raise ValueError(f"Unknown ablation called {ablation}.")
"""
Descriptions of ablations:
- none: base ForceNet model
- nofilter: no element-wise filter parameterization in message modeling
- nocond: convolutional filter is only conditioned on edge features, not node embeddings
- nodistlist: no atomic radius information in edge features
- onlydist: edge features only contain distance information. Orientation information is omitted.
- nodelinear: node update MLP function is replaced with linear function followed by batch normalization
- edgelinear: edge MLP transformation function is replaced with linear function followed by batch normalization.
- noself: no self edge of m_t.
"""
self.otf_graph = otf_graph
self.cutoff = cutoff
self.output_dim = decoder_hidden_channels
self.feat = feat
self.num_freqs = num_freqs
self.num_layers = num_interactions
self.max_n = max_n
self.activation_str = activation_str
self.use_pbc = use_pbc
self.max_neighbors = 50
if self.ablation == "edgelinear":
depth_mlp_edge = 0
if self.ablation == "nodelinear":
depth_mlp_node = 0
# read atom map and atom radii
atom_map = torch.zeros(101, 9)
for i in range(101):
atom_map[i] = torch.tensor(CONTINUOUS_EMBEDDINGS[i])
atom_radii = torch.zeros(101)
for i in range(101):
atom_radii[i] = ATOMIC_RADII[i]
atom_radii = atom_radii / 100
self.atom_radii = nn.Parameter(atom_radii, requires_grad=False)
self.basis_type = basis
self.pbc_apply_sph_harm = "sph" in self.basis_type
self.pbc_sph_option = None
# for spherical harmonics for PBC
if "sphall" in self.basis_type:
self.pbc_sph_option = "all"
elif "sphsine" in self.basis_type:
self.pbc_sph_option = "sine"
elif "sphcosine" in self.basis_type:
self.pbc_sph_option = "cosine"
self.pbc_sph: Optional[SphericalSmearing] = None
if self.pbc_apply_sph_harm:
self.pbc_sph = SphericalSmearing(
max_n=self.max_n, option=self.pbc_sph_option
)
# self.feat can be "simple" or "full"
if self.feat == "simple":
self.embedding = nn.Embedding(100, hidden_channels)
# set up dummy atom_map that only contains atomic_number information
atom_map = torch.linspace(0, 1, 101).view(-1, 1).repeat(1, 9)
self.atom_map = nn.Parameter(atom_map, requires_grad=False)
elif self.feat == "full":
# Normalize along each dimension
atom_map[0] = np.nan
atom_map_notnan = atom_map[atom_map[:, 0] == atom_map[:, 0]]
atom_map_min = torch.min(atom_map_notnan, dim=0)[0]
atom_map_max = torch.max(atom_map_notnan, dim=0)[0]
atom_map_gap = atom_map_max - atom_map_min
## squash to [0,1]
atom_map = (
atom_map - atom_map_min.view(1, -1)
) / atom_map_gap.view(1, -1)
self.atom_map = torch.nn.Parameter(atom_map, requires_grad=False)
in_features = 9
# first apply basis function and then linear function
if "sph" in self.basis_type:
# spherical basis is only meaningful for edge feature, so use powersine instead
node_basis_type = "powersine"
else:
node_basis_type = self.basis_type
basis = Basis(
in_features,
num_freqs=num_freqs,
basis_type=node_basis_type,
act=self.activation_str,
)
self.embedding = torch.nn.Sequential(
basis, torch.nn.Linear(basis.out_dim, hidden_channels)
)
else:
raise ValueError("Undefined feature type for atom")
# process basis function for edge feature
if self.ablation == "nodistlist":
# do not consider additional distance edge features
# normalized (x,y,z) + distance
in_feature = 4
elif self.ablation == "onlydist":
# only consider distance-based edge features
# ignore normalized (x,y,z)
in_feature = 4
# if basis_type is spherical harmonics, then reduce to powersine
if "sph" in self.basis_type:
logging.info(
"Under onlydist ablation, spherical basis is reduced to powersine basis."
)
self.basis_type = "powersine"
self.pbc_sph = None
else:
in_feature = 7
self.basis_fun = Basis(
in_feature,
num_freqs,
self.basis_type,
self.activation_str,
sph=self.pbc_sph,
)
# process interaction blocks
self.interactions = torch.nn.ModuleList()
for _ in range(num_interactions):
block = InteractionBlock(
hidden_channels,
self.basis_fun.out_dim,
self.basis_type,
depth_mlp_edge=depth_mlp_edge,
depth_mlp_trans=depth_mlp_node,
activation_str=self.activation_str,
ablation=ablation,
)
self.interactions.append(block)
self.lin = torch.nn.Linear(hidden_channels, self.output_dim)
self.activation = Act(activation_str)
# ForceNet decoder
self.decoder = FNDecoder(
decoder_type, decoder_activation_str, self.output_dim
)
# Projection layer for energy prediction
self.energy_mlp = nn.Linear(self.output_dim, 1)
def forward(self, data):
z = data.atomic_numbers.long()
pos = data.pos
batch = data.batch
if self.feat == "simple":
h = self.embedding(z)
elif self.feat == "full":
h = self.embedding(self.atom_map[z])
else:
raise RuntimeError("Undefined feature type for atom")
(
edge_index,
edge_dist,
edge_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
data.edge_index = edge_index
data.cell_offsets = cell_offsets
data.neighbors = neighbors
if self.pbc_apply_sph_harm:
edge_vec_normalized = edge_vec / edge_dist.view(-1, 1)
edge_attr_sph = self.pbc_sph(edge_vec_normalized)
# calculate the edge weight according to the dist
edge_weight = torch.cos(0.5 * edge_dist * PI / self.cutoff)
# normalized edge vectors
edge_vec_normalized = edge_vec / edge_dist.view(-1, 1)
# edge distance, taking the atom_radii into account
# each element lies in [0,1]
edge_dist_list = (
torch.stack(
[
edge_dist,
edge_dist - self.atom_radii[z[edge_index[0]]],
edge_dist - self.atom_radii[z[edge_index[1]]],
edge_dist
- self.atom_radii[z[edge_index[0]]]
- self.atom_radii[z[edge_index[1]]],
]
).transpose(0, 1)
/ self.cutoff
)
if self.ablation == "nodistlist":
edge_dist_list = edge_dist_list[:, 0].view(-1, 1)
# make sure distance is positive
edge_dist_list[edge_dist_list < 1e-3] = 1e-3
# squash to [0,1] for gaussian basis
if self.basis_type == "gauss":
edge_vec_normalized = (edge_vec_normalized + 1) / 2.0
# process raw_edge_attributes to generate edge_attributes
if self.ablation == "onlydist":
raw_edge_attr = edge_dist_list
else:
raw_edge_attr = torch.cat(
[edge_vec_normalized, edge_dist_list], dim=1
)
if "sph" in self.basis_type:
edge_attr = self.basis_fun(raw_edge_attr, edge_attr_sph)
else:
edge_attr = self.basis_fun(raw_edge_attr)
# pass edge_attributes through interaction blocks
for _, interaction in enumerate(self.interactions):
h = h + interaction(h, edge_index, edge_attr, edge_weight)
h = self.lin(h)
h = self.activation(h)
out = scatter(h, batch, dim=0, reduce="add")
force = self.decoder(h)
energy = self.energy_mlp(out)
return energy, force
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
| 18,340 | 34.339114 | 124 | py |
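
The edge weighting in `ForceNet.forward` above, `cos(0.5 * d * pi / cutoff)`, decays smoothly from 1 at d = 0 to 0 at d = cutoff, so distant neighbors contribute little. A quick illustration with made-up distances:

import torch
from math import pi as PI

cutoff = 6.0
edge_dist = torch.tensor([0.0, 3.0, 6.0])
edge_weight = torch.cos(0.5 * edge_dist * PI / cutoff)
print(edge_weight)  # roughly [1.0000, 0.7071, 0.0000]
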
ocp | ocp-main/ocpmodels/models/spinconv.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import time
from math import pi as PI
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Embedding, Linear, ModuleList, Sequential
from torch_geometric.nn import MessagePassing, SchNet, radius_graph
from torch_scatter import scatter
from ocpmodels.common.registry import registry
from ocpmodels.common.transforms import RandomRotate
from ocpmodels.common.utils import (
compute_neighbors,
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.models.base import BaseModel
try:
from e3nn import o3
from e3nn.io import SphericalTensor
from e3nn.o3 import FromS2Grid, SphericalHarmonics, ToS2Grid
except Exception:
pass
@registry.register_model("spinconv")
class spinconv(BaseModel):
def __init__(
self,
num_atoms: int, # not used
bond_feat_dim: int, # not used
num_targets: int,
use_pbc: bool = True,
regress_forces: bool = True,
otf_graph: bool = False,
hidden_channels: int = 32,
mid_hidden_channels: int = 200,
num_interactions: int = 1,
num_basis_functions: int = 200,
basis_width_scalar: float = 1.0,
max_num_neighbors: int = 20,
sphere_size_lat: int = 15,
sphere_size_long: int = 9,
cutoff: float = 10.0,
distance_block_scalar_max: float = 2.0,
max_num_elements: int = 90,
embedding_size: int = 32,
show_timing_info: bool = False,
sphere_message: str = "fullconv", # message block sphere representation
output_message: str = "fullconv", # output block sphere representation
lmax: bool = False,
force_estimator: str = "random",
model_ref_number: int = 0,
readout: str = "add",
num_rand_rotations: int = 5,
scale_distances: bool = True,
) -> None:
super(spinconv, self).__init__()
self.num_targets = num_targets
self.num_random_rotations = num_rand_rotations
self.regress_forces = regress_forces
self.use_pbc = use_pbc
self.cutoff = cutoff
self.otf_graph = otf_graph
self.show_timing_info = show_timing_info
self.max_num_elements = max_num_elements
self.mid_hidden_channels = mid_hidden_channels
self.sphere_size_lat = sphere_size_lat
self.sphere_size_long = sphere_size_long
self.num_atoms = 0
self.hidden_channels = hidden_channels
self.embedding_size = embedding_size
self.max_num_neighbors = self.max_neighbors = max_num_neighbors
self.sphere_message = sphere_message
self.output_message = output_message
self.force_estimator = force_estimator
self.num_basis_functions = num_basis_functions
self.distance_block_scalar_max = distance_block_scalar_max
self.grad_forces = False
self.num_embedding_basis = 8
self.lmax = lmax
self.scale_distances = scale_distances
self.basis_width_scalar = basis_width_scalar
if self.sphere_message in ["spharm", "rotspharmroll", "rotspharmwd"]:
assert self.lmax, "lmax must be defined for spherical harmonics"
if self.output_message in ["spharm", "rotspharmroll", "rotspharmwd"]:
assert self.lmax, "lmax must be defined for spherical harmonics"
# variables used for display purposes
self.counter = 0
self.start_time = time.time()
self.total_time = 0
self.model_ref_number = model_ref_number
if self.force_estimator == "grad":
self.grad_forces = True
# self.act = ShiftedSoftplus()
self.act = Swish()
self.distance_expansion_forces = GaussianSmearing(
0.0,
cutoff,
num_basis_functions,
basis_width_scalar,
)
# Weights for message initialization
self.embeddingblock2 = EmbeddingBlock(
self.mid_hidden_channels,
self.hidden_channels,
self.mid_hidden_channels,
self.embedding_size,
self.num_embedding_basis,
self.max_num_elements,
self.act,
)
self.distfc1 = nn.Linear(
self.mid_hidden_channels, self.mid_hidden_channels
)
self.distfc2 = nn.Linear(
self.mid_hidden_channels, self.mid_hidden_channels
)
self.dist_block = DistanceBlock(
self.num_basis_functions,
self.mid_hidden_channels,
self.max_num_elements,
self.distance_block_scalar_max,
self.distance_expansion_forces,
self.scale_distances,
)
self.message_blocks = ModuleList()
for _ in range(num_interactions):
block = MessageBlock(
hidden_channels,
hidden_channels,
mid_hidden_channels,
embedding_size,
self.sphere_size_lat,
self.sphere_size_long,
self.max_num_elements,
self.sphere_message,
self.act,
self.lmax,
)
self.message_blocks.append(block)
self.energyembeddingblock = EmbeddingBlock(
hidden_channels,
1,
mid_hidden_channels,
embedding_size,
8,
self.max_num_elements,
self.act,
)
if force_estimator == "random":
self.force_output_block = ForceOutputBlock(
hidden_channels,
2,
mid_hidden_channels,
embedding_size,
self.sphere_size_lat,
self.sphere_size_long,
self.max_num_elements,
self.output_message,
self.act,
self.lmax,
)
@conditional_grad(torch.enable_grad())
def forward(self, data):
self.device = data.pos.device
self.num_atoms = len(data.batch)
self.batch_size = len(data.natoms)
pos = data.pos
if self.regress_forces:
pos = pos.requires_grad_(True)
(
edge_index,
edge_distance,
edge_distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
edge_index, edge_distance, edge_distance_vec = self._filter_edges(
edge_index,
edge_distance,
edge_distance_vec,
self.max_num_neighbors,
)
outputs = self._forward_helper(
data, edge_index, edge_distance, edge_distance_vec
)
if self.show_timing_info is True:
torch.cuda.synchronize()
print(
"Memory: {}\t{}\t{}".format(
len(edge_index[0]),
torch.cuda.memory_allocated()
/ (1000 * len(edge_index[0])),
torch.cuda.max_memory_allocated() / 1000000,
)
)
return outputs
# restructure forward helper for conditional grad
def _forward_helper(
self, data, edge_index, edge_distance, edge_distance_vec
):
###############################################################
# Initialize messages
###############################################################
source_element = data.atomic_numbers[edge_index[0, :]].long()
target_element = data.atomic_numbers[edge_index[1, :]].long()
x_dist = self.dist_block(edge_distance, source_element, target_element)
x = x_dist
x = self.distfc1(x)
x = self.act(x)
x = self.distfc2(x)
x = self.act(x)
x = self.embeddingblock2(x, source_element, target_element)
###############################################################
# Update messages using block interactions
###############################################################
edge_rot_mat = self._init_edge_rot_mat(
data, edge_index, edge_distance_vec
)
(
proj_edges_index,
proj_edges_delta,
proj_edges_src_index,
) = self._project2D_edges_init(
edge_rot_mat, edge_index, edge_distance_vec
)
for block_index, interaction in enumerate(self.message_blocks):
x_out = interaction(
x,
x_dist,
source_element,
target_element,
proj_edges_index,
proj_edges_delta,
proj_edges_src_index,
)
if block_index > 0:
x = x + x_out
else:
x = x_out
###############################################################
# Decoder
# Compute the forces and energies from the messages
###############################################################
assert self.force_estimator in ["random", "grad"]
energy = scatter(x, edge_index[1], dim=0, dim_size=data.num_nodes) / (
self.max_num_neighbors / 2.0 + 1.0
)
atomic_numbers = data.atomic_numbers.long()
energy = self.energyembeddingblock(
energy, atomic_numbers, atomic_numbers
)
energy = scatter(energy, data.batch, dim=0)
if self.regress_forces:
if self.force_estimator == "grad":
forces = -1 * (
torch.autograd.grad(
energy,
data.pos,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
)
if self.force_estimator == "random":
forces = self._compute_forces_random_rotations(
x,
self.num_random_rotations,
data.atomic_numbers.long(),
edge_index,
edge_distance_vec,
data.batch,
)
if not self.regress_forces:
return energy
else:
return energy, forces
def _compute_forces_random_rotations(
self,
x,
num_random_rotations: int,
target_element,
edge_index,
edge_distance_vec,
batch,
) -> torch.Tensor:
# Compute the forces and energy by randomly rotating the system and taking the average
device = x.device
rot_mat_x = torch.zeros(3, 3, device=device)
rot_mat_x[0][0] = 1.0
rot_mat_x[1][1] = 1.0
rot_mat_x[2][2] = 1.0
rot_mat_y = torch.zeros(3, 3, device=device)
rot_mat_y[0][1] = 1.0
rot_mat_y[1][0] = -1.0
rot_mat_y[2][2] = 1.0
rot_mat_z = torch.zeros(3, 3, device=device)
rot_mat_z[0][2] = 1.0
rot_mat_z[1][1] = 1.0
rot_mat_z[2][0] = -1.0
rot_mat_x = rot_mat_x.view(-1, 3, 3).repeat(self.num_atoms, 1, 1)
rot_mat_y = rot_mat_y.view(-1, 3, 3).repeat(self.num_atoms, 1, 1)
rot_mat_z = rot_mat_z.view(-1, 3, 3).repeat(self.num_atoms, 1, 1)
# compute the random rotations
random_rot_mat = self._random_rot_mat(
self.num_atoms * num_random_rotations, device
)
random_rot_mat = random_rot_mat.view(
num_random_rotations, self.num_atoms, 3, 3
)
# the first matrix is the identity with the rest being random
# atom_rot_mat = torch.cat([torch.eye(3, device=device).view(1, 1, 3, 3).repeat(1, self.num_atoms, 1, 1), random_rot_mat], dim=0)
# or they are all random
atom_rot_mat = random_rot_mat
forces = torch.zeros(self.num_atoms, 3, device=device)
for rot_index in range(num_random_rotations):
rot_mat_x_perturb = torch.bmm(rot_mat_x, atom_rot_mat[rot_index])
rot_mat_y_perturb = torch.bmm(rot_mat_y, atom_rot_mat[rot_index])
rot_mat_z_perturb = torch.bmm(rot_mat_z, atom_rot_mat[rot_index])
# project neighbors using the random rotations
(
proj_nodes_index_x,
proj_nodes_delta_x,
proj_nodes_src_index_x,
) = self._project2D_nodes_init(
rot_mat_x_perturb, edge_index, edge_distance_vec
)
(
proj_nodes_index_y,
proj_nodes_delta_y,
proj_nodes_src_index_y,
) = self._project2D_nodes_init(
rot_mat_y_perturb, edge_index, edge_distance_vec
)
(
proj_nodes_index_z,
proj_nodes_delta_z,
proj_nodes_src_index_z,
) = self._project2D_nodes_init(
rot_mat_z_perturb, edge_index, edge_distance_vec
)
# estimate the force in each perpendicular direction
force_x = self.force_output_block(
x,
self.num_atoms,
target_element,
proj_nodes_index_x,
proj_nodes_delta_x,
proj_nodes_src_index_x,
)
force_y = self.force_output_block(
x,
self.num_atoms,
target_element,
proj_nodes_index_y,
proj_nodes_delta_y,
proj_nodes_src_index_y,
)
force_z = self.force_output_block(
x,
self.num_atoms,
target_element,
proj_nodes_index_z,
proj_nodes_delta_z,
proj_nodes_src_index_z,
)
forces_perturb = torch.cat(
[force_x[:, 0:1], force_y[:, 0:1], force_z[:, 0:1]], dim=1
)
# rotate the predicted forces back into the global reference frame
rot_mat_inv = torch.transpose(rot_mat_x_perturb, 1, 2)
forces_perturb = torch.bmm(
rot_mat_inv, forces_perturb.view(-1, 3, 1)
).view(-1, 3)
forces = forces + forces_perturb
forces = forces / (num_random_rotations)
return forces
def _filter_edges(
self,
edge_index,
edge_distance,
edge_distance_vec,
max_num_neighbors: int,
):
# Remove edges that aren't within the closest max_num_neighbors from either the target or source atom.
# This ensures all edges occur in pairs, i.e., if X -> Y exists then Y -> X is included.
# However, if X -> Y and Y -> X did not both exist in the original list, this pairing is not guaranteed.
# Since some edges may have exactly the same distance, this function is not deterministic
device = edge_index.device
length = len(edge_distance)
# Assuming the edges are consecutive based on the target index
target_node_index, neigh_count = torch.unique_consecutive(
edge_index[1], return_counts=True
)
max_neighbors = torch.max(neigh_count)
# handle special case where an atom doesn't have any neighbors
target_neigh_count = torch.zeros(self.num_atoms, device=device).long()
target_neigh_count.index_copy_(
0, target_node_index.long(), neigh_count
)
# Create a list of edges for each atom
index_offset = (
torch.cumsum(target_neigh_count, dim=0) - target_neigh_count
)
neigh_index = torch.arange(length, device=device)
neigh_index = neigh_index - index_offset[edge_index[1]]
edge_map_index = (edge_index[1] * max_neighbors + neigh_index).long()
target_lookup = (
torch.zeros(self.num_atoms * max_neighbors, device=device) - 1
).long()
target_lookup.index_copy_(
0, edge_map_index, torch.arange(length, device=device).long()
)
# Get the length of each edge
distance_lookup = (
torch.zeros(self.num_atoms * max_neighbors, device=device)
+ 1000000.0
)
distance_lookup.index_copy_(0, edge_map_index, edge_distance)
distance_lookup = distance_lookup.view(self.num_atoms, max_neighbors)
# Sort the distances
distance_sorted_no_op, indices = torch.sort(distance_lookup, dim=1)
# Create a hash that maps edges that go from X -> Y and Y -> X in the same bin
edge_index_min, no_op = torch.min(edge_index, dim=0)
edge_index_max, no_op = torch.max(edge_index, dim=0)
edge_index_hash = edge_index_min * self.num_atoms + edge_index_max
edge_count_start = torch.zeros(
self.num_atoms * self.num_atoms, device=device
)
edge_count_start.index_add_(
0, edge_index_hash, torch.ones(len(edge_index_hash), device=device)
)
# Find index into the original edge_index
indices = indices + (
torch.arange(len(indices), device=device) * max_neighbors
).view(-1, 1).repeat(1, max_neighbors)
indices = indices.view(-1)
target_lookup_sorted = (
torch.zeros(self.num_atoms * max_neighbors, device=device) - 1
).long()
target_lookup_sorted = target_lookup[indices]
target_lookup_sorted = target_lookup_sorted.view(
self.num_atoms, max_neighbors
)
# Select the closest max_num_neighbors for each edge and remove the unused entries
target_lookup_below_thres = (
target_lookup_sorted[:, 0:max_num_neighbors].contiguous().view(-1)
)
target_lookup_below_thres = target_lookup_below_thres.view(-1)
mask_unused = target_lookup_below_thres.ge(0)
target_lookup_below_thres = torch.masked_select(
target_lookup_below_thres, mask_unused
)
# Find edges that are used at least once and create a mask to keep
edge_count = torch.zeros(
self.num_atoms * self.num_atoms, device=device
)
edge_count.index_add_(
0,
edge_index_hash[target_lookup_below_thres],
torch.ones(len(target_lookup_below_thres), device=device),
)
edge_count_mask = edge_count.ne(0)
edge_keep = edge_count_mask[edge_index_hash]
# Finally remove all edges that are too long in distance as indicated by the mask
edge_index_mask = edge_keep.view(1, -1).repeat(2, 1)
edge_index = torch.masked_select(edge_index, edge_index_mask).view(
2, -1
)
edge_distance = torch.masked_select(edge_distance, edge_keep)
edge_distance_vec_mask = edge_keep.view(-1, 1).repeat(1, 3)
edge_distance_vec = torch.masked_select(
edge_distance_vec, edge_distance_vec_mask
).view(-1, 3)
return edge_index, edge_distance, edge_distance_vec
def _random_rot_mat(self, num_matrices: int, device) -> torch.Tensor:
ang_a = 2.0 * math.pi * torch.rand(num_matrices, device=device)
ang_b = 2.0 * math.pi * torch.rand(num_matrices, device=device)
ang_c = 2.0 * math.pi * torch.rand(num_matrices, device=device)
cos_a = torch.cos(ang_a)
cos_b = torch.cos(ang_b)
cos_c = torch.cos(ang_c)
sin_a = torch.sin(ang_a)
sin_b = torch.sin(ang_b)
sin_c = torch.sin(ang_c)
rot_a = (
torch.eye(3, device=device)
.view(1, 3, 3)
.repeat(num_matrices, 1, 1)
)
rot_b = (
torch.eye(3, device=device)
.view(1, 3, 3)
.repeat(num_matrices, 1, 1)
)
rot_c = (
torch.eye(3, device=device)
.view(1, 3, 3)
.repeat(num_matrices, 1, 1)
)
rot_a[:, 1, 1] = cos_a
rot_a[:, 1, 2] = sin_a
rot_a[:, 2, 1] = -sin_a
rot_a[:, 2, 2] = cos_a
rot_b[:, 0, 0] = cos_b
rot_b[:, 0, 2] = -sin_b
rot_b[:, 2, 0] = sin_b
rot_b[:, 2, 2] = cos_b
rot_c[:, 0, 0] = cos_c
rot_c[:, 0, 1] = sin_c
rot_c[:, 1, 0] = -sin_c
rot_c[:, 1, 1] = cos_c
return torch.bmm(torch.bmm(rot_a, rot_b), rot_c)
def _init_edge_rot_mat(
self, data, edge_index, edge_distance_vec
) -> torch.Tensor:
device = data.pos.device
num_atoms = len(data.batch)
edge_vec_0 = edge_distance_vec
edge_vec_0_distance = torch.sqrt(torch.sum(edge_vec_0**2, dim=1))
if torch.min(edge_vec_0_distance) < 0.0001:
print(
"Error edge_vec_0_distance: {}".format(
torch.min(edge_vec_0_distance)
)
)
(minval, minidx) = torch.min(edge_vec_0_distance, 0)
print(
"Error edge_vec_0_distance: {} {} {} {} {}".format(
minidx,
edge_index[0, minidx],
edge_index[1, minidx],
data.pos[edge_index[0, minidx]],
data.pos[edge_index[1, minidx]],
)
)
avg_vector = torch.zeros(num_atoms, 3, device=device)
weight = 0.5 * (
torch.cos(edge_vec_0_distance * PI / self.cutoff) + 1.0
)
avg_vector.index_add_(
0, edge_index[1, :], edge_vec_0 * weight.view(-1, 1).expand(-1, 3)
)
edge_vec_2 = avg_vector[edge_index[1, :]] + 0.0001
edge_vec_2_distance = torch.sqrt(torch.sum(edge_vec_2**2, dim=1))
if torch.min(edge_vec_2_distance) < 0.000001:
print(
"Error edge_vec_2_distance: {}".format(
torch.min(edge_vec_2_distance)
)
)
norm_x = edge_vec_0 / (edge_vec_0_distance.view(-1, 1))
norm_0_2 = edge_vec_2 / (edge_vec_2_distance.view(-1, 1))
norm_z = torch.cross(norm_x, norm_0_2, dim=1)
norm_z = norm_z / (
torch.sqrt(torch.sum(norm_z**2, dim=1, keepdim=True)) + 0.0000001
)
norm_y = torch.cross(norm_x, norm_z, dim=1)
norm_y = norm_y / (
torch.sqrt(torch.sum(norm_y**2, dim=1, keepdim=True)) + 0.0000001
)
norm_x = norm_x.view(-1, 3, 1)
norm_y = norm_y.view(-1, 3, 1)
norm_z = norm_z.view(-1, 3, 1)
edge_rot_mat_inv = torch.cat([norm_x, norm_y, norm_z], dim=2)
edge_rot_mat = torch.transpose(edge_rot_mat_inv, 1, 2)
return edge_rot_mat
def _project2D_edges_init(self, rot_mat, edge_index, edge_distance_vec):
torch.set_printoptions(sci_mode=False)
length = len(edge_distance_vec)
device = edge_distance_vec.device
# Assuming the edges are consecutive based on the target index
target_node_index, neigh_count = torch.unique_consecutive(
edge_index[1], return_counts=True
)
max_neighbors = torch.max(neigh_count)
target_neigh_count = torch.zeros(self.num_atoms, device=device).long()
target_neigh_count.index_copy_(
0, target_node_index.long(), neigh_count
)
index_offset = (
torch.cumsum(target_neigh_count, dim=0) - target_neigh_count
)
neigh_index = torch.arange(length, device=device)
neigh_index = neigh_index - index_offset[edge_index[1]]
edge_map_index = edge_index[1] * max_neighbors + neigh_index
target_lookup = (
torch.zeros(self.num_atoms * max_neighbors, device=device) - 1
).long()
target_lookup.index_copy_(
0,
edge_map_index.long(),
torch.arange(length, device=device).long(),
)
target_lookup = target_lookup.view(self.num_atoms, max_neighbors)
# target_lookup - For each target node, a list of edge indices
# target_neigh_count - number of neighbors for each target node
source_edge = target_lookup[edge_index[0]]
target_edge = (
torch.arange(length, device=device)
.long()
.view(-1, 1)
.repeat(1, max_neighbors)
)
source_edge = source_edge.view(-1)
target_edge = target_edge.view(-1)
mask_unused = source_edge.ge(0)
source_edge = torch.masked_select(source_edge, mask_unused)
target_edge = torch.masked_select(target_edge, mask_unused)
return self._project2D_init(
source_edge, target_edge, rot_mat, edge_distance_vec
)
def _project2D_nodes_init(self, rot_mat, edge_index, edge_distance_vec):
torch.set_printoptions(sci_mode=False)
length = len(edge_distance_vec)
device = edge_distance_vec.device
target_node = edge_index[1]
source_edge = torch.arange(length, device=device)
return self._project2D_init(
source_edge, target_node, rot_mat, edge_distance_vec
)
def _project2D_init(
self, source_edge, target_edge, rot_mat, edge_distance_vec
):
edge_distance_norm = F.normalize(edge_distance_vec)
source_edge_offset = edge_distance_norm[source_edge]
source_edge_offset_rot = torch.bmm(
rot_mat[target_edge], source_edge_offset.view(-1, 3, 1)
)
source_edge_X = torch.atan2(
source_edge_offset_rot[:, 1], source_edge_offset_rot[:, 2]
).view(-1)
# source_edge_X ranges from -pi to pi
source_edge_X = (source_edge_X + math.pi) / (2.0 * math.pi)
# source_edge_Y ranges from -1 to 1
source_edge_Y = source_edge_offset_rot[:, 0].view(-1)
source_edge_Y = torch.clamp(source_edge_Y, min=-1.0, max=1.0)
source_edge_Y = (source_edge_Y.asin() + (math.pi / 2.0)) / (
math.pi
) # bin by angle
# source_edge_Y = (source_edge_Y + 1.0) / 2.0 # bin by sin
source_edge_Y = 0.99 * (source_edge_Y) + 0.005
source_edge_X = source_edge_X * self.sphere_size_long
source_edge_Y = source_edge_Y * (
self.sphere_size_lat - 1.0
) # not circular so pad by one
source_edge_X_0 = torch.floor(source_edge_X).long()
source_edge_X_del = source_edge_X - source_edge_X_0
source_edge_X_0 = source_edge_X_0 % self.sphere_size_long
source_edge_X_1 = (source_edge_X_0 + 1) % self.sphere_size_long
source_edge_Y_0 = torch.floor(source_edge_Y).long()
source_edge_Y_del = source_edge_Y - source_edge_Y_0
source_edge_Y_0 = source_edge_Y_0 % self.sphere_size_lat
source_edge_Y_1 = (source_edge_Y_0 + 1) % self.sphere_size_lat
# Compute the values needed to bilinearly splat the values onto the spheres
index_0_0 = (
target_edge * self.sphere_size_lat * self.sphere_size_long
+ source_edge_Y_0 * self.sphere_size_long
+ source_edge_X_0
)
index_0_1 = (
target_edge * self.sphere_size_lat * self.sphere_size_long
+ source_edge_Y_0 * self.sphere_size_long
+ source_edge_X_1
)
index_1_0 = (
target_edge * self.sphere_size_lat * self.sphere_size_long
+ source_edge_Y_1 * self.sphere_size_long
+ source_edge_X_0
)
index_1_1 = (
target_edge * self.sphere_size_lat * self.sphere_size_long
+ source_edge_Y_1 * self.sphere_size_long
+ source_edge_X_1
)
delta_0_0 = (1.0 - source_edge_X_del) * (1.0 - source_edge_Y_del)
delta_0_1 = (source_edge_X_del) * (1.0 - source_edge_Y_del)
delta_1_0 = (1.0 - source_edge_X_del) * (source_edge_Y_del)
delta_1_1 = (source_edge_X_del) * (source_edge_Y_del)
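        # The four (index, delta) pairs address the corners of the enclosing
        # lat/long grid cell; the deltas are the standard bilinear weights and
        # sum to 1 for every source edge.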
index_0_0 = index_0_0.view(1, -1)
index_0_1 = index_0_1.view(1, -1)
index_1_0 = index_1_0.view(1, -1)
index_1_1 = index_1_1.view(1, -1)
        # Detach the bilinear weights when force gradients are needed; NaNs otherwise
if self.grad_forces:
with torch.no_grad():
delta_0_0 = delta_0_0.view(1, -1)
delta_0_1 = delta_0_1.view(1, -1)
delta_1_0 = delta_1_0.view(1, -1)
delta_1_1 = delta_1_1.view(1, -1)
else:
delta_0_0 = delta_0_0.view(1, -1)
delta_0_1 = delta_0_1.view(1, -1)
delta_1_0 = delta_1_0.view(1, -1)
delta_1_1 = delta_1_1.view(1, -1)
return (
torch.cat([index_0_0, index_0_1, index_1_0, index_1_1]),
torch.cat([delta_0_0, delta_0_1, delta_1_0, delta_1_1]),
source_edge,
)
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
class MessageBlock(torch.nn.Module):
def __init__(
self,
in_hidden_channels: int,
out_hidden_channels: int,
mid_hidden_channels: int,
embedding_size: int,
sphere_size_lat: int,
sphere_size_long: int,
max_num_elements: int,
sphere_message: str,
act,
lmax,
) -> None:
super(MessageBlock, self).__init__()
self.in_hidden_channels = in_hidden_channels
self.out_hidden_channels = out_hidden_channels
self.act = act
self.lmax = lmax
self.embedding_size = embedding_size
self.mid_hidden_channels = mid_hidden_channels
self.sphere_size_lat = sphere_size_lat
self.sphere_size_long = sphere_size_long
self.sphere_message = sphere_message
self.max_num_elements = max_num_elements
self.num_embedding_basis = 8
self.spinconvblock = SpinConvBlock(
self.in_hidden_channels,
self.mid_hidden_channels,
self.sphere_size_lat,
self.sphere_size_long,
self.sphere_message,
self.act,
self.lmax,
)
self.embeddingblock1 = EmbeddingBlock(
self.mid_hidden_channels,
self.mid_hidden_channels,
self.mid_hidden_channels,
self.embedding_size,
self.num_embedding_basis,
self.max_num_elements,
self.act,
)
self.embeddingblock2 = EmbeddingBlock(
self.mid_hidden_channels,
self.out_hidden_channels,
self.mid_hidden_channels,
self.embedding_size,
self.num_embedding_basis,
self.max_num_elements,
self.act,
)
self.distfc1 = nn.Linear(
self.mid_hidden_channels, self.mid_hidden_channels
)
self.distfc2 = nn.Linear(
self.mid_hidden_channels, self.mid_hidden_channels
)
def forward(
self,
x,
x_dist,
source_element,
target_element,
proj_index,
proj_delta,
proj_src_index,
):
out_size = len(x)
x = self.spinconvblock(
x, out_size, proj_index, proj_delta, proj_src_index
)
x = self.embeddingblock1(x, source_element, target_element)
x_dist = self.distfc1(x_dist)
x_dist = self.act(x_dist)
x_dist = self.distfc2(x_dist)
x = x + x_dist
x = self.act(x)
x = self.embeddingblock2(x, source_element, target_element)
return x
class ForceOutputBlock(torch.nn.Module):
def __init__(
self,
in_hidden_channels: int,
out_hidden_channels: int,
mid_hidden_channels: int,
embedding_size: int,
sphere_size_lat: int,
sphere_size_long: int,
max_num_elements: int,
sphere_message: str,
act,
lmax,
) -> None:
super(ForceOutputBlock, self).__init__()
self.in_hidden_channels = in_hidden_channels
self.out_hidden_channels = out_hidden_channels
self.act = act
self.lmax = lmax
self.embedding_size = embedding_size
self.mid_hidden_channels = mid_hidden_channels
self.sphere_size_lat = sphere_size_lat
self.sphere_size_long = sphere_size_long
self.sphere_message = sphere_message
self.max_num_elements = max_num_elements
self.num_embedding_basis = 8
self.spinconvblock = SpinConvBlock(
self.in_hidden_channels,
self.mid_hidden_channels,
self.sphere_size_lat,
self.sphere_size_long,
self.sphere_message,
self.act,
self.lmax,
)
self.block1 = EmbeddingBlock(
self.mid_hidden_channels,
self.mid_hidden_channels,
self.mid_hidden_channels,
self.embedding_size,
self.num_embedding_basis,
self.max_num_elements,
self.act,
)
self.block2 = EmbeddingBlock(
self.mid_hidden_channels,
self.out_hidden_channels,
self.mid_hidden_channels,
self.embedding_size,
self.num_embedding_basis,
self.max_num_elements,
self.act,
)
def forward(
self,
x,
out_size,
target_element,
proj_index,
proj_delta,
proj_src_index,
):
x = self.spinconvblock(
x, out_size, proj_index, proj_delta, proj_src_index
)
x = self.block1(x, target_element, target_element)
x = self.act(x)
x = self.block2(x, target_element, target_element)
return x
class SpinConvBlock(torch.nn.Module):
def __init__(
self,
in_hidden_channels: int,
mid_hidden_channels: int,
sphere_size_lat: int,
sphere_size_long: int,
sphere_message: str,
act,
lmax,
) -> None:
super(SpinConvBlock, self).__init__()
self.in_hidden_channels = in_hidden_channels
self.mid_hidden_channels = mid_hidden_channels
self.sphere_size_lat = sphere_size_lat
self.sphere_size_long = sphere_size_long
self.sphere_message = sphere_message
self.act = act
self.lmax = lmax
self.num_groups = self.in_hidden_channels // 8
self.ProjectLatLongSphere = ProjectLatLongSphere(
sphere_size_lat, sphere_size_long
)
assert self.sphere_message in [
"fullconv",
"rotspharmwd",
]
if self.sphere_message in ["rotspharmwd"]:
self.sph_froms2grid = FromS2Grid(
(self.sphere_size_lat, self.sphere_size_long), self.lmax
)
self.mlp = nn.Linear(
self.in_hidden_channels * (self.lmax + 1) ** 2,
self.mid_hidden_channels,
)
self.sphlength = (self.lmax + 1) ** 2
rotx = torch.zeros(self.sphere_size_long) + (
2 * math.pi / self.sphere_size_long
)
roty = torch.zeros(self.sphere_size_long)
rotz = torch.zeros(self.sphere_size_long)
self.wigner = []
for xrot, yrot, zrot in zip(rotx, roty, rotz):
_blocks = []
for l_degree in range(self.lmax + 1):
_blocks.append(o3.wigner_D(l_degree, xrot, yrot, zrot))
self.wigner.append(torch.block_diag(*_blocks))
if self.sphere_message == "fullconv":
padding = self.sphere_size_long // 2
self.conv1 = nn.Conv1d(
self.in_hidden_channels * self.sphere_size_lat,
self.mid_hidden_channels,
self.sphere_size_long,
groups=self.in_hidden_channels // 8,
padding=padding,
padding_mode="circular",
)
self.pool = nn.AvgPool1d(sphere_size_long)
self.GroupNorm = nn.GroupNorm(
self.num_groups, self.mid_hidden_channels
)
def forward(self, x, out_size, proj_index, proj_delta, proj_src_index):
x = self.ProjectLatLongSphere(
x, out_size, proj_index, proj_delta, proj_src_index
)
if self.sphere_message == "rotspharmwd":
sph_harm_calc = torch.zeros(
((x.shape[0], self.mid_hidden_channels)),
device=x.device,
)
sph_harm = self.sph_froms2grid(x)
sph_harm = sph_harm.view(-1, self.sphlength, 1)
for wD_diag in self.wigner:
wD_diag = wD_diag.to(x.device)
sph_harm_calc += self.act(
self.mlp(sph_harm.reshape(x.shape[0], -1))
)
wd = wD_diag.view(1, self.sphlength, self.sphlength).expand(
len(x) * self.in_hidden_channels, -1, -1
)
sph_harm = torch.bmm(wd, sph_harm)
x = sph_harm_calc
if self.sphere_message in ["fullconv"]:
x = x.view(
-1,
self.in_hidden_channels * self.sphere_size_lat,
self.sphere_size_long,
)
x = self.conv1(x)
x = self.act(x)
            # Pool in the longitudinal direction
x = self.pool(x[:, :, 0 : self.sphere_size_long])
x = x.view(out_size, -1)
x = self.GroupNorm(x)
return x
class EmbeddingBlock(torch.nn.Module):
def __init__(
self,
in_hidden_channels: int,
out_hidden_channels: int,
mid_hidden_channels: int,
embedding_size: int,
num_embedding_basis: int,
max_num_elements: int,
act,
) -> None:
super(EmbeddingBlock, self).__init__()
self.in_hidden_channels = in_hidden_channels
self.out_hidden_channels = out_hidden_channels
self.act = act
self.embedding_size = embedding_size
self.mid_hidden_channels = mid_hidden_channels
self.num_embedding_basis = num_embedding_basis
self.max_num_elements = max_num_elements
self.fc1 = nn.Linear(self.in_hidden_channels, self.mid_hidden_channels)
self.fc2 = nn.Linear(
self.mid_hidden_channels,
self.num_embedding_basis * self.mid_hidden_channels,
)
self.fc3 = nn.Linear(
self.mid_hidden_channels, self.out_hidden_channels
)
self.source_embedding = nn.Embedding(
max_num_elements, self.embedding_size
)
self.target_embedding = nn.Embedding(
max_num_elements, self.embedding_size
)
nn.init.uniform_(self.source_embedding.weight.data, -0.0001, 0.0001)
nn.init.uniform_(self.target_embedding.weight.data, -0.0001, 0.0001)
self.embed_fc1 = nn.Linear(
2 * self.embedding_size, self.num_embedding_basis
)
self.softmax = nn.Softmax(dim=1)
def forward(
self, x: torch.Tensor, source_element, target_element
) -> torch.Tensor:
source_embedding = self.source_embedding(source_element)
target_embedding = self.target_embedding(target_element)
embedding = torch.cat([source_embedding, target_embedding], dim=1)
embedding = self.embed_fc1(embedding)
embedding = self.softmax(embedding)
x = self.fc1(x)
x = self.act(x)
x = self.fc2(x)
x = self.act(x)
x = (
x.view(-1, self.num_embedding_basis, self.mid_hidden_channels)
) * (embedding.view(-1, self.num_embedding_basis, 1))
x = torch.sum(x, dim=1)
x = self.fc3(x)
return x
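# Illustrative sketch (hypothetical helper, not used by the model): traces the
# tensor shapes through EmbeddingBlock. All sizes below are arbitrary assumptions
# and nn.SiLU() stands in for this file's Swish activation (they are identical).
def _embedding_block_shape_example() -> torch.Tensor:
    num_edges, in_ch, mid_ch, out_ch = 10, 32, 16, 8
    block = EmbeddingBlock(
        in_hidden_channels=in_ch,
        out_hidden_channels=out_ch,
        mid_hidden_channels=mid_ch,
        embedding_size=4,
        num_embedding_basis=8,
        max_num_elements=90,
        act=nn.SiLU(),
    )
    x = torch.randn(num_edges, in_ch)
    source_element = torch.randint(0, 90, (num_edges,))
    target_element = torch.randint(0, 90, (num_edges,))
    # The basis weights are a softmax over the (source, target) element pair,
    # so the output mixes num_embedding_basis projections of the input.
    out = block(x, source_element, target_element)
    assert out.shape == (num_edges, out_ch)
    return out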
class DistanceBlock(torch.nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
max_num_elements: int,
scalar_max,
distance_expansion,
scale_distances,
) -> None:
super(DistanceBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.max_num_elements = max_num_elements
self.distance_expansion = distance_expansion
self.scalar_max = scalar_max
self.scale_distances = scale_distances
if self.scale_distances:
self.dist_scalar = nn.Embedding(
self.max_num_elements * self.max_num_elements, 1
)
self.dist_offset = nn.Embedding(
self.max_num_elements * self.max_num_elements, 1
)
nn.init.uniform_(self.dist_scalar.weight.data, -0.0001, 0.0001)
nn.init.uniform_(self.dist_offset.weight.data, -0.0001, 0.0001)
self.fc1 = nn.Linear(self.in_channels, self.out_channels)
def forward(self, edge_distance, source_element, target_element):
if self.scale_distances:
embedding_index = (
source_element * self.max_num_elements + target_element
)
# Restrict the scalar to range from 1 / self.scalar_max to self.scalar_max
scalar_max = math.log(self.scalar_max)
scalar = (
2.0 * torch.sigmoid(self.dist_scalar(embedding_index).view(-1))
- 1.0
)
scalar = torch.exp(scalar_max * scalar)
offset = self.dist_offset(embedding_index).view(-1)
x = self.distance_expansion(scalar * edge_distance + offset)
else:
x = self.distance_expansion(edge_distance)
x = self.fc1(x)
return x
class ProjectLatLongSphere(torch.nn.Module):
def __init__(self, sphere_size_lat: int, sphere_size_long: int) -> None:
super(ProjectLatLongSphere, self).__init__()
self.sphere_size_lat = sphere_size_lat
self.sphere_size_long = sphere_size_long
def forward(
self, x, length: int, index, delta, source_edge_index
) -> torch.Tensor:
device = x.device
hidden_channels = len(x[0])
x_proj = torch.zeros(
length * self.sphere_size_lat * self.sphere_size_long,
hidden_channels,
device=device,
)
splat_values = x[source_edge_index]
# Perform bilinear splatting
x_proj.index_add_(0, index[0], splat_values * (delta[0].view(-1, 1)))
x_proj.index_add_(0, index[1], splat_values * (delta[1].view(-1, 1)))
x_proj.index_add_(0, index[2], splat_values * (delta[2].view(-1, 1)))
x_proj.index_add_(0, index[3], splat_values * (delta[3].view(-1, 1)))
x_proj = x_proj.view(
length,
self.sphere_size_lat * self.sphere_size_long,
hidden_channels,
)
x_proj = torch.transpose(x_proj, 1, 2).contiguous()
x_proj = x_proj.view(
length,
hidden_channels,
self.sphere_size_lat,
self.sphere_size_long,
)
return x_proj
class Swish(torch.nn.Module):
def __init__(self) -> None:
super(Swish, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class GaussianSmearing(torch.nn.Module):
def __init__(
self,
start: float = -5.0,
stop: float = 5.0,
num_gaussians: int = 50,
basis_width_scalar: float = 1.0,
) -> None:
super(GaussianSmearing, self).__init__()
offset = torch.linspace(start, stop, num_gaussians)
self.coeff = (
-0.5 / (basis_width_scalar * (offset[1] - offset[0])).item() ** 2
)
self.register_buffer("offset", offset)
def forward(self, dist) -> torch.Tensor:
dist = dist.view(-1, 1) - self.offset.view(1, -1)
return torch.exp(self.coeff * torch.pow(dist, 2))
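# Illustrative sketch (hypothetical helper, not used by the model): expands a
# batch of scalar distances into Gaussian basis features. The start/stop range
# and the number of basis functions below are arbitrary assumptions.
def _gaussian_smearing_example() -> torch.Tensor:
    smearing = GaussianSmearing(start=0.0, stop=6.0, num_gaussians=50)
    dist = torch.linspace(0.5, 5.5, steps=8)
    feats = smearing(dist)  # each row peaks around its own distance
    assert feats.shape == (8, 50)
    return feats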
| 43,927 | 33.372457 | 137 | py |
ocp | ocp-main/ocpmodels/models/schnet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch_geometric.nn import SchNet
from torch_scatter import scatter
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.models.base import BaseModel
@registry.register_model("schnet")
class SchNetWrap(SchNet, BaseModel):
r"""Wrapper around the continuous-filter convolutional neural network SchNet from the
`"SchNet: A Continuous-filter Convolutional Neural Network for Modeling
Quantum Interactions" <https://arxiv.org/abs/1706.08566>`_. Each layer uses interaction
block of the form:
.. math::
\mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \odot
h_{\mathbf{\Theta}} ( \exp(-\gamma(\mathbf{e}_{j,i} - \mathbf{\mu}))),
Args:
num_atoms (int): Unused argument
bond_feat_dim (int): Unused argument
num_targets (int): Number of targets to predict.
use_pbc (bool, optional): If set to :obj:`True`, account for periodic boundary conditions.
(default: :obj:`True`)
regress_forces (bool, optional): If set to :obj:`True`, predict forces by differentiating
energy with respect to positions.
(default: :obj:`True`)
otf_graph (bool, optional): If set to :obj:`True`, compute graph edges on the fly.
(default: :obj:`False`)
hidden_channels (int, optional): Number of hidden channels.
(default: :obj:`128`)
num_filters (int, optional): Number of filters to use.
(default: :obj:`128`)
num_interactions (int, optional): Number of interaction blocks
(default: :obj:`6`)
num_gaussians (int, optional): The number of gaussians :math:`\mu`.
(default: :obj:`50`)
cutoff (float, optional): Cutoff distance for interatomic interactions.
(default: :obj:`10.0`)
readout (string, optional): Whether to apply :obj:`"add"` or
:obj:`"mean"` global aggregation. (default: :obj:`"add"`)
"""
def __init__(
self,
num_atoms, # not used
bond_feat_dim, # not used
num_targets,
use_pbc=True,
regress_forces=True,
otf_graph=False,
hidden_channels=128,
num_filters=128,
num_interactions=6,
num_gaussians=50,
cutoff=10.0,
readout="add",
) -> None:
self.num_targets = num_targets
self.regress_forces = regress_forces
self.use_pbc = use_pbc
self.cutoff = cutoff
self.otf_graph = otf_graph
self.max_neighbors = 50
self.reduce = readout
super(SchNetWrap, self).__init__(
hidden_channels=hidden_channels,
num_filters=num_filters,
num_interactions=num_interactions,
num_gaussians=num_gaussians,
cutoff=cutoff,
readout=readout,
)
@conditional_grad(torch.enable_grad())
def _forward(self, data):
z = data.atomic_numbers.long()
pos = data.pos
batch = data.batch
(
edge_index,
edge_weight,
distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
if self.use_pbc:
assert z.dim() == 1 and z.dtype == torch.long
edge_attr = self.distance_expansion(edge_weight)
h = self.embedding(z)
for interaction in self.interactions:
h = h + interaction(h, edge_index, edge_weight, edge_attr)
h = self.lin1(h)
h = self.act(h)
h = self.lin2(h)
batch = torch.zeros_like(z) if batch is None else batch
energy = scatter(h, batch, dim=0, reduce=self.reduce)
else:
energy = super(SchNetWrap, self).forward(z, pos, batch)
return energy
def forward(self, data):
if self.regress_forces:
data.pos.requires_grad_(True)
energy = self._forward(data)
if self.regress_forces:
forces = -1 * (
torch.autograd.grad(
energy,
data.pos,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
)
return energy, forces
else:
return energy
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
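# Illustrative sketch (hypothetical helper, not used elsewhere): constructing the
# wrapper with on-the-fly graph generation. The hyperparameters are arbitrary
# assumptions; a forward pass additionally needs a torch_geometric Batch with
# atomic_numbers, pos, batch (and cell/natoms when use_pbc=True).
def _schnet_wrap_example() -> int:
    model = SchNetWrap(
        num_atoms=None,  # unused
        bond_feat_dim=None,  # unused
        num_targets=1,
        use_pbc=False,
        regress_forces=False,
        otf_graph=True,
        hidden_channels=64,
        num_filters=64,
        num_interactions=3,
        num_gaussians=25,
        cutoff=6.0,
    )
    return model.num_params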
| 4,755 | 32.258741 | 98 | py |
ocp | ocp-main/ocpmodels/models/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .base import BaseModel
from .cgcnn import CGCNN
from .dimenet import DimeNetWrap as DimeNet
from .dimenet_plus_plus import DimeNetPlusPlusWrap as DimeNetPlusPlus
from .forcenet import ForceNet
from .gemnet.gemnet import GemNetT
from .gemnet_gp.gemnet import GraphParallelGemNetT as GraphParallelGemNetT
from .gemnet_oc.gemnet_oc import GemNetOC
from .painn.painn import PaiNN
from .schnet import SchNetWrap as SchNet
from .scn.scn import SphericalChannelNetwork
from .spinconv import spinconv
| 676 | 36.611111 | 74 | py |
ocp | ocp-main/ocpmodels/models/cgcnn.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
from torch_geometric.nn import MessagePassing, global_mean_pool, radius_graph
from torch_geometric.nn.models.schnet import GaussianSmearing
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.datasets.embeddings import KHOT_EMBEDDINGS, QMOF_KHOT_EMBEDDINGS
from ocpmodels.models.base import BaseModel
@registry.register_model("cgcnn")
class CGCNN(BaseModel):
r"""Implementation of the Crystal Graph CNN model from the
`"Crystal Graph Convolutional Neural Networks for an Accurate
and Interpretable Prediction of Material Properties"
<https://arxiv.org/abs/1710.10324>`_ paper.
Args:
num_atoms (int): Number of atoms.
bond_feat_dim (int): Dimension of bond features.
num_targets (int): Number of targets to predict.
use_pbc (bool, optional): If set to :obj:`True`, account for periodic boundary conditions.
(default: :obj:`True`)
regress_forces (bool, optional): If set to :obj:`True`, predict forces by differentiating
energy with respect to positions.
(default: :obj:`True`)
atom_embedding_size (int, optional): Size of atom embeddings.
(default: :obj:`64`)
num_graph_conv_layers (int, optional): Number of graph convolutional layers.
(default: :obj:`6`)
fc_feat_size (int, optional): Size of fully connected layers.
(default: :obj:`128`)
num_fc_layers (int, optional): Number of fully connected layers.
(default: :obj:`4`)
otf_graph (bool, optional): If set to :obj:`True`, compute graph edges on the fly.
(default: :obj:`False`)
cutoff (float, optional): Cutoff distance for interatomic interactions.
            (default: :obj:`6.0`)
        num_gaussians (int, optional): Number of Gaussians used for smearing.
            (default: :obj:`50`)
        embeddings (str, optional): Which elemental embeddings to use,
            :obj:`"khot"` or :obj:`"qmof"`. (default: :obj:`"khot"`)
"""
def __init__(
self,
num_atoms: int,
bond_feat_dim: int,
num_targets: int,
use_pbc: bool = True,
regress_forces: bool = True,
atom_embedding_size: int = 64,
num_graph_conv_layers: int = 6,
fc_feat_size: int = 128,
num_fc_layers: int = 4,
otf_graph: bool = False,
cutoff: float = 6.0,
num_gaussians: int = 50,
embeddings: str = "khot",
) -> None:
super(CGCNN, self).__init__(num_atoms, bond_feat_dim, num_targets)
self.regress_forces = regress_forces
self.use_pbc = use_pbc
self.cutoff = cutoff
self.otf_graph = otf_graph
self.max_neighbors = 50
# Get CGCNN atom embeddings
if embeddings == "khot":
embeddings = KHOT_EMBEDDINGS
elif embeddings == "qmof":
embeddings = QMOF_KHOT_EMBEDDINGS
else:
raise ValueError(
                'embedding must be either "khot" for original CGCNN K-hot elemental embeddings or "qmof" for QMOF K-hot elemental embeddings'
)
self.embedding = torch.zeros(100, len(embeddings[1]))
for i in range(100):
self.embedding[i] = torch.tensor(embeddings[i + 1])
self.embedding_fc = nn.Linear(len(embeddings[1]), atom_embedding_size)
self.convs = nn.ModuleList(
[
CGCNNConv(
node_dim=atom_embedding_size,
edge_dim=bond_feat_dim,
cutoff=cutoff,
)
for _ in range(num_graph_conv_layers)
]
)
self.conv_to_fc = nn.Sequential(
nn.Linear(atom_embedding_size, fc_feat_size), nn.Softplus()
)
if num_fc_layers > 1:
layers = []
for _ in range(num_fc_layers - 1):
layers.append(nn.Linear(fc_feat_size, fc_feat_size))
layers.append(nn.Softplus())
self.fcs = nn.Sequential(*layers)
self.fc_out = nn.Linear(fc_feat_size, self.num_targets)
self.cutoff = cutoff
self.distance_expansion = GaussianSmearing(0.0, cutoff, num_gaussians)
@conditional_grad(torch.enable_grad())
def _forward(self, data):
# Get node features
if self.embedding.device != data.atomic_numbers.device:
self.embedding = self.embedding.to(data.atomic_numbers.device)
data.x = self.embedding[data.atomic_numbers.long() - 1]
(
edge_index,
distances,
distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
data.edge_index = edge_index
data.edge_attr = self.distance_expansion(distances)
# Forward pass through the network
mol_feats = self._convolve(data)
mol_feats = self.conv_to_fc(mol_feats)
if hasattr(self, "fcs"):
mol_feats = self.fcs(mol_feats)
energy = self.fc_out(mol_feats)
return energy
def forward(self, data):
if self.regress_forces:
data.pos.requires_grad_(True)
energy = self._forward(data)
if self.regress_forces:
forces = -1 * (
torch.autograd.grad(
energy,
data.pos,
grad_outputs=torch.ones_like(energy),
create_graph=True,
)[0]
)
return energy, forces
else:
return energy
def _convolve(self, data):
"""
Returns the output of the convolution layers before they are passed
into the dense layers.
"""
node_feats = self.embedding_fc(data.x)
for f in self.convs:
node_feats = f(node_feats, data.edge_index, data.edge_attr)
mol_feats = global_mean_pool(node_feats, data.batch)
return mol_feats
class CGCNNConv(MessagePassing):
"""Implements the message passing layer from
`"Crystal Graph Convolutional Neural Networks for an
Accurate and Interpretable Prediction of Material Properties"
<https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301>`.
"""
def __init__(
self, node_dim, edge_dim, cutoff: float = 6.0, **kwargs
) -> None:
super(CGCNNConv, self).__init__(aggr="add")
self.node_feat_size = node_dim
self.edge_feat_size = edge_dim
self.cutoff = cutoff
self.lin1 = nn.Linear(
2 * self.node_feat_size + self.edge_feat_size,
2 * self.node_feat_size,
)
self.bn1 = nn.BatchNorm1d(2 * self.node_feat_size)
self.ln1 = nn.LayerNorm(self.node_feat_size)
self.reset_parameters()
def reset_parameters(self) -> None:
torch.nn.init.xavier_uniform_(self.lin1.weight)
self.lin1.bias.data.fill_(0)
self.bn1.reset_parameters()
self.ln1.reset_parameters()
def forward(self, x, edge_index, edge_attr):
"""
Arguments:
x has shape [num_nodes, node_feat_size]
edge_index has shape [2, num_edges]
edge_attr is [num_edges, edge_feat_size]
"""
out = self.propagate(
edge_index, x=x, edge_attr=edge_attr, size=(x.size(0), x.size(0))
)
out = nn.Softplus()(self.ln1(out) + x)
return out
def message(self, x_i, x_j, edge_attr):
"""
Arguments:
x_i has shape [num_edges, node_feat_size]
x_j has shape [num_edges, node_feat_size]
edge_attr has shape [num_edges, edge_feat_size]
Returns:
tensor of shape [num_edges, node_feat_size]
"""
z = self.lin1(torch.cat([x_i, x_j, edge_attr], dim=1))
z = self.bn1(z)
z1, z2 = z.chunk(2, dim=1)
z1 = nn.Sigmoid()(z1)
z2 = nn.Softplus()(z2)
return z1 * z2
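# Illustrative sketch (hypothetical helper, not used by the model): a single
# CGCNNConv layer on a tiny random graph, showing the expected tensor shapes.
# Sizes and the edge list are arbitrary assumptions; eval() keeps BatchNorm
# deterministic for this toy input.
def _cgcnn_conv_example() -> torch.Tensor:
    num_nodes, node_dim, edge_dim = 4, 8, 5
    conv = CGCNNConv(node_dim=node_dim, edge_dim=edge_dim).eval()
    x = torch.randn(num_nodes, node_dim)
    edge_index = torch.tensor([[0, 1, 2, 3, 0, 2], [1, 0, 3, 2, 2, 0]])
    edge_attr = torch.randn(edge_index.size(1), edge_dim)
    out = conv(x, edge_index, edge_attr)
    assert out.shape == (num_nodes, node_dim)
    return out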
| 8,190 | 33.855319 | 142 | py |
ocp | ocp-main/ocpmodels/models/gemnet/initializers.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
def _standardize(kernel):
"""
    Makes sure that Var(W) = 1 and E[W] = 0 along the fan-in axis
    (the 1/fan_in scaling in he_orthogonal_init then gives N*Var(W) = 1)
"""
eps = 1e-6
if len(kernel.shape) == 3:
axis = [0, 1] # last dimension is output dimension
else:
axis = 1
var, mean = torch.var_mean(kernel, dim=axis, unbiased=True, keepdim=True)
kernel = (kernel - mean) / (var + eps) ** 0.5
return kernel
def he_orthogonal_init(tensor):
"""
Generate a weight matrix with variance according to He (Kaiming) initialization.
    The weights are drawn from a random (semi-)orthogonal matrix: neural networks
    are expected to learn better when features are decorrelated, as stated in e.g.
    "Reducing overfitting in deep networks by decorrelating representations",
    "Dropout: a simple way to prevent neural networks from overfitting",
    "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks".
"""
tensor = torch.nn.init.orthogonal_(tensor)
if len(tensor.shape) == 3:
fan_in = tensor.shape[:-1].numel()
else:
fan_in = tensor.shape[1]
with torch.no_grad():
tensor.data = _standardize(tensor.data)
tensor.data *= (1 / fan_in) ** 0.5
return tensor
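# Illustrative sketch (hypothetical helper, not used by the models): initialize a
# weight matrix and check that every output row ends up with zero mean and a
# variance of 1/fan_in, i.e. fan_in * Var(W) = 1.
def _he_orthogonal_init_example() -> torch.Tensor:
    weight = torch.empty(64, 128)  # (out_features, in_features)
    he_orthogonal_init(weight)
    fan_in = weight.shape[1]
    assert torch.allclose(weight.mean(dim=1), torch.zeros(64), atol=1e-6)
    assert torch.allclose(weight.var(dim=1), torch.full((64,), 1.0 / fan_in), atol=1e-4)
    return weight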
| 1,385 | 27.875 | 92 | py |
ocp | ocp-main/ocpmodels/models/gemnet/gemnet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Optional
import numpy as np
import torch
from torch_geometric.nn import radius_graph
from torch_scatter import scatter
from torch_sparse import SparseTensor
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
compute_neighbors,
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.models.base import BaseModel
from ocpmodels.modules.scaling.compat import load_scales_compat
from .layers.atom_update_block import OutputBlock
from .layers.base_layers import Dense
from .layers.efficient import EfficientInteractionDownProjection
from .layers.embedding_block import AtomEmbedding, EdgeEmbedding
from .layers.interaction_block import InteractionBlockTripletsOnly
from .layers.radial_basis import RadialBasis
from .layers.spherical_basis import CircularBasisLayer
from .utils import (
inner_product_normalized,
mask_neighbors,
ragged_range,
repeat_blocks,
)
@registry.register_model("gemnet_t")
class GemNetT(BaseModel):
"""
GemNet-T, triplets-only variant of GemNet
Parameters
----------
num_atoms (int): Unused argument
bond_feat_dim (int): Unused argument
num_targets: int
Number of prediction targets.
num_spherical: int
Controls maximum frequency.
num_radial: int
Controls maximum frequency.
num_blocks: int
Number of building blocks to be stacked.
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
emb_size_trip: int
(Down-projected) Embedding size in the triplet message passing block.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
emb_size_bil_trip: int
Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.
num_before_skip: int
Number of residual blocks before the first skip connection.
num_after_skip: int
Number of residual blocks after the first skip connection.
num_concat: int
Number of residual blocks after the concatenation.
num_atom: int
Number of residual blocks in the atom embedding blocks.
regress_forces: bool
Whether to predict forces. Default: True
direct_forces: bool
If True predict forces based on aggregation of interatomic directions.
If False predict forces based on negative gradient of energy potential.
cutoff: float
        Embedding cutoff for interatomic directions in Angstrom.
rbf: dict
Name and hyperparameters of the radial basis function.
envelope: dict
Name and hyperparameters of the envelope function.
cbf: dict
Name and hyperparameters of the cosine basis function.
extensive: bool
Whether the output should be extensive (proportional to the number of atoms)
output_init: str
Initialization method for the final dense layer.
activation: str
Name of the activation function.
scale_file: str
Path to the json file containing the scaling factors.
"""
def __init__(
self,
num_atoms: Optional[int],
bond_feat_dim: int,
num_targets: int,
num_spherical: int,
num_radial: int,
num_blocks: int,
emb_size_atom: int,
emb_size_edge: int,
emb_size_trip: int,
emb_size_rbf: int,
emb_size_cbf: int,
emb_size_bil_trip: int,
num_before_skip: int,
num_after_skip: int,
num_concat: int,
num_atom: int,
regress_forces: bool = True,
direct_forces: bool = False,
cutoff: float = 6.0,
max_neighbors: int = 50,
rbf: dict = {"name": "gaussian"},
envelope: dict = {"name": "polynomial", "exponent": 5},
cbf: dict = {"name": "spherical_harmonics"},
extensive: bool = True,
otf_graph: bool = False,
use_pbc: bool = True,
output_init: str = "HeOrthogonal",
activation: str = "swish",
num_elements: int = 83,
scale_file: Optional[str] = None,
):
super().__init__()
self.num_targets = num_targets
assert num_blocks > 0
self.num_blocks = num_blocks
self.extensive = extensive
self.cutoff = cutoff
assert self.cutoff <= 6 or otf_graph
self.max_neighbors = max_neighbors
assert self.max_neighbors == 50 or otf_graph
self.regress_forces = regress_forces
self.otf_graph = otf_graph
self.use_pbc = use_pbc
# GemNet variants
self.direct_forces = direct_forces
### ---------------------------------- Basis Functions ---------------------------------- ###
self.radial_basis = RadialBasis(
num_radial=num_radial,
cutoff=cutoff,
rbf=rbf,
envelope=envelope,
)
radial_basis_cbf3 = RadialBasis(
num_radial=num_radial,
cutoff=cutoff,
rbf=rbf,
envelope=envelope,
)
self.cbf_basis3 = CircularBasisLayer(
num_spherical,
radial_basis=radial_basis_cbf3,
cbf=cbf,
efficient=True,
)
### ------------------------------------------------------------------------------------- ###
### ------------------------------- Share Down Projections ------------------------------ ###
# Share down projection across all interaction blocks
self.mlp_rbf3 = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_cbf3 = EfficientInteractionDownProjection(
num_spherical, num_radial, emb_size_cbf
)
        # Share the dense layer of the atom embedding block across the interaction blocks
self.mlp_rbf_h = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_rbf_out = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
### ------------------------------------------------------------------------------------- ###
# Embedding block
self.atom_emb = AtomEmbedding(emb_size_atom, num_elements)
self.edge_emb = EdgeEmbedding(
emb_size_atom, num_radial, emb_size_edge, activation=activation
)
out_blocks = []
int_blocks = []
# Interaction Blocks
interaction_block = InteractionBlockTripletsOnly # GemNet-(d)T
for i in range(num_blocks):
int_blocks.append(
interaction_block(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_trip=emb_size_trip,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
emb_size_bil_trip=emb_size_bil_trip,
num_before_skip=num_before_skip,
num_after_skip=num_after_skip,
num_concat=num_concat,
num_atom=num_atom,
activation=activation,
name=f"IntBlock_{i+1}",
)
)
for i in range(num_blocks + 1):
out_blocks.append(
OutputBlock(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=num_atom,
num_targets=num_targets,
activation=activation,
output_init=output_init,
direct_forces=direct_forces,
name=f"OutBlock_{i}",
)
)
self.out_blocks = torch.nn.ModuleList(out_blocks)
self.int_blocks = torch.nn.ModuleList(int_blocks)
self.shared_parameters = [
(self.mlp_rbf3.linear.weight, self.num_blocks),
(self.mlp_cbf3.weight, self.num_blocks),
(self.mlp_rbf_h.linear.weight, self.num_blocks),
(self.mlp_rbf_out.linear.weight, self.num_blocks + 1),
]
load_scales_compat(self, scale_file)
def get_triplets(self, edge_index, num_atoms):
"""
Get all b->a for each edge c->a.
It is possible that b=c, as long as the edges are distinct.
Returns
-------
id3_ba: torch.Tensor, shape (num_triplets,)
Indices of input edge b->a of each triplet b->a<-c
id3_ca: torch.Tensor, shape (num_triplets,)
Indices of output edge c->a of each triplet b->a<-c
id3_ragged_idx: torch.Tensor, shape (num_triplets,)
Indices enumerating the copies of id3_ca for creating a padded matrix
"""
idx_s, idx_t = edge_index # c->a (source=c, target=a)
value = torch.arange(
idx_s.size(0), device=idx_s.device, dtype=idx_s.dtype
)
# Possibly contains multiple copies of the same edge (for periodic interactions)
adj = SparseTensor(
row=idx_t,
col=idx_s,
value=value,
sparse_sizes=(num_atoms, num_atoms),
)
adj_edges = adj[idx_t]
# Edge indices (b->a, c->a) for triplets.
id3_ba = adj_edges.storage.value()
id3_ca = adj_edges.storage.row()
# Remove self-loop triplets
# Compare edge indices, not atom indices to correctly handle periodic interactions
mask = id3_ba != id3_ca
id3_ba = id3_ba[mask]
id3_ca = id3_ca[mask]
# Get indices to reshape the neighbor indices b->a into a dense matrix.
# id3_ca has to be sorted for this to work.
num_triplets = torch.bincount(id3_ca, minlength=idx_s.size(0))
id3_ragged_idx = ragged_range(num_triplets)
return id3_ba, id3_ca, id3_ragged_idx
def select_symmetric_edges(self, tensor, mask, reorder_idx, inverse_neg):
# Mask out counter-edges
tensor_directed = tensor[mask]
# Concatenate counter-edges after normal edges
sign = 1 - 2 * inverse_neg
tensor_cat = torch.cat([tensor_directed, sign * tensor_directed])
# Reorder everything so the edges of every image are consecutive
tensor_ordered = tensor_cat[reorder_idx]
return tensor_ordered
def reorder_symmetric_edges(
self, edge_index, cell_offsets, neighbors, edge_dist, edge_vector
):
"""
Reorder edges to make finding counter-directional edges easier.
Some edges are only present in one direction in the data,
since every atom has a maximum number of neighbors. Since we only use i->j
edges here, we lose some j->i edges and add others by
making it symmetric.
We could fix this by merging edge_index with its counter-edges,
including the cell_offsets, and then running torch.unique.
But this does not seem worth it.
"""
# Generate mask
mask_sep_atoms = edge_index[0] < edge_index[1]
# Distinguish edges between the same (periodic) atom by ordering the cells
cell_earlier = (
(cell_offsets[:, 0] < 0)
| ((cell_offsets[:, 0] == 0) & (cell_offsets[:, 1] < 0))
| (
(cell_offsets[:, 0] == 0)
& (cell_offsets[:, 1] == 0)
& (cell_offsets[:, 2] < 0)
)
)
mask_same_atoms = edge_index[0] == edge_index[1]
mask_same_atoms &= cell_earlier
mask = mask_sep_atoms | mask_same_atoms
# Mask out counter-edges
edge_index_new = edge_index[mask[None, :].expand(2, -1)].view(2, -1)
# Concatenate counter-edges after normal edges
edge_index_cat = torch.cat(
[
edge_index_new,
torch.stack([edge_index_new[1], edge_index_new[0]], dim=0),
],
dim=1,
)
# Count remaining edges per image
batch_edge = torch.repeat_interleave(
torch.arange(neighbors.size(0), device=edge_index.device),
neighbors,
)
batch_edge = batch_edge[mask]
neighbors_new = 2 * torch.bincount(
batch_edge, minlength=neighbors.size(0)
)
# Create indexing array
edge_reorder_idx = repeat_blocks(
neighbors_new // 2,
repeats=2,
continuous_indexing=True,
repeat_inc=edge_index_new.size(1),
)
# Reorder everything so the edges of every image are consecutive
edge_index_new = edge_index_cat[:, edge_reorder_idx]
cell_offsets_new = self.select_symmetric_edges(
cell_offsets, mask, edge_reorder_idx, True
)
edge_dist_new = self.select_symmetric_edges(
edge_dist, mask, edge_reorder_idx, False
)
edge_vector_new = self.select_symmetric_edges(
edge_vector, mask, edge_reorder_idx, True
)
return (
edge_index_new,
cell_offsets_new,
neighbors_new,
edge_dist_new,
edge_vector_new,
)
def select_edges(
self,
data,
edge_index,
cell_offsets,
neighbors,
edge_dist,
edge_vector,
cutoff=None,
):
if cutoff is not None:
edge_mask = edge_dist <= cutoff
edge_index = edge_index[:, edge_mask]
cell_offsets = cell_offsets[edge_mask]
neighbors = mask_neighbors(neighbors, edge_mask)
edge_dist = edge_dist[edge_mask]
edge_vector = edge_vector[edge_mask]
empty_image = neighbors == 0
if torch.any(empty_image):
raise ValueError(
f"An image has no neighbors: id={data.id[empty_image]}, "
f"sid={data.sid[empty_image]}, fid={data.fid[empty_image]}"
)
return edge_index, cell_offsets, neighbors, edge_dist, edge_vector
def generate_interaction_graph(self, data):
num_atoms = data.atomic_numbers.size(0)
(
edge_index,
D_st,
distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
# These vectors actually point in the opposite direction.
# But we want to use col as idx_t for efficient aggregation.
V_st = -distance_vec / D_st[:, None]
# Mask interaction edges if required
if self.otf_graph or np.isclose(self.cutoff, 6):
select_cutoff = None
else:
select_cutoff = self.cutoff
(edge_index, cell_offsets, neighbors, D_st, V_st,) = self.select_edges(
data=data,
edge_index=edge_index,
cell_offsets=cell_offsets,
neighbors=neighbors,
edge_dist=D_st,
edge_vector=V_st,
cutoff=select_cutoff,
)
(
edge_index,
cell_offsets,
neighbors,
D_st,
V_st,
) = self.reorder_symmetric_edges(
edge_index, cell_offsets, neighbors, D_st, V_st
)
# Indices for swapping c->a and a->c (for symmetric MP)
block_sizes = neighbors // 2
id_swap = repeat_blocks(
block_sizes,
repeats=2,
continuous_indexing=False,
start_idx=block_sizes[0],
block_inc=block_sizes[:-1] + block_sizes[1:],
repeat_inc=-block_sizes,
)
id3_ba, id3_ca, id3_ragged_idx = self.get_triplets(
edge_index, num_atoms=num_atoms
)
return (
edge_index,
neighbors,
D_st,
V_st,
id_swap,
id3_ba,
id3_ca,
id3_ragged_idx,
)
@conditional_grad(torch.enable_grad())
def forward(self, data):
pos = data.pos
batch = data.batch
atomic_numbers = data.atomic_numbers.long()
if self.regress_forces and not self.direct_forces:
pos.requires_grad_(True)
(
edge_index,
neighbors,
D_st,
V_st,
id_swap,
id3_ba,
id3_ca,
id3_ragged_idx,
) = self.generate_interaction_graph(data)
idx_s, idx_t = edge_index
# Calculate triplet angles
cosφ_cab = inner_product_normalized(V_st[id3_ca], V_st[id3_ba])
rad_cbf3, cbf3 = self.cbf_basis3(D_st, cosφ_cab, id3_ca)
rbf = self.radial_basis(D_st)
# Embedding block
h = self.atom_emb(atomic_numbers)
# (nAtoms, emb_size_atom)
m = self.edge_emb(h, rbf, idx_s, idx_t) # (nEdges, emb_size_edge)
rbf3 = self.mlp_rbf3(rbf)
cbf3 = self.mlp_cbf3(rad_cbf3, cbf3, id3_ca, id3_ragged_idx)
rbf_h = self.mlp_rbf_h(rbf)
rbf_out = self.mlp_rbf_out(rbf)
E_t, F_st = self.out_blocks[0](h, m, rbf_out, idx_t)
# (nAtoms, num_targets), (nEdges, num_targets)
for i in range(self.num_blocks):
# Interaction block
h, m = self.int_blocks[i](
h=h,
m=m,
rbf3=rbf3,
cbf3=cbf3,
id3_ragged_idx=id3_ragged_idx,
id_swap=id_swap,
id3_ba=id3_ba,
id3_ca=id3_ca,
rbf_h=rbf_h,
idx_s=idx_s,
idx_t=idx_t,
) # (nAtoms, emb_size_atom), (nEdges, emb_size_edge)
E, F = self.out_blocks[i + 1](h, m, rbf_out, idx_t)
# (nAtoms, num_targets), (nEdges, num_targets)
F_st += F
E_t += E
nMolecules = torch.max(batch) + 1
if self.extensive:
E_t = scatter(
E_t, batch, dim=0, dim_size=nMolecules, reduce="add"
) # (nMolecules, num_targets)
else:
E_t = scatter(
E_t, batch, dim=0, dim_size=nMolecules, reduce="mean"
) # (nMolecules, num_targets)
if self.regress_forces:
if self.direct_forces:
# map forces in edge directions
F_st_vec = F_st[:, :, None] * V_st[:, None, :]
# (nEdges, num_targets, 3)
F_t = scatter(
F_st_vec,
idx_t,
dim=0,
dim_size=data.atomic_numbers.size(0),
reduce="add",
) # (nAtoms, num_targets, 3)
F_t = F_t.squeeze(1) # (nAtoms, 3)
else:
if self.num_targets > 1:
forces = []
for i in range(self.num_targets):
# maybe this can be solved differently
forces += [
-torch.autograd.grad(
E_t[:, i].sum(), pos, create_graph=True
)[0]
]
F_t = torch.stack(forces, dim=1)
# (nAtoms, num_targets, 3)
else:
F_t = -torch.autograd.grad(
E_t.sum(), pos, create_graph=True
)[0]
# (nAtoms, 3)
return E_t, F_t # (nMolecules, num_targets), (nAtoms, 3)
else:
return E_t
@property
def num_params(self):
return sum(p.numel() for p in self.parameters())
| 20,301 | 32.724252 | 118 | py |
ocp | ocp-main/ocpmodels/models/gemnet/utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import json
import torch
from torch_scatter import segment_csr
def read_json(path: str):
""""""
if not path.endswith(".json"):
raise UserWarning(f"Path {path} is not a json-path.")
with open(path, "r") as f:
content = json.load(f)
return content
def update_json(path: str, data) -> None:
""""""
if not path.endswith(".json"):
raise UserWarning(f"Path {path} is not a json-path.")
content = read_json(path)
content.update(data)
write_json(path, content)
def write_json(path: str, data) -> None:
""""""
if not path.endswith(".json"):
raise UserWarning(f"Path {path} is not a json-path.")
with open(path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def read_value_json(path: str, key):
""""""
content = read_json(path)
if key in content.keys():
return content[key]
else:
return None
def ragged_range(sizes):
"""Multiple concatenated ranges.
Examples
--------
sizes = [1 4 2 3]
Return: [0 0 1 2 3 0 1 0 1 2]
"""
assert sizes.dim() == 1
if sizes.sum() == 0:
return sizes.new_empty(0)
# Remove 0 sizes
sizes_nonzero = sizes > 0
if not torch.all(sizes_nonzero):
sizes = torch.masked_select(sizes, sizes_nonzero)
    # Initialize the indexing array with ones, as we need to set up incremental
    # indexing within each group when cumulatively summed at the final stage.
id_steps = torch.ones(sizes.sum(), dtype=torch.long, device=sizes.device)
id_steps[0] = 0
insert_index = sizes[:-1].cumsum(0)
insert_val = (1 - sizes)[:-1]
# Assign index-offsetting values
id_steps[insert_index] = insert_val
# Finally index into input array for the group repeated o/p
res = id_steps.cumsum(0)
return res
def repeat_blocks(
sizes,
repeats,
continuous_indexing: bool = True,
start_idx: int = 0,
block_inc: int = 0,
repeat_inc: int = 0,
) -> torch.Tensor:
"""Repeat blocks of indices.
Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements
continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block,
either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition,
either global or per block
Examples
--------
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
repeat_inc = 4
Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
start_idx = 5
Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
block_inc = 1
Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
Return: [0 1 0 1 5 6 5 6]
"""
assert sizes.dim() == 1
assert all(sizes >= 0)
# Remove 0 sizes
sizes_nonzero = sizes > 0
if not torch.all(sizes_nonzero):
assert block_inc == 0 # Implementing this is not worth the effort
sizes = torch.masked_select(sizes, sizes_nonzero)
if isinstance(repeats, torch.Tensor):
repeats = torch.masked_select(repeats, sizes_nonzero)
if isinstance(repeat_inc, torch.Tensor):
repeat_inc = torch.masked_select(repeat_inc, sizes_nonzero)
if isinstance(repeats, torch.Tensor):
assert all(repeats >= 0)
insert_dummy = repeats[0] == 0
if insert_dummy:
one = sizes.new_ones(1)
zero = sizes.new_zeros(1)
sizes = torch.cat((one, sizes))
repeats = torch.cat((one, repeats))
if isinstance(block_inc, torch.Tensor):
block_inc = torch.cat((zero, block_inc))
if isinstance(repeat_inc, torch.Tensor):
repeat_inc = torch.cat((zero, repeat_inc))
else:
assert repeats >= 0
insert_dummy = False
# Get repeats for each group using group lengths/sizes
r1 = torch.repeat_interleave(
torch.arange(len(sizes), device=sizes.device), repeats
)
# Get total size of output array, as needed to initialize output indexing array
N = (sizes * repeats).sum()
    # Initialize the indexing array with ones, as we need to set up incremental
    # indexing within each group when cumulatively summed at the final stage.
    # Two steps here:
    # 1. Within each group we have multiple sequences, so offset each sequence
    #    by the lengths of the sequences preceding it.
id_ar = torch.ones(N, dtype=torch.long, device=sizes.device)
id_ar[0] = 0
insert_index = sizes[r1[:-1]].cumsum(0)
insert_val = (1 - sizes)[r1[:-1]]
if isinstance(repeats, torch.Tensor) and torch.any(repeats == 0):
diffs = r1[1:] - r1[:-1]
indptr = torch.cat((sizes.new_zeros(1), diffs.cumsum(0)))
if continuous_indexing:
# If a group was skipped (repeats=0) we need to add its size
insert_val += segment_csr(sizes[: r1[-1]], indptr, reduce="sum")
# Add block increments
if isinstance(block_inc, torch.Tensor):
insert_val += segment_csr(
block_inc[: r1[-1]], indptr, reduce="sum"
)
else:
insert_val += block_inc * (indptr[1:] - indptr[:-1])
if insert_dummy:
insert_val[0] -= block_inc
else:
idx = r1[1:] != r1[:-1]
if continuous_indexing:
# 2. For each group, make sure the indexing starts from the next group's
# first element. So, simply assign 1s there.
insert_val[idx] = 1
# Add block increments
insert_val[idx] += block_inc
# Add repeat_inc within each group
if isinstance(repeat_inc, torch.Tensor):
insert_val += repeat_inc[r1[:-1]]
if isinstance(repeats, torch.Tensor):
repeat_inc_inner = repeat_inc[repeats > 0][:-1]
else:
repeat_inc_inner = repeat_inc[:-1]
else:
insert_val += repeat_inc
repeat_inc_inner = repeat_inc
# Subtract the increments between groups
if isinstance(repeats, torch.Tensor):
repeats_inner = repeats[repeats > 0][:-1]
else:
repeats_inner = repeats
insert_val[r1[1:] != r1[:-1]] -= repeat_inc_inner * repeats_inner
# Assign index-offsetting values
id_ar[insert_index] = insert_val
if insert_dummy:
id_ar = id_ar[1:]
if continuous_indexing:
id_ar[0] -= 1
# Set start index now, in case of insertion due to leading repeats=0
id_ar[0] += start_idx
# Finally index into input array for the group repeated o/p
res = id_ar.cumsum(0)
return res
def calculate_interatomic_vectors(R, id_s, id_t, offsets_st):
"""
Calculate the vectors connecting the given atom pairs,
considering offsets from periodic boundary conditions (PBC).
Parameters
----------
R: Tensor, shape = (nAtoms, 3)
Atom positions.
id_s: Tensor, shape = (nEdges,)
Indices of the source atom of the edges.
id_t: Tensor, shape = (nEdges,)
Indices of the target atom of the edges.
    offsets_st: Tensor, shape = (nEdges, 3)
PBC offsets of the edges.
Subtract this from the correct direction.
Returns
-------
(D_st, V_st): tuple
D_st: Tensor, shape = (nEdges,)
Distance from atom t to s.
        V_st: Tensor, shape = (nEdges, 3)
Unit direction from atom t to s.
"""
Rs = R[id_s]
Rt = R[id_t]
    # Apply the periodic-boundary offset to the displacement if one is given
if offsets_st is None:
V_st = Rt - Rs # s -> t
else:
V_st = Rt - Rs + offsets_st # s -> t
D_st = torch.sqrt(torch.sum(V_st**2, dim=1))
V_st = V_st / D_st[..., None]
return D_st, V_st
def inner_product_normalized(x, y) -> torch.Tensor:
"""
Calculate the inner product between the given normalized vectors,
giving a result between -1 and 1.
"""
return torch.sum(x * y, dim=-1).clamp(min=-1, max=1)
def mask_neighbors(neighbors, edge_mask):
neighbors_old_indptr = torch.cat([neighbors.new_zeros(1), neighbors])
neighbors_old_indptr = torch.cumsum(neighbors_old_indptr, dim=0)
neighbors = segment_csr(edge_mask.long(), neighbors_old_indptr)
return neighbors
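# Illustrative sketch (hypothetical helper, not used by the model): reproduces the
# docstring examples of the indexing utilities and checks the clamped inner
# product on unit vectors. All inputs below are arbitrary assumptions.
def _indexing_utils_example() -> torch.Tensor:
    assert ragged_range(torch.tensor([1, 4, 2, 3])).tolist() == [0, 0, 1, 2, 3, 0, 1, 0, 1, 2]
    repeated = repeat_blocks(
        torch.tensor([1, 3, 2]),
        repeats=torch.tensor([3, 2, 3]),
        continuous_indexing=False,
    )
    assert repeated.tolist() == [0, 0, 0, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1]
    x = torch.nn.functional.normalize(torch.randn(5, 3), dim=-1)
    cos_aa = inner_product_normalized(x, x)  # all (numerically) one
    assert torch.allclose(cos_aa, torch.ones(5), atol=1e-5)
    return cos_aa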
| 9,228 | 31.960714 | 128 | py |
ocp | ocp-main/ocpmodels/models/gemnet/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/gemnet/layers/base_layers.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch
from ..initializers import he_orthogonal_init
class Dense(torch.nn.Module):
"""
Combines dense layer with scaling for swish activation.
Parameters
----------
    in_features: int
        Input embedding size.
    out_features: int
        Output embedding size.
    bias: bool
        True if use bias.
    activation: str
        Name of the activation function to use.
"""
def __init__(
self, in_features, out_features, bias: bool = False, activation=None
) -> None:
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
if isinstance(activation, str):
activation = activation.lower()
if activation in ["swish", "silu"]:
self._activation = ScaledSiLU()
elif activation == "siqu":
self._activation = SiQU()
elif activation is None:
self._activation = torch.nn.Identity()
else:
raise NotImplementedError(
"Activation function not implemented for GemNet (yet)."
)
def reset_parameters(self, initializer=he_orthogonal_init) -> None:
initializer(self.linear.weight)
if self.linear.bias is not None:
self.linear.bias.data.fill_(0)
def forward(self, x):
x = self.linear(x)
x = self._activation(x)
return x
class ScaledSiLU(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.scale_factor = 1 / 0.6
self._activation = torch.nn.SiLU()
def forward(self, x):
return self._activation(x) * self.scale_factor
class SiQU(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self._activation = torch.nn.SiLU()
def forward(self, x):
return x * self._activation(x)
class ResidualLayer(torch.nn.Module):
"""
Residual block with output scaled by 1/sqrt(2).
Parameters
----------
units: int
Output embedding size.
nLayers: int
Number of dense layers.
    layer_kwargs: dict
Keyword arguments for initializing the layers.
"""
def __init__(
self, units: int, nLayers: int = 2, layer=Dense, **layer_kwargs
) -> None:
super().__init__()
self.dense_mlp = torch.nn.Sequential(
*[
layer(
in_features=units,
out_features=units,
bias=False,
**layer_kwargs
)
for _ in range(nLayers)
]
)
self.inv_sqrt_2 = 1 / math.sqrt(2)
def forward(self, input):
x = self.dense_mlp(input)
x = input + x
x = x * self.inv_sqrt_2
return x
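# Illustrative sketch (hypothetical helper, not used by the model): a Dense layer
# with the scaled SiLU activation followed by a residual block. The feature sizes
# and batch size below are arbitrary assumptions.
def _base_layers_example() -> torch.Tensor:
    dense = Dense(in_features=16, out_features=32, activation="swish")
    residual = ResidualLayer(units=32, nLayers=2, activation="swish")
    x = torch.randn(8, 16)
    h = residual(dense(x))  # the residual block keeps the embedding size
    assert h.shape == (8, 32)
    return h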
| 3,001 | 24.87931 | 76 | py |
ocp | ocp-main/ocpmodels/models/gemnet/layers/atom_update_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch_scatter import scatter
from ocpmodels.modules.scaling import ScaleFactor
from ..initializers import he_orthogonal_init
from .base_layers import Dense, ResidualLayer
class AtomUpdateBlock(torch.nn.Module):
"""
Aggregate the message embeddings of the atoms
Parameters
----------
emb_size_atom: int
Embedding size of the atoms.
    emb_size_edge: int
        Embedding size of the edges.
    emb_size_rbf: int
        Embedding size of the radial basis transformation.
nHidden: int
Number of residual blocks.
activation: callable/str
Name of the activation function to use in the dense layers.
"""
def __init__(
self,
emb_size_atom: int,
emb_size_edge: int,
emb_size_rbf: int,
nHidden: int,
activation=None,
name: str = "atom_update",
) -> None:
super().__init__()
self.name = name
self.dense_rbf = Dense(
emb_size_rbf, emb_size_edge, activation=None, bias=False
)
self.scale_sum = ScaleFactor(name + "_sum")
self.layers = self.get_mlp(
emb_size_edge, emb_size_atom, nHidden, activation
)
def get_mlp(self, units_in, units, nHidden, activation):
dense1 = Dense(units_in, units, activation=activation, bias=False)
mlp = [dense1]
res = [
ResidualLayer(units, nLayers=2, activation=activation)
for i in range(nHidden)
]
mlp += res
return torch.nn.ModuleList(mlp)
def forward(self, h, m, rbf, id_j):
"""
Returns
-------
h: torch.Tensor, shape=(nAtoms, emb_size_atom)
Atom embedding.
"""
nAtoms = h.shape[0]
mlp_rbf = self.dense_rbf(rbf) # (nEdges, emb_size_edge)
x = m * mlp_rbf
x2 = scatter(x, id_j, dim=0, dim_size=nAtoms, reduce="sum")
# (nAtoms, emb_size_edge)
x = self.scale_sum(x2, ref=m)
for layer in self.layers:
x = layer(x) # (nAtoms, emb_size_atom)
return x
class OutputBlock(AtomUpdateBlock):
"""
Combines the atom update block and subsequent final dense layer.
Parameters
----------
emb_size_atom: int
Embedding size of the atoms.
    emb_size_edge: int
        Embedding size of the edges.
nHidden: int
Number of residual blocks.
num_targets: int
Number of targets.
activation: str
Name of the activation function to use in the dense layers except for the final dense layer.
direct_forces: bool
If true directly predict forces without taking the gradient of the energy potential.
    output_init: str
Kernel initializer of the final dense layer.
"""
def __init__(
self,
emb_size_atom: int,
emb_size_edge: int,
emb_size_rbf: int,
nHidden: int,
num_targets: int,
activation=None,
direct_forces: bool = True,
output_init: str = "HeOrthogonal",
name: str = "output",
**kwargs,
) -> None:
super().__init__(
name=name,
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=nHidden,
activation=activation,
**kwargs,
)
assert isinstance(output_init, str)
self.output_init = output_init.lower()
self.direct_forces = direct_forces
self.seq_energy = self.layers # inherited from parent class
self.out_energy = Dense(
emb_size_atom, num_targets, bias=False, activation=None
)
if self.direct_forces:
self.scale_rbf_F = ScaleFactor(name + "_had")
self.seq_forces = self.get_mlp(
emb_size_edge, emb_size_edge, nHidden, activation
)
self.out_forces = Dense(
emb_size_edge, num_targets, bias=False, activation=None
)
self.dense_rbf_F = Dense(
emb_size_rbf, emb_size_edge, activation=None, bias=False
)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.output_init == "heorthogonal":
self.out_energy.reset_parameters(he_orthogonal_init)
if self.direct_forces:
self.out_forces.reset_parameters(he_orthogonal_init)
elif self.output_init == "zeros":
self.out_energy.reset_parameters(torch.nn.init.zeros_)
if self.direct_forces:
self.out_forces.reset_parameters(torch.nn.init.zeros_)
else:
raise UserWarning(f"Unknown output_init: {self.output_init}")
def forward(self, h, m, rbf, id_j):
"""
Returns
-------
(E, F): tuple
- E: torch.Tensor, shape=(nAtoms, num_targets)
- F: torch.Tensor, shape=(nEdges, num_targets)
Energy and force prediction
"""
nAtoms = h.shape[0]
# -------------------------------------- Energy Prediction -------------------------------------- #
rbf_emb_E = self.dense_rbf(rbf) # (nEdges, emb_size_edge)
x = m * rbf_emb_E
x_E = scatter(x, id_j, dim=0, dim_size=nAtoms, reduce="sum")
# (nAtoms, emb_size_edge)
x_E = self.scale_sum(x_E, ref=m)
for layer in self.seq_energy:
x_E = layer(x_E) # (nAtoms, emb_size_atom)
x_E = self.out_energy(x_E) # (nAtoms, num_targets)
# --------------------------------------- Force Prediction -------------------------------------- #
if self.direct_forces:
x_F = m
for i, layer in enumerate(self.seq_forces):
x_F = layer(x_F) # (nEdges, emb_size_edge)
rbf_emb_F = self.dense_rbf_F(rbf) # (nEdges, emb_size_edge)
x_F_rbf = x_F * rbf_emb_F
x_F = self.scale_rbf_F(x_F_rbf, ref=x_F)
x_F = self.out_forces(x_F) # (nEdges, num_targets)
else:
x_F = 0
# ----------------------------------------------------------------------------------------------- #
return x_E, x_F
| 6,443 | 30.281553 | 107 | py |
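A small sketch (torch_scatter only, shapes illustrative) of the aggregation pattern in AtomUpdateBlock.forward: edge messages gated by the RBF projection are summed onto their target atoms.

import torch
from torch_scatter import scatter

nAtoms, nEdges, emb_size_edge = 4, 10, 8
m = torch.randn(nEdges, emb_size_edge)        # edge embeddings
mlp_rbf = torch.randn(nEdges, emb_size_edge)  # stands in for self.dense_rbf(rbf)
id_j = torch.randint(0, nAtoms, (nEdges,))    # target atom of each edge

x = m * mlp_rbf                                             # (nEdges, emb_size_edge)
h = scatter(x, id_j, dim=0, dim_size=nAtoms, reduce="sum")  # (nAtoms, emb_size_edge)
print(h.shape)  # torch.Size([4, 8])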
ocp | ocp-main/ocpmodels/models/gemnet/layers/embedding_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
from .base_layers import Dense
class AtomEmbedding(torch.nn.Module):
"""
Initial atom embeddings based on the atom type
Parameters
----------
    emb_size: int
        Atom embeddings size
    num_elements: int
        Number of elements (size of the embedding lookup table)
"""
def __init__(self, emb_size, num_elements: int) -> None:
super().__init__()
self.emb_size = emb_size
self.embeddings = torch.nn.Embedding(num_elements, emb_size)
# init by uniform distribution
torch.nn.init.uniform_(
self.embeddings.weight, a=-np.sqrt(3), b=np.sqrt(3)
)
def forward(self, Z):
"""
Returns
-------
h: torch.Tensor, shape=(nAtoms, emb_size)
Atom embeddings.
"""
h = self.embeddings(Z - 1) # -1 because Z.min()=1 (==Hydrogen)
return h
class EdgeEmbedding(torch.nn.Module):
"""
Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.
Parameters
----------
    atom_features: int
        Embedding size of the atom embeddings.
    edge_features: int
        Size of the incoming edge features (e.g. the radial basis).
    out_features: int
        Embedding size after the dense layer.
    activation: str
        Activation function used in the dense layer.
"""
def __init__(
self,
atom_features,
edge_features,
out_features,
activation=None,
) -> None:
super().__init__()
in_features = 2 * atom_features + edge_features
self.dense = Dense(
in_features, out_features, activation=activation, bias=False
)
def forward(
self,
h,
m_rbf,
idx_s,
idx_t,
):
"""
Arguments
---------
h
m_rbf: shape (nEdges, nFeatures)
in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st
idx_s
idx_t
Returns
-------
m_st: torch.Tensor, shape=(nEdges, emb_size)
Edge embeddings.
"""
h_s = h[idx_s] # shape=(nEdges, emb_size)
h_t = h[idx_t] # shape=(nEdges, emb_size)
m_st = torch.cat(
[h_s, h_t, m_rbf], dim=-1
) # (nEdges, 2*emb_size+nFeatures)
m_st = self.dense(m_st) # (nEdges, emb_size)
return m_st
| 2,424 | 23.25 | 92 | py |
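Usage sketch, assuming ocpmodels is importable: atomic numbers are embedded per atom, then source/target atom embeddings are concatenated with a per-edge feature vector and projected to the edge embedding size. The toy indices below are made up.

import torch
from ocpmodels.models.gemnet.layers.embedding_block import AtomEmbedding, EdgeEmbedding

atom_emb = AtomEmbedding(emb_size=16, num_elements=83)
edge_emb = EdgeEmbedding(atom_features=16, edge_features=6, out_features=32, activation="silu")

Z = torch.tensor([1, 8, 8])              # atomic numbers (Z >= 1)
h = atom_emb(Z)                          # (3, 16)

idx_s = torch.tensor([0, 0, 1, 2])       # source atom of each edge
idx_t = torch.tensor([1, 2, 0, 0])       # target atom of each edge
m_rbf = torch.randn(4, 6)                # per-edge radial basis features

m_st = edge_emb(h, m_rbf, idx_s, idx_t)  # (4, 32)
print(m_st.shape)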
ocp | ocp-main/ocpmodels/models/gemnet/layers/radial_basis.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import Dict, Union
import numpy as np
import torch
from scipy.special import binom
from torch_geometric.nn.models.schnet import GaussianSmearing
class PolynomialEnvelope(torch.nn.Module):
"""
Polynomial envelope function that ensures a smooth cutoff.
Parameters
----------
exponent: int
Exponent of the envelope function.
"""
def __init__(self, exponent: int) -> None:
super().__init__()
assert exponent > 0
self.p = exponent
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
env_val = (
1
+ self.a * d_scaled**self.p
+ self.b * d_scaled ** (self.p + 1)
+ self.c * d_scaled ** (self.p + 2)
)
return torch.where(d_scaled < 1, env_val, torch.zeros_like(d_scaled))
class ExponentialEnvelope(torch.nn.Module):
"""
Exponential envelope function that ensures a smooth cutoff,
as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021.
SpookyNet: Learning Force Fields with Electronic Degrees of Freedom
and Nonlocal Effects
"""
def __init__(self) -> None:
super().__init__()
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
env_val = torch.exp(
-(d_scaled**2) / ((1 - d_scaled) * (1 + d_scaled))
)
return torch.where(d_scaled < 1, env_val, torch.zeros_like(d_scaled))
class SphericalBesselBasis(torch.nn.Module):
"""
1D spherical Bessel basis
Parameters
----------
num_radial: int
Controls maximum frequency.
cutoff: float
Cutoff distance in Angstrom.
"""
def __init__(
self,
num_radial: int,
cutoff: float,
) -> None:
super().__init__()
self.norm_const = math.sqrt(2 / (cutoff**3))
# cutoff ** 3 to counteract dividing by d_scaled = d / cutoff
# Initialize frequencies at canonical positions
self.frequencies = torch.nn.Parameter(
data=torch.tensor(
np.pi * np.arange(1, num_radial + 1, dtype=np.float32)
),
requires_grad=True,
)
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
return (
self.norm_const
/ d_scaled[:, None]
* torch.sin(self.frequencies * d_scaled[:, None])
) # (num_edges, num_radial)
class BernsteinBasis(torch.nn.Module):
"""
Bernstein polynomial basis,
as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021.
SpookyNet: Learning Force Fields with Electronic Degrees of Freedom
and Nonlocal Effects
Parameters
----------
num_radial: int
Controls maximum frequency.
pregamma_initial: float
Initial value of exponential coefficient gamma.
Default: gamma = 0.5 * a_0**-1 = 0.94486,
        inverse softplus -> pregamma = log(e**gamma - 1) = 0.45264
"""
def __init__(
self,
num_radial: int,
pregamma_initial: float = 0.45264,
) -> None:
super().__init__()
prefactor = binom(num_radial - 1, np.arange(num_radial))
self.register_buffer(
"prefactor",
torch.tensor(prefactor, dtype=torch.float),
persistent=False,
)
self.pregamma = torch.nn.Parameter(
data=torch.tensor(pregamma_initial, dtype=torch.float),
requires_grad=True,
)
self.softplus = torch.nn.Softplus()
exp1 = torch.arange(num_radial)
self.register_buffer("exp1", exp1[None, :], persistent=False)
exp2 = num_radial - 1 - exp1
self.register_buffer("exp2", exp2[None, :], persistent=False)
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
gamma = self.softplus(self.pregamma) # constrain to positive
exp_d = torch.exp(-gamma * d_scaled)[:, None]
return (
self.prefactor * (exp_d**self.exp1) * ((1 - exp_d) ** self.exp2)
)
class RadialBasis(torch.nn.Module):
"""
Parameters
----------
num_radial: int
Controls maximum frequency.
cutoff: float
Cutoff distance in Angstrom.
rbf: dict = {"name": "gaussian"}
Basis function and its hyperparameters.
envelope: dict = {"name": "polynomial", "exponent": 5}
Envelope function and its hyperparameters.
"""
def __init__(
self,
num_radial: int,
cutoff: float,
rbf: Dict[str, str] = {"name": "gaussian"},
envelope: Dict[str, Union[str, int]] = {
"name": "polynomial",
"exponent": 5,
},
) -> None:
super().__init__()
self.inv_cutoff = 1 / cutoff
env_name = envelope["name"].lower()
env_hparams = envelope.copy()
del env_hparams["name"]
self.envelope: Union[PolynomialEnvelope, ExponentialEnvelope]
if env_name == "polynomial":
self.envelope = PolynomialEnvelope(**env_hparams)
elif env_name == "exponential":
self.envelope = ExponentialEnvelope(**env_hparams)
else:
raise ValueError(f"Unknown envelope function '{env_name}'.")
rbf_name = rbf["name"].lower()
rbf_hparams = rbf.copy()
del rbf_hparams["name"]
# RBFs get distances scaled to be in [0, 1]
if rbf_name == "gaussian":
self.rbf = GaussianSmearing(
start=0, stop=1, num_gaussians=num_radial, **rbf_hparams
)
elif rbf_name == "spherical_bessel":
self.rbf = SphericalBesselBasis(
num_radial=num_radial, cutoff=cutoff, **rbf_hparams
)
elif rbf_name == "bernstein":
self.rbf = BernsteinBasis(num_radial=num_radial, **rbf_hparams)
else:
raise ValueError(f"Unknown radial basis function '{rbf_name}'.")
def forward(self, d):
d_scaled = d * self.inv_cutoff
env = self.envelope(d_scaled)
return env[:, None] * self.rbf(d_scaled) # (nEdges, num_radial)
| 6,434 | 29.353774 | 77 | py |
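Usage sketch for RadialBasis (assumes ocpmodels is importable): distances are scaled by 1/cutoff, expanded in the chosen basis, and multiplied by a smooth envelope that vanishes at the cutoff.

import torch
from ocpmodels.models.gemnet.layers.radial_basis import RadialBasis

radial_basis = RadialBasis(
    num_radial=8,
    cutoff=6.0,
    rbf={"name": "gaussian"},
    envelope={"name": "polynomial", "exponent": 5},
)
d = torch.tensor([0.9, 2.5, 5.9, 7.2])  # edge lengths in Angstrom
out = radial_basis(d)                   # (nEdges, num_radial) = (4, 8)
print(out.shape)
print(out[-1].abs().max().item())       # 0.0: the 7.2 A edge lies beyond the cutoff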
ocp | ocp-main/ocpmodels/models/gemnet/layers/basis_utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import numpy as np
import sympy as sym
from scipy import special as sp
from scipy.optimize import brentq
def Jn(r, n):
"""
numerical spherical bessel functions of order n
"""
return sp.spherical_jn(n, r)
def Jn_zeros(n: int, k: int):
"""
Compute the first k zeros of the spherical bessel functions up to order n (excluded)
"""
zerosj = np.zeros((n, k), dtype="float32")
zerosj[0] = np.arange(1, k + 1) * np.pi
points = np.arange(1, k + n) * np.pi
racines = np.zeros(k + n - 1, dtype="float32")
for i in range(1, n):
for j in range(k + n - 1 - i):
foo = brentq(Jn, points[j], points[j + 1], (i,))
racines[j] = foo
points = racines
zerosj[i][:k] = racines[:k]
return zerosj
def spherical_bessel_formulas(n: int):
"""
Computes the sympy formulas for the spherical bessel functions up to order n (excluded)
"""
x = sym.symbols("x")
    # j_i = (-x)^i * (1/x * d/dx)^i * sin(x)/x
j = [sym.sin(x) / x] # j_0
a = sym.sin(x) / x
for i in range(1, n):
b = sym.diff(a, x) / x
j += [sym.simplify(b * (-x) ** i)]
a = sym.simplify(b)
return j
def bessel_basis(n: int, k: int):
"""
Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to
order n (excluded) and maximum frequency k (excluded).
Returns:
bess_basis: list
Bessel basis formulas taking in a single argument x.
Has length n where each element has length k. -> In total n*k many.
"""
zeros = Jn_zeros(n, k)
normalizer = []
for order in range(n):
normalizer_tmp = []
for i in range(k):
normalizer_tmp += [0.5 * Jn(zeros[order, i], order + 1) ** 2]
normalizer_tmp = (
1 / np.array(normalizer_tmp) ** 0.5
) # sqrt(2/(j_l+1)**2) , sqrt(1/c**3) not taken into account yet
normalizer += [normalizer_tmp]
f = spherical_bessel_formulas(n)
x = sym.symbols("x")
bess_basis = []
for order in range(n):
bess_basis_tmp = []
for i in range(k):
bess_basis_tmp += [
sym.simplify(
normalizer[order][i]
* f[order].subs(x, zeros[order, i] * x)
)
]
bess_basis += [bess_basis_tmp]
return bess_basis
def sph_harm_prefactor(l_degree: int, m_order: int):
"""Computes the constant pre-factor for the spherical harmonic of degree l and order m.
Parameters
----------
l_degree: int
Degree of the spherical harmonic. l >= 0
m_order: int
Order of the spherical harmonic. -l <= m <= l
Returns
-------
factor: float
"""
    # sqrt((2*l+1) / (4*pi) * (l-m)! / (l+m)!)
return (
(2 * l_degree + 1)
/ (4 * np.pi)
* math.factorial(l_degree - abs(m_order))
/ math.factorial(l_degree + abs(m_order))
) ** 0.5
def associated_legendre_polynomials(
L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True
):
"""Computes string formulas of the associated legendre polynomials up to degree L (excluded).
Parameters
----------
L_maxdegree: int
Degree up to which to calculate the associated legendre polynomials (degree L is excluded).
zero_m_only: bool
If True only calculate the polynomials for the polynomials where m=0.
pos_m_only: bool
If True only calculate the polynomials for the polynomials where m>=0. Overwritten by zero_m_only.
Returns
-------
polynomials: list
Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many).
"""
# calculations from http://web.cmb.usc.edu/people/alber/Software/tomominer/docs/cpp/group__legendre__polynomials.html
z = sym.symbols("z")
P_l_m = [
[0] * (2 * l_degree + 1) for l_degree in range(L_maxdegree)
] # for order l: -l <= m <= l
P_l_m[0][0] = 1
if L_maxdegree > 0:
if zero_m_only:
# m = 0
P_l_m[1][0] = z
for l_degree in range(2, L_maxdegree):
P_l_m[l_degree][0] = sym.simplify(
(
(2 * l_degree - 1) * z * P_l_m[l_degree - 1][0]
- (l_degree - 1) * P_l_m[l_degree - 2][0]
)
/ l_degree
)
return P_l_m
else:
# for m >= 0
for l_degree in range(1, L_maxdegree):
P_l_m[l_degree][l_degree] = sym.simplify(
(1 - 2 * l_degree)
* (1 - z**2) ** 0.5
* P_l_m[l_degree - 1][l_degree - 1]
) # P_00, P_11, P_22, P_33
for m_order in range(0, L_maxdegree - 1):
P_l_m[m_order + 1][m_order] = sym.simplify(
(2 * m_order + 1) * z * P_l_m[m_order][m_order]
) # P_10, P_21, P_32, P_43
for l_degree in range(2, L_maxdegree):
for m_order in range(l_degree - 1): # P_20, P_30, P_31
P_l_m[l_degree][m_order] = sym.simplify(
(
(2 * l_degree - 1)
* z
* P_l_m[l_degree - 1][m_order]
- (l_degree + m_order - 1)
* P_l_m[l_degree - 2][m_order]
)
/ (l_degree - m_order)
)
if not pos_m_only:
# for m < 0: P_l(-m) = (-1)^m * (l-m)!/(l+m)! * P_lm
for l_degree in range(1, L_maxdegree):
for m_order in range(
1, l_degree + 1
): # P_1(-1), P_2(-1) P_2(-2)
P_l_m[l_degree][-m_order] = sym.simplify(
(-1) ** m_order
* math.factorial(l_degree - m_order)
/ math.factorial(l_degree + m_order)
* P_l_m[l_degree][m_order]
)
return P_l_m
def real_sph_harm(
L_maxdegree: int,
use_theta: bool,
use_phi: bool = True,
zero_m_only: bool = True,
):
"""
    Computes formula strings of the real part of the spherical harmonics up to degree L (excluded).
Variables are either spherical coordinates phi and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE.
Parameters
----------
L_maxdegree: int
Degree up to which to calculate the spherical harmonics (degree L is excluded).
use_theta: bool
- True: Expects the input of the formula strings to contain theta.
- False: Expects the input of the formula strings to contain z.
use_phi: bool
- True: Expects the input of the formula strings to contain phi.
- False: Expects the input of the formula strings to contain x and y.
Does nothing if zero_m_only is True
zero_m_only: bool
If True only calculate the harmonics where m=0.
Returns
-------
Y_lm_real: list
        Contains the formula strings of the real part of the spherical harmonics up
        to degree L (degree L is excluded).
        In total L^2 many spherical harmonics exist up to degree L (excluded). However, if zero_m_only is True then
        the total count is reduced to only L many.
"""
z = sym.symbols("z")
P_l_m = associated_legendre_polynomials(L_maxdegree, zero_m_only)
if zero_m_only:
# for all m != 0: Y_lm = 0
Y_l_m = [[0] for l_degree in range(L_maxdegree)]
else:
Y_l_m = [
[0] * (2 * l_degree + 1) for l_degree in range(L_maxdegree)
] # for order l: -l <= m <= l
    # convert expressions to spherical coordinates
if use_theta:
# replace z by cos(theta)
theta = sym.symbols("theta")
for l_degree in range(L_maxdegree):
for m_order in range(len(P_l_m[l_degree])):
if not isinstance(P_l_m[l_degree][m_order], int):
P_l_m[l_degree][m_order] = P_l_m[l_degree][m_order].subs(
z, sym.cos(theta)
)
## calculate Y_lm
# Y_lm = N * P_lm(cos(theta)) * exp(i*m*phi)
# { sqrt(2) * (-1)^m * N * P_l|m| * sin(|m|*phi) if m < 0
# Y_lm_real = { Y_lm if m = 0
# { sqrt(2) * (-1)^m * N * P_lm * cos(m*phi) if m > 0
for l_degree in range(L_maxdegree):
Y_l_m[l_degree][0] = sym.simplify(
sph_harm_prefactor(l_degree, 0) * P_l_m[l_degree][0]
) # Y_l0
if not zero_m_only:
phi = sym.symbols("phi")
for l_degree in range(1, L_maxdegree):
# m > 0
for m_order in range(1, l_degree + 1):
Y_l_m[l_degree][m_order] = sym.simplify(
2**0.5
* (-1) ** m_order
* sph_harm_prefactor(l_degree, m_order)
* P_l_m[l_degree][m_order]
* sym.cos(m_order * phi)
)
# m < 0
for m_order in range(1, l_degree + 1):
Y_l_m[l_degree][-m_order] = sym.simplify(
2**0.5
* (-1) ** m_order
* sph_harm_prefactor(l_degree, -m_order)
* P_l_m[l_degree][m_order]
* sym.sin(m_order * phi)
)
# convert expressions to cartesian coordinates
if not use_phi:
# replace phi by atan2(y,x)
x = sym.symbols("x")
y = sym.symbols("y")
for l_degree in range(L_maxdegree):
for m_order in range(len(Y_l_m[l_degree])):
Y_l_m[l_degree][m_order] = sym.simplify(
Y_l_m[l_degree][m_order].subs(phi, sym.atan2(y, x))
)
return Y_l_m
| 10,403 | 34.148649 | 121 | py |
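Two quick sanity checks on the symbolic helpers above (assuming they are importable): the zeros of j_0 sit at multiples of pi, and with zero_m_only the real spherical harmonics reduce to polynomials in z = cos(theta).

import numpy as np
from ocpmodels.models.gemnet.layers.basis_utils import Jn_zeros, real_sph_harm

zeros = Jn_zeros(n=2, k=3)
print(np.allclose(zeros[0], np.pi * np.arange(1, 4)))  # True: j_0 vanishes at k*pi

Y = real_sph_harm(L_maxdegree=3, use_theta=False, zero_m_only=True)
print(Y[1][0])  # l=1, m=0 harmonic: a sympy expression proportional to z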
ocp | ocp-main/ocpmodels/models/gemnet/layers/spherical_basis.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import sympy as sym
import torch
from torch_geometric.nn.models.schnet import GaussianSmearing
from .basis_utils import real_sph_harm
from .radial_basis import RadialBasis
from ocpmodels.common.typing import assert_is_instance
class CircularBasisLayer(torch.nn.Module):
"""
2D Fourier Bessel Basis
Parameters
----------
num_spherical: int
Controls maximum frequency.
radial_basis: RadialBasis
Radial basis functions
cbf: dict
Name and hyperparameters of the cosine basis function
efficient: bool
Whether to use the "efficient" summation order
"""
def __init__(
self,
num_spherical: int,
radial_basis: RadialBasis,
cbf,
efficient: bool = False,
) -> None:
super().__init__()
self.radial_basis = radial_basis
self.efficient = efficient
cbf_name = assert_is_instance(cbf["name"], str).lower()
cbf_hparams = cbf.copy()
del cbf_hparams["name"]
if cbf_name == "gaussian":
self.cosφ_basis = GaussianSmearing(
start=-1, stop=1, num_gaussians=num_spherical, **cbf_hparams
)
elif cbf_name == "spherical_harmonics":
Y_lm = real_sph_harm(
num_spherical, use_theta=False, zero_m_only=True
)
sph_funcs = [] # (num_spherical,)
            # convert sympy expressions to torch functions
z = sym.symbols("z")
modules = {"sin": torch.sin, "cos": torch.cos, "sqrt": torch.sqrt}
m_order = 0 # only single angle
for l_degree in range(len(Y_lm)): # num_spherical
if (
l_degree == 0
): # Y_00 is only a constant -> function returns value and not tensor
first_sph = sym.lambdify(
[z], Y_lm[l_degree][m_order], modules
)
sph_funcs.append(
lambda z: torch.zeros_like(z) + first_sph(z)
)
else:
sph_funcs.append(
sym.lambdify([z], Y_lm[l_degree][m_order], modules)
)
self.cosφ_basis = lambda cosφ: torch.stack(
[f(cosφ) for f in sph_funcs], dim=1
)
else:
raise ValueError(f"Unknown cosine basis function '{cbf_name}'.")
def forward(self, D_ca, cosφ_cab, id3_ca):
rbf = self.radial_basis(D_ca) # (num_edges, num_radial)
cbf = self.cosφ_basis(cosφ_cab) # (num_triplets, num_spherical)
if not self.efficient:
rbf = rbf[id3_ca] # (num_triplets, num_radial)
out = (rbf[:, None, :] * cbf[:, :, None]).view(
-1, rbf.shape[-1] * cbf.shape[-1]
)
return (out,)
# (num_triplets, num_radial * num_spherical)
else:
return (rbf[None, :, :], cbf)
# (1, num_edges, num_radial), (num_edges, num_spherical)
| 3,221 | 32.216495 | 86 | py |
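A sketch of CircularBasisLayer in the non-efficient mode (assumes ocpmodels is importable): each triplet gets the outer product of the radial basis of its edge and the cosine basis of the enclosed angle. The toy indices are made up.

import torch
from ocpmodels.models.gemnet.layers.radial_basis import RadialBasis
from ocpmodels.models.gemnet.layers.spherical_basis import CircularBasisLayer

cbf3 = CircularBasisLayer(
    num_spherical=7,
    radial_basis=RadialBasis(num_radial=6, cutoff=6.0),
    cbf={"name": "spherical_harmonics"},
    efficient=False,
)

D_ca = torch.rand(5) * 6.0       # 5 edge lengths
cos_cab = torch.rand(3) * 2 - 1  # 3 triplets, cos(angle) in [-1, 1]
id3_ca = torch.tensor([0, 0, 2]) # edge index of each triplet
(out,) = cbf3(D_ca, cos_cab, id3_ca)
print(out.shape)                 # (num_triplets, num_radial * num_spherical) = (3, 42)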
ocp | ocp-main/ocpmodels/models/gemnet/layers/interaction_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch
from ocpmodels.modules.scaling.scale_factor import ScaleFactor
from .atom_update_block import AtomUpdateBlock
from .base_layers import Dense, ResidualLayer
from .efficient import EfficientInteractionBilinear
from .embedding_block import EdgeEmbedding
class InteractionBlockTripletsOnly(torch.nn.Module):
"""
Interaction block for GemNet-T/dT.
Parameters
----------
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
emb_size_trip: int
(Down-projected) Embedding size in the triplet message passing block.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
emb_size_bil_trip: int
Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.
num_before_skip: int
Number of residual blocks before the first skip connection.
num_after_skip: int
Number of residual blocks after the first skip connection.
num_concat: int
Number of residual blocks after the concatenation.
num_atom: int
Number of residual blocks in the atom embedding blocks.
activation: str
Name of the activation function to use in the dense layers except for the final dense layer.
"""
def __init__(
self,
emb_size_atom,
emb_size_edge,
emb_size_trip,
emb_size_rbf,
emb_size_cbf,
emb_size_bil_trip,
num_before_skip,
num_after_skip,
num_concat,
num_atom,
activation=None,
name="Interaction",
) -> None:
super().__init__()
self.name = name
block_nr = name.split("_")[-1]
## -------------------------------------------- Message Passing ------------------------------------------- ##
# Dense transformation of skip connection
self.dense_ca = Dense(
emb_size_edge,
emb_size_edge,
activation=activation,
bias=False,
)
# Triplet Interaction
self.trip_interaction = TripletInteraction(
emb_size_edge=emb_size_edge,
emb_size_trip=emb_size_trip,
emb_size_bilinear=emb_size_bil_trip,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
activation=activation,
name=f"TripInteraction_{block_nr}",
)
## ---------------------------------------- Update Edge Embeddings ---------------------------------------- ##
# Residual layers before skip connection
self.layers_before_skip = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_edge,
activation=activation,
)
for i in range(num_before_skip)
]
)
# Residual layers after skip connection
self.layers_after_skip = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_edge,
activation=activation,
)
for i in range(num_after_skip)
]
)
## ---------------------------------------- Update Atom Embeddings ---------------------------------------- ##
self.atom_update = AtomUpdateBlock(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=num_atom,
activation=activation,
name=f"AtomUpdate_{block_nr}",
)
## ------------------------------ Update Edge Embeddings with Atom Embeddings ----------------------------- ##
self.concat_layer = EdgeEmbedding(
emb_size_atom,
emb_size_edge,
emb_size_edge,
activation=activation,
)
self.residual_m = torch.nn.ModuleList(
[
ResidualLayer(emb_size_edge, activation=activation)
for _ in range(num_concat)
]
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
h,
m,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
rbf_h,
idx_s,
idx_t,
):
"""
Returns
-------
h: torch.Tensor, shape=(nEdges, emb_size_atom)
Atom embeddings.
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings (c->a).
"""
# Initial transformation
x_ca_skip = self.dense_ca(m) # (nEdges, emb_size_edge)
x3 = self.trip_interaction(
m,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
)
## ----------------------------- Merge Embeddings after Triplet Interaction ------------------------------ ##
x = x_ca_skip + x3 # (nEdges, emb_size_edge)
x = x * self.inv_sqrt_2
## ---------------------------------------- Update Edge Embeddings --------------------------------------- ##
# Transformations before skip connection
for _, layer in enumerate(self.layers_before_skip):
x = layer(x) # (nEdges, emb_size_edge)
# Skip connection
m = m + x # (nEdges, emb_size_edge)
m = m * self.inv_sqrt_2
# Transformations after skip connection
for _, layer in enumerate(self.layers_after_skip):
m = layer(m) # (nEdges, emb_size_edge)
## ---------------------------------------- Update Atom Embeddings --------------------------------------- ##
h2 = self.atom_update(h, m, rbf_h, idx_t)
# Skip connection
h = h + h2 # (nAtoms, emb_size_atom)
h = h * self.inv_sqrt_2
## ----------------------------- Update Edge Embeddings with Atom Embeddings ----------------------------- ##
m2 = self.concat_layer(h, m, idx_s, idx_t) # (nEdges, emb_size_edge)
for _, layer in enumerate(self.residual_m):
m2 = layer(m2) # (nEdges, emb_size_edge)
# Skip connection
m = m + m2 # (nEdges, emb_size_edge)
m = m * self.inv_sqrt_2
return h, m
class TripletInteraction(torch.nn.Module):
"""
Triplet-based message passing block.
Parameters
----------
emb_size_edge: int
Embedding size of the edges.
emb_size_trip: int
(Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf.
emb_size_bilinear: int
Embedding size of the edge embeddings after the bilinear layer.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
activation: str
Name of the activation function to use in the dense layers except for the final dense layer.
"""
def __init__(
self,
emb_size_edge,
emb_size_trip,
emb_size_bilinear,
emb_size_rbf,
emb_size_cbf,
activation=None,
name="TripletInteraction",
**kwargs,
) -> None:
super().__init__()
self.name = name
# Dense transformation
self.dense_ba = Dense(
emb_size_edge,
emb_size_edge,
activation=activation,
bias=False,
)
# Up projections of basis representations, bilinear layer and scaling factors
self.mlp_rbf = Dense(
emb_size_rbf,
emb_size_edge,
activation=None,
bias=False,
)
self.scale_rbf = ScaleFactor(name + "_had_rbf")
self.mlp_cbf = EfficientInteractionBilinear(
emb_size_trip, emb_size_cbf, emb_size_bilinear
)
# combines scaling for bilinear layer and summation
self.scale_cbf_sum = ScaleFactor(name + "_sum_cbf")
# Down and up projections
self.down_projection = Dense(
emb_size_edge,
emb_size_trip,
activation=activation,
bias=False,
)
self.up_projection_ca = Dense(
emb_size_bilinear,
emb_size_edge,
activation=activation,
bias=False,
)
self.up_projection_ac = Dense(
emb_size_bilinear,
emb_size_edge,
activation=activation,
bias=False,
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
m,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
):
"""
Returns
-------
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings (c->a).
"""
# Dense transformation
x_ba = self.dense_ba(m) # (nEdges, emb_size_edge)
# Transform via radial bessel basis
rbf_emb = self.mlp_rbf(rbf3) # (nEdges, emb_size_edge)
x_ba2 = x_ba * rbf_emb
x_ba = self.scale_rbf(x_ba2, ref=x_ba)
x_ba = self.down_projection(x_ba) # (nEdges, emb_size_trip)
# Transform via circular spherical basis
x_ba = x_ba[id3_ba]
# Efficient bilinear layer
x = self.mlp_cbf(cbf3, x_ba, id3_ca, id3_ragged_idx)
# (nEdges, emb_size_quad)
x = self.scale_cbf_sum(x, ref=x_ba)
# =>
# rbf(d_ba)
# cbf(d_ca, angle_cab)
# Up project embeddings
x_ca = self.up_projection_ca(x) # (nEdges, emb_size_edge)
x_ac = self.up_projection_ac(x) # (nEdges, emb_size_edge)
# Merge interaction of c->a and a->c
x_ac = x_ac[id_swap] # swap to add to edge a->c and not c->a
x3 = x_ca + x_ac
x3 = x3 * self.inv_sqrt_2
return x3
| 10,381 | 29.356725 | 118 | py |
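The block above repeatedly merges two roughly unit-variance branches and rescales by 1/sqrt(2) so the variance of the sum stays near one; a tiny numerical illustration of that invariant:

import math
import torch

a = torch.randn(100_000)  # stand-in for the skip branch
b = torch.randn(100_000)  # stand-in for the triplet-interaction branch
merged = (a + b) * (1 / math.sqrt(2.0))
print(round(a.var().item(), 2), round(merged.var().item(), 2))  # both ~1.0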
ocp | ocp-main/ocpmodels/models/gemnet/layers/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/gemnet/layers/efficient.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from ..initializers import he_orthogonal_init
class EfficientInteractionDownProjection(torch.nn.Module):
"""
Down projection in the efficient reformulation.
Parameters
----------
    num_spherical: int
        Number of spherical basis functions.
    num_radial: int
        Number of radial basis functions.
    emb_size_interm: int
        Intermediate embedding size (down-projection size).
"""
def __init__(
self,
num_spherical: int,
num_radial: int,
emb_size_interm: int,
) -> None:
super().__init__()
self.num_spherical = num_spherical
self.num_radial = num_radial
self.emb_size_interm = emb_size_interm
self.reset_parameters()
def reset_parameters(self) -> None:
self.weight = torch.nn.Parameter(
torch.empty(
(self.num_spherical, self.num_radial, self.emb_size_interm)
),
requires_grad=True,
)
he_orthogonal_init(self.weight)
def forward(self, rbf, sph, id_ca, id_ragged_idx):
"""
Arguments
---------
rbf: torch.Tensor, shape=(1, nEdges, num_radial)
sph: torch.Tensor, shape=(nEdges, Kmax, num_spherical)
id_ca
id_ragged_idx
Returns
-------
rbf_W1: torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical)
sph: torch.Tensor, shape=(nEdges, Kmax, num_spherical)
Kmax = maximum number of neighbors of the edges
"""
num_edges = rbf.shape[1]
# MatMul: mul + sum over num_radial
rbf_W1 = torch.matmul(rbf, self.weight)
# (num_spherical, nEdges , emb_size_interm)
rbf_W1 = rbf_W1.permute(1, 2, 0)
# (nEdges, emb_size_interm, num_spherical)
# Zero padded dense matrix
# maximum number of neighbors, catch empty id_ca with maximum
if sph.shape[0] == 0:
Kmax = 0
else:
Kmax = torch.max(
torch.max(id_ragged_idx + 1),
torch.tensor(0).to(id_ragged_idx.device),
)
sph2 = sph.new_zeros(num_edges, Kmax, self.num_spherical)
sph2[id_ca, id_ragged_idx] = sph
sph2 = torch.transpose(sph2, 1, 2)
# (nEdges, num_spherical/emb_size_interm, Kmax)
return rbf_W1, sph2
class EfficientInteractionBilinear(torch.nn.Module):
"""
Efficient reformulation of the bilinear layer and subsequent summation.
Parameters
----------
    emb_size: int
        Embedding size of the edge embeddings.
    emb_size_interm: int
        Intermediate (down-projected) embedding size.
    units_out: int
        Embedding output size of the bilinear layer.
"""
def __init__(
self,
emb_size: int,
emb_size_interm: int,
units_out: int,
) -> None:
super().__init__()
self.emb_size = emb_size
self.emb_size_interm = emb_size_interm
self.units_out = units_out
self.reset_parameters()
def reset_parameters(self) -> None:
self.weight = torch.nn.Parameter(
torch.empty(
(self.emb_size, self.emb_size_interm, self.units_out),
requires_grad=True,
)
)
he_orthogonal_init(self.weight)
def forward(
self,
basis,
m,
id_reduce,
id_ragged_idx,
) -> torch.Tensor:
"""
Arguments
---------
basis
m: quadruplets: m = m_db , triplets: m = m_ba
id_reduce
id_ragged_idx
Returns
-------
m_ca: torch.Tensor, shape=(nEdges, units_out)
Edge embeddings.
"""
# num_spherical is actually num_spherical**2 for quadruplets
(rbf_W1, sph) = basis
# (nEdges, emb_size_interm, num_spherical), (nEdges, num_spherical, Kmax)
nEdges = rbf_W1.shape[0]
# Create (zero-padded) dense matrix of the neighboring edge embeddings.
Kmax = torch.max(
torch.max(id_ragged_idx) + 1,
torch.tensor(0).to(id_ragged_idx.device),
)
# maximum number of neighbors, catch empty id_reduce_ji with maximum
m2 = m.new_zeros(nEdges, Kmax, self.emb_size)
m2[id_reduce, id_ragged_idx] = m
# (num_quadruplets or num_triplets, emb_size) -> (nEdges, Kmax, emb_size)
sum_k = torch.matmul(sph, m2) # (nEdges, num_spherical, emb_size)
# MatMul: mul + sum over num_spherical
rbf_W1_sum_k = torch.matmul(rbf_W1, sum_k)
# (nEdges, emb_size_interm, emb_size)
# Bilinear: Sum over emb_size_interm and emb_size
m_ca = torch.matmul(rbf_W1_sum_k.permute(2, 0, 1), self.weight)
# (emb_size, nEdges, units_out)
m_ca = torch.sum(m_ca, dim=0)
# (nEdges, units_out)
return m_ca
| 5,005 | 27.770115 | 81 | py |
ocp | ocp-main/ocpmodels/models/painn/painn.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
---
MIT License
Copyright (c) 2021 www.compscience.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import math
import os
from typing import Dict, Optional, Tuple, Union
import torch
from torch import nn
from torch_geometric.nn import MessagePassing, radius_graph
from torch_scatter import scatter, segment_coo
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
compute_neighbors,
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.models.base import BaseModel
from ocpmodels.models.gemnet.layers.base_layers import ScaledSiLU
from ocpmodels.models.gemnet.layers.embedding_block import AtomEmbedding
from ocpmodels.models.gemnet.layers.radial_basis import RadialBasis
from ocpmodels.modules.scaling import ScaleFactor
from ocpmodels.modules.scaling.compat import load_scales_compat
from .utils import get_edge_id, repeat_blocks
@registry.register_model("painn")
class PaiNN(BaseModel):
r"""PaiNN model based on the description in Schütt et al. (2021):
Equivariant message passing for the prediction of tensorial properties
and molecular spectra, https://arxiv.org/abs/2102.03150.
"""
def __init__(
self,
num_atoms: int,
bond_feat_dim: int,
num_targets: int,
hidden_channels: int = 512,
num_layers: int = 6,
num_rbf: int = 128,
cutoff: float = 12.0,
max_neighbors: int = 50,
rbf: Dict[str, str] = {"name": "gaussian"},
envelope: Dict[str, Union[str, int]] = {
"name": "polynomial",
"exponent": 5,
},
regress_forces: bool = True,
direct_forces: bool = True,
use_pbc: bool = True,
otf_graph: bool = True,
num_elements: int = 83,
scale_file: Optional[str] = None,
) -> None:
super(PaiNN, self).__init__()
self.hidden_channels = hidden_channels
self.num_layers = num_layers
self.num_rbf = num_rbf
self.cutoff = cutoff
self.max_neighbors = max_neighbors
self.regress_forces = regress_forces
self.direct_forces = direct_forces
self.otf_graph = otf_graph
self.use_pbc = use_pbc
# Borrowed from GemNet.
self.symmetric_edge_symmetrization = False
#### Learnable parameters #############################################
self.atom_emb = AtomEmbedding(hidden_channels, num_elements)
self.radial_basis = RadialBasis(
num_radial=num_rbf,
cutoff=self.cutoff,
rbf=rbf,
envelope=envelope,
)
self.message_layers = nn.ModuleList()
self.update_layers = nn.ModuleList()
for i in range(num_layers):
self.message_layers.append(
PaiNNMessage(hidden_channels, num_rbf).jittable()
)
self.update_layers.append(PaiNNUpdate(hidden_channels))
setattr(self, "upd_out_scalar_scale_%d" % i, ScaleFactor())
self.out_energy = nn.Sequential(
nn.Linear(hidden_channels, hidden_channels // 2),
ScaledSiLU(),
nn.Linear(hidden_channels // 2, 1),
)
if self.regress_forces is True and self.direct_forces is True:
self.out_forces = PaiNNOutput(hidden_channels)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
self.reset_parameters()
load_scales_compat(self, scale_file)
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.out_energy[0].weight)
self.out_energy[0].bias.data.fill_(0)
nn.init.xavier_uniform_(self.out_energy[2].weight)
self.out_energy[2].bias.data.fill_(0)
# Borrowed from GemNet.
def select_symmetric_edges(
self, tensor, mask, reorder_idx, inverse_neg
) -> torch.Tensor:
# Mask out counter-edges
tensor_directed = tensor[mask]
# Concatenate counter-edges after normal edges
sign = 1 - 2 * inverse_neg
tensor_cat = torch.cat([tensor_directed, sign * tensor_directed])
# Reorder everything so the edges of every image are consecutive
tensor_ordered = tensor_cat[reorder_idx]
return tensor_ordered
# Borrowed from GemNet.
def symmetrize_edges(
self,
edge_index,
cell_offsets,
neighbors,
batch_idx,
reorder_tensors,
reorder_tensors_invneg,
):
"""
Symmetrize edges to ensure existence of counter-directional edges.
Some edges are only present in one direction in the data,
since every atom has a maximum number of neighbors.
If `symmetric_edge_symmetrization` is False,
we only use i->j edges here. So we lose some j->i edges
and add others by making it symmetric.
If `symmetric_edge_symmetrization` is True,
we always use both directions.
"""
num_atoms = batch_idx.shape[0]
if self.symmetric_edge_symmetrization:
edge_index_bothdir = torch.cat(
[edge_index, edge_index.flip(0)],
dim=1,
)
cell_offsets_bothdir = torch.cat(
[cell_offsets, -cell_offsets],
dim=0,
)
# Filter for unique edges
edge_ids = get_edge_id(
edge_index_bothdir, cell_offsets_bothdir, num_atoms
)
unique_ids, unique_inv = torch.unique(
edge_ids, return_inverse=True
)
perm = torch.arange(
unique_inv.size(0),
dtype=unique_inv.dtype,
device=unique_inv.device,
)
unique_idx = scatter(
perm,
unique_inv,
dim=0,
dim_size=unique_ids.shape[0],
reduce="min",
)
edge_index_new = edge_index_bothdir[:, unique_idx]
# Order by target index
edge_index_order = torch.argsort(edge_index_new[1])
edge_index_new = edge_index_new[:, edge_index_order]
unique_idx = unique_idx[edge_index_order]
# Subindex remaining tensors
cell_offsets_new = cell_offsets_bothdir[unique_idx]
reorder_tensors = [
self.symmetrize_tensor(tensor, unique_idx, False)
for tensor in reorder_tensors
]
reorder_tensors_invneg = [
self.symmetrize_tensor(tensor, unique_idx, True)
for tensor in reorder_tensors_invneg
]
# Count edges per image
# segment_coo assumes sorted edge_index_new[1] and batch_idx
ones = edge_index_new.new_ones(1).expand_as(edge_index_new[1])
neighbors_per_atom = segment_coo(
ones, edge_index_new[1], dim_size=num_atoms
)
neighbors_per_image = segment_coo(
neighbors_per_atom, batch_idx, dim_size=neighbors.shape[0]
)
else:
# Generate mask
mask_sep_atoms = edge_index[0] < edge_index[1]
# Distinguish edges between the same (periodic) atom by ordering the cells
cell_earlier = (
(cell_offsets[:, 0] < 0)
| ((cell_offsets[:, 0] == 0) & (cell_offsets[:, 1] < 0))
| (
(cell_offsets[:, 0] == 0)
& (cell_offsets[:, 1] == 0)
& (cell_offsets[:, 2] < 0)
)
)
mask_same_atoms = edge_index[0] == edge_index[1]
mask_same_atoms &= cell_earlier
mask = mask_sep_atoms | mask_same_atoms
# Mask out counter-edges
edge_index_new = edge_index[mask[None, :].expand(2, -1)].view(
2, -1
)
# Concatenate counter-edges after normal edges
edge_index_cat = torch.cat(
[edge_index_new, edge_index_new.flip(0)],
dim=1,
)
# Count remaining edges per image
batch_edge = torch.repeat_interleave(
torch.arange(neighbors.size(0), device=edge_index.device),
neighbors,
)
batch_edge = batch_edge[mask]
# segment_coo assumes sorted batch_edge
# Factor 2 since this is only one half of the edges
ones = batch_edge.new_ones(1).expand_as(batch_edge)
neighbors_per_image = 2 * segment_coo(
ones, batch_edge, dim_size=neighbors.size(0)
)
# Create indexing array
edge_reorder_idx = repeat_blocks(
torch.div(neighbors_per_image, 2, rounding_mode="floor"),
repeats=2,
continuous_indexing=True,
repeat_inc=edge_index_new.size(1),
)
# Reorder everything so the edges of every image are consecutive
edge_index_new = edge_index_cat[:, edge_reorder_idx]
cell_offsets_new = self.select_symmetric_edges(
cell_offsets, mask, edge_reorder_idx, True
)
reorder_tensors = [
self.select_symmetric_edges(
tensor, mask, edge_reorder_idx, False
)
for tensor in reorder_tensors
]
reorder_tensors_invneg = [
self.select_symmetric_edges(
tensor, mask, edge_reorder_idx, True
)
for tensor in reorder_tensors_invneg
]
# Indices for swapping c->a and a->c (for symmetric MP)
# To obtain these efficiently and without any index assumptions,
        # we order the counter-edge IDs and then
# map this order back to the edge IDs.
# Double argsort gives the desired mapping
# from the ordered tensor to the original tensor.
edge_ids = get_edge_id(edge_index_new, cell_offsets_new, num_atoms)
order_edge_ids = torch.argsort(edge_ids)
inv_order_edge_ids = torch.argsort(order_edge_ids)
edge_ids_counter = get_edge_id(
edge_index_new.flip(0), -cell_offsets_new, num_atoms
)
order_edge_ids_counter = torch.argsort(edge_ids_counter)
id_swap = order_edge_ids_counter[inv_order_edge_ids]
return (
edge_index_new,
cell_offsets_new,
neighbors_per_image,
reorder_tensors,
reorder_tensors_invneg,
id_swap,
)
def generate_graph_values(self, data):
(
edge_index,
edge_dist,
distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
# Unit vectors pointing from edge_index[1] to edge_index[0],
# i.e., edge_index[0] - edge_index[1] divided by the norm.
# make sure that the distances are not close to zero before dividing
mask_zero = torch.isclose(edge_dist, torch.tensor(0.0), atol=1e-6)
edge_dist[mask_zero] = 1.0e-6
edge_vector = distance_vec / edge_dist[:, None]
empty_image = neighbors == 0
if torch.any(empty_image):
raise ValueError(
f"An image has no neighbors: id={data.id[empty_image]}, "
f"sid={data.sid[empty_image]}, fid={data.fid[empty_image]}"
)
# Symmetrize edges for swapping in symmetric message passing
(
edge_index,
cell_offsets,
neighbors,
[edge_dist],
[edge_vector],
id_swap,
) = self.symmetrize_edges(
edge_index,
cell_offsets,
neighbors,
data.batch,
[edge_dist],
[edge_vector],
)
return (
edge_index,
neighbors,
edge_dist,
edge_vector,
id_swap,
)
@conditional_grad(torch.enable_grad())
def forward(self, data):
pos = data.pos
batch = data.batch
z = data.atomic_numbers.long()
if self.regress_forces and not self.direct_forces:
pos = pos.requires_grad_(True)
(
edge_index,
neighbors,
edge_dist,
edge_vector,
id_swap,
) = self.generate_graph_values(data)
assert z.dim() == 1 and z.dtype == torch.long
edge_rbf = self.radial_basis(edge_dist) # rbf * envelope
x = self.atom_emb(z)
vec = torch.zeros(x.size(0), 3, x.size(1), device=x.device)
#### Interaction blocks ###############################################
for i in range(self.num_layers):
dx, dvec = self.message_layers[i](
x, vec, edge_index, edge_rbf, edge_vector
)
x = x + dx
vec = vec + dvec
x = x * self.inv_sqrt_2
dx, dvec = self.update_layers[i](x, vec)
x = x + dx
vec = vec + dvec
x = getattr(self, "upd_out_scalar_scale_%d" % i)(x)
#### Output block #####################################################
per_atom_energy = self.out_energy(x).squeeze(1)
energy = scatter(per_atom_energy, batch, dim=0)
if self.regress_forces:
if self.direct_forces:
forces = self.out_forces(x, vec)
return energy, forces
else:
forces = (
-1
* torch.autograd.grad(
x,
pos,
grad_outputs=torch.ones_like(x),
create_graph=True,
)[0]
)
return energy, forces
else:
return energy
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
f"hidden_channels={self.hidden_channels}, "
f"num_layers={self.num_layers}, "
f"num_rbf={self.num_rbf}, "
f"max_neighbors={self.max_neighbors}, "
f"cutoff={self.cutoff})"
)
class PaiNNMessage(MessagePassing):
def __init__(
self,
hidden_channels,
num_rbf,
) -> None:
super(PaiNNMessage, self).__init__(aggr="add", node_dim=0)
self.hidden_channels = hidden_channels
self.x_proj = nn.Sequential(
nn.Linear(hidden_channels, hidden_channels),
ScaledSiLU(),
nn.Linear(hidden_channels, hidden_channels * 3),
)
self.rbf_proj = nn.Linear(num_rbf, hidden_channels * 3)
self.inv_sqrt_3 = 1 / math.sqrt(3.0)
self.inv_sqrt_h = 1 / math.sqrt(hidden_channels)
self.x_layernorm = nn.LayerNorm(hidden_channels)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.x_proj[0].weight)
self.x_proj[0].bias.data.fill_(0)
nn.init.xavier_uniform_(self.x_proj[2].weight)
self.x_proj[2].bias.data.fill_(0)
nn.init.xavier_uniform_(self.rbf_proj.weight)
self.rbf_proj.bias.data.fill_(0)
self.x_layernorm.reset_parameters()
def forward(self, x, vec, edge_index, edge_rbf, edge_vector):
xh = self.x_proj(self.x_layernorm(x))
# TODO(@abhshkdz): Nans out with AMP here during backprop. Debug / fix.
rbfh = self.rbf_proj(edge_rbf)
# propagate_type: (xh: Tensor, vec: Tensor, rbfh_ij: Tensor, r_ij: Tensor)
dx, dvec = self.propagate(
edge_index,
xh=xh,
vec=vec,
rbfh_ij=rbfh,
r_ij=edge_vector,
size=None,
)
return dx, dvec
def message(self, xh_j, vec_j, rbfh_ij, r_ij):
x, xh2, xh3 = torch.split(xh_j * rbfh_ij, self.hidden_channels, dim=-1)
xh2 = xh2 * self.inv_sqrt_3
vec = vec_j * xh2.unsqueeze(1) + xh3.unsqueeze(1) * r_ij.unsqueeze(2)
vec = vec * self.inv_sqrt_h
return x, vec
def aggregate(
self,
features: Tuple[torch.Tensor, torch.Tensor],
index: torch.Tensor,
ptr: Optional[torch.Tensor],
dim_size: Optional[int],
) -> Tuple[torch.Tensor, torch.Tensor]:
x, vec = features
x = scatter(x, index, dim=self.node_dim, dim_size=dim_size)
vec = scatter(vec, index, dim=self.node_dim, dim_size=dim_size)
return x, vec
def update(
self, inputs: Tuple[torch.Tensor, torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor]:
return inputs
class PaiNNUpdate(nn.Module):
def __init__(self, hidden_channels) -> None:
super().__init__()
self.hidden_channels = hidden_channels
self.vec_proj = nn.Linear(
hidden_channels, hidden_channels * 2, bias=False
)
self.xvec_proj = nn.Sequential(
nn.Linear(hidden_channels * 2, hidden_channels),
ScaledSiLU(),
nn.Linear(hidden_channels, hidden_channels * 3),
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
self.inv_sqrt_h = 1 / math.sqrt(hidden_channels)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.vec_proj.weight)
nn.init.xavier_uniform_(self.xvec_proj[0].weight)
self.xvec_proj[0].bias.data.fill_(0)
nn.init.xavier_uniform_(self.xvec_proj[2].weight)
self.xvec_proj[2].bias.data.fill_(0)
def forward(self, x, vec):
vec1, vec2 = torch.split(
self.vec_proj(vec), self.hidden_channels, dim=-1
)
vec_dot = (vec1 * vec2).sum(dim=1) * self.inv_sqrt_h
# NOTE: Can't use torch.norm because the gradient is NaN for input = 0.
# Add an epsilon offset to make sure sqrt is always positive.
x_vec_h = self.xvec_proj(
torch.cat(
[x, torch.sqrt(torch.sum(vec2**2, dim=-2) + 1e-8)], dim=-1
)
)
xvec1, xvec2, xvec3 = torch.split(
x_vec_h, self.hidden_channels, dim=-1
)
dx = xvec1 + xvec2 * vec_dot
dx = dx * self.inv_sqrt_2
dvec = xvec3.unsqueeze(1) * vec1
return dx, dvec
class PaiNNOutput(nn.Module):
def __init__(self, hidden_channels) -> None:
super().__init__()
self.hidden_channels = hidden_channels
self.output_network = nn.ModuleList(
[
GatedEquivariantBlock(
hidden_channels,
hidden_channels // 2,
),
GatedEquivariantBlock(hidden_channels // 2, 1),
]
)
self.reset_parameters()
def reset_parameters(self) -> None:
for layer in self.output_network:
layer.reset_parameters()
def forward(self, x, vec):
for layer in self.output_network:
x, vec = layer(x, vec)
return vec.squeeze()
# Borrowed from TorchMD-Net
class GatedEquivariantBlock(nn.Module):
"""Gated Equivariant Block as defined in Schütt et al. (2021):
Equivariant message passing for the prediction of tensorial properties and molecular spectra
"""
def __init__(
self,
hidden_channels,
out_channels,
) -> None:
super(GatedEquivariantBlock, self).__init__()
self.out_channels = out_channels
self.vec1_proj = nn.Linear(
hidden_channels, hidden_channels, bias=False
)
self.vec2_proj = nn.Linear(hidden_channels, out_channels, bias=False)
self.update_net = nn.Sequential(
nn.Linear(hidden_channels * 2, hidden_channels),
ScaledSiLU(),
nn.Linear(hidden_channels, out_channels * 2),
)
self.act = ScaledSiLU()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.vec1_proj.weight)
nn.init.xavier_uniform_(self.vec2_proj.weight)
nn.init.xavier_uniform_(self.update_net[0].weight)
self.update_net[0].bias.data.fill_(0)
nn.init.xavier_uniform_(self.update_net[2].weight)
self.update_net[2].bias.data.fill_(0)
def forward(self, x, v):
vec1 = torch.norm(self.vec1_proj(v), dim=-2)
vec2 = self.vec2_proj(v)
x = torch.cat([x, vec1], dim=-1)
x, v = torch.split(self.update_net(x), self.out_channels, dim=-1)
v = v.unsqueeze(1) * vec2
x = self.act(x)
return x, v
| 21,957 | 32.472561 | 96 | py |
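Usage sketch (assumes ocpmodels is importable) for the gated equivariant output head: the scalar channel stays invariant while the vector channel stays equivariant; shapes are illustrative.

import torch
from ocpmodels.models.painn.painn import GatedEquivariantBlock

block = GatedEquivariantBlock(hidden_channels=16, out_channels=8)
x = torch.randn(5, 16)           # per-atom scalar features
v = torch.randn(5, 3, 16)        # per-atom vector features (xyz along dim -2)
x_out, v_out = block(x, v)
print(x_out.shape, v_out.shape)  # torch.Size([5, 8]) torch.Size([5, 3, 8])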
ocp | ocp-main/ocpmodels/models/painn/utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch_scatter import segment_csr
def repeat_blocks(
sizes,
repeats,
continuous_indexing: bool = True,
start_idx: int = 0,
block_inc: int = 0,
repeat_inc: int = 0,
) -> torch.Tensor:
"""Repeat blocks of indices.
Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements
continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block,
either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition,
either global or per block
Examples
--------
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
repeat_inc = 4
Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
start_idx = 5
Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
block_inc = 1
Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
Return: [0 1 0 1 5 6 5 6]
"""
assert sizes.dim() == 1
assert all(sizes >= 0)
# Remove 0 sizes
sizes_nonzero = sizes > 0
if not torch.all(sizes_nonzero):
assert block_inc == 0 # Implementing this is not worth the effort
sizes = torch.masked_select(sizes, sizes_nonzero)
if isinstance(repeats, torch.Tensor):
repeats = torch.masked_select(repeats, sizes_nonzero)
if isinstance(repeat_inc, torch.Tensor):
repeat_inc = torch.masked_select(repeat_inc, sizes_nonzero)
if isinstance(repeats, torch.Tensor):
assert all(repeats >= 0)
insert_dummy = repeats[0] == 0
if insert_dummy:
one = sizes.new_ones(1)
zero = sizes.new_zeros(1)
sizes = torch.cat((one, sizes))
repeats = torch.cat((one, repeats))
if isinstance(block_inc, torch.Tensor):
block_inc = torch.cat((zero, block_inc))
if isinstance(repeat_inc, torch.Tensor):
repeat_inc = torch.cat((zero, repeat_inc))
else:
assert repeats >= 0
insert_dummy = False
# Get repeats for each group using group lengths/sizes
r1 = torch.repeat_interleave(
torch.arange(len(sizes), device=sizes.device), repeats
)
# Get total size of output array, as needed to initialize output indexing array
N = (sizes * repeats).sum()
# Initialize indexing array with ones as we need to setup incremental indexing
# within each group when cumulatively summed at the final stage.
# Two steps here:
# 1. Within each group, we have multiple sequences, so setup the offsetting
# at each sequence lengths by the seq. lengths preceding those.
id_ar = torch.ones(N, dtype=torch.long, device=sizes.device)
id_ar[0] = 0
insert_index = sizes[r1[:-1]].cumsum(0)
insert_val = (1 - sizes)[r1[:-1]]
if isinstance(repeats, torch.Tensor) and torch.any(repeats == 0):
diffs = r1[1:] - r1[:-1]
indptr = torch.cat((sizes.new_zeros(1), diffs.cumsum(0)))
if continuous_indexing:
# If a group was skipped (repeats=0) we need to add its size
insert_val += segment_csr(sizes[: r1[-1]], indptr, reduce="sum")
# Add block increments
if isinstance(block_inc, torch.Tensor):
insert_val += segment_csr(
block_inc[: r1[-1]], indptr, reduce="sum"
)
else:
insert_val += block_inc * (indptr[1:] - indptr[:-1])
if insert_dummy:
insert_val[0] -= block_inc
else:
idx = r1[1:] != r1[:-1]
if continuous_indexing:
# 2. For each group, make sure the indexing starts from the next group's
# first element. So, simply assign 1s there.
insert_val[idx] = 1
# Add block increments
insert_val[idx] += block_inc
# Add repeat_inc within each group
if isinstance(repeat_inc, torch.Tensor):
insert_val += repeat_inc[r1[:-1]]
if isinstance(repeats, torch.Tensor):
repeat_inc_inner = repeat_inc[repeats > 0][:-1]
else:
repeat_inc_inner = repeat_inc[:-1]
else:
insert_val += repeat_inc
repeat_inc_inner = repeat_inc
# Subtract the increments between groups
if isinstance(repeats, torch.Tensor):
repeats_inner = repeats[repeats > 0][:-1]
else:
repeats_inner = repeats
insert_val[r1[1:] != r1[:-1]] -= repeat_inc_inner * repeats_inner
# Assign index-offsetting values
id_ar[insert_index] = insert_val
if insert_dummy:
id_ar = id_ar[1:]
if continuous_indexing:
id_ar[0] -= 1
# Set start index now, in case of insertion due to leading repeats=0
id_ar[0] += start_idx
# Finally index into input array for the group repeated o/p
res = id_ar.cumsum(0)
return res
def get_edge_id(edge_idx, cell_offsets, num_atoms: int):
cell_basis = cell_offsets.max() - cell_offsets.min() + 1
cell_id = (
(
cell_offsets
* cell_offsets.new_tensor([[1, cell_basis, cell_basis**2]])
)
.sum(-1)
.long()
)
edge_id = edge_idx[0] + edge_idx[1] * num_atoms + cell_id * num_atoms**2
return edge_id
| 6,138 | 35.325444 | 128 | py |
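A quick check of repeat_blocks against the first example in its docstring, assuming the module is importable:

import torch
from ocpmodels.models.painn.utils import repeat_blocks

sizes = torch.tensor([1, 3, 2])
repeats = torch.tensor([3, 2, 3])
out = repeat_blocks(sizes, repeats, continuous_indexing=False)
print(out.tolist())  # [0, 0, 0, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1]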
ocp | ocp-main/ocpmodels/models/painn/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/escn/so3.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import torch
import torch.nn as nn
try:
from e3nn import o3
from e3nn.o3 import FromS2Grid, ToS2Grid
except ImportError:
pass
# Borrowed from e3nn @ 0.4.0:
# https://github.com/e3nn/e3nn/blob/0.4.0/e3nn/o3/_wigner.py#L10
# _Jd is a list of tensors of shape (2l+1, 2l+1)
_Jd = torch.load(os.path.join(os.path.dirname(__file__), "Jd.pt"))
class CoefficientMapping:
"""
Helper functions for coefficients used to reshape l<-->m and to get coefficients of specific degree or order
Args:
lmax_list (list:int): List of maximum degree of the spherical harmonics
mmax_list (list:int): List of maximum order of the spherical harmonics
device: Device of the output
"""
def __init__(
self,
lmax_list,
mmax_list,
device,
) -> None:
super().__init__()
self.lmax_list = lmax_list
self.mmax_list = mmax_list
self.num_resolutions = len(lmax_list)
self.device = device
# Compute the degree (l) and order (m) for each
# entry of the embedding
self.l_harmonic = torch.tensor([], device=self.device).long()
self.m_harmonic = torch.tensor([], device=self.device).long()
self.m_complex = torch.tensor([], device=self.device).long()
self.res_size = torch.zeros(
[self.num_resolutions], device=self.device
).long()
offset = 0
for i in range(self.num_resolutions):
for l in range(0, self.lmax_list[i] + 1):
mmax = min(self.mmax_list[i], l)
m = torch.arange(-mmax, mmax + 1, device=self.device).long()
self.m_complex = torch.cat([self.m_complex, m], dim=0)
self.m_harmonic = torch.cat(
[self.m_harmonic, torch.abs(m).long()], dim=0
)
self.l_harmonic = torch.cat(
[self.l_harmonic, m.fill_(l).long()], dim=0
)
self.res_size[i] = len(self.l_harmonic) - offset
offset = len(self.l_harmonic)
num_coefficients = len(self.l_harmonic)
self.to_m = torch.zeros(
[num_coefficients, num_coefficients], device=self.device
)
self.m_size = torch.zeros(
[max(self.mmax_list) + 1], device=self.device
).long()
# The following is implemented poorly - very slow. It only gets called
# a few times so haven't optimized.
offset = 0
for m in range(max(self.mmax_list) + 1):
idx_r, idx_i = self.complex_idx(m)
for idx_out, idx_in in enumerate(idx_r):
self.to_m[idx_out + offset, idx_in] = 1.0
offset = offset + len(idx_r)
self.m_size[m] = int(len(idx_r))
for idx_out, idx_in in enumerate(idx_i):
self.to_m[idx_out + offset, idx_in] = 1.0
offset = offset + len(idx_i)
self.to_m = self.to_m.detach()
# Return mask containing coefficients of order m (real and imaginary parts)
def complex_idx(self, m, lmax: int = -1):
if lmax == -1:
lmax = max(self.lmax_list)
indices = torch.arange(len(self.l_harmonic), device=self.device)
# Real part
mask_r = torch.bitwise_and(
self.l_harmonic.le(lmax), self.m_complex.eq(m)
)
mask_idx_r = torch.masked_select(indices, mask_r)
mask_idx_i = torch.tensor([], device=self.device).long()
# Imaginary part
if m != 0:
mask_i = torch.bitwise_and(
self.l_harmonic.le(lmax), self.m_complex.eq(-m)
)
mask_idx_i = torch.masked_select(indices, mask_i)
return mask_idx_r, mask_idx_i
# Return mask containing coefficients less than or equal to degree (l) and order (m)
def coefficient_idx(self, lmax, mmax) -> torch.Tensor:
mask = torch.bitwise_and(
self.l_harmonic.le(lmax), self.m_harmonic.le(mmax)
)
indices = torch.arange(len(mask), device=self.device)
return torch.masked_select(indices, mask)
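
# Hedged usage sketch (illustration only, values below are assumptions):
#   mapping = CoefficientMapping(lmax_list=[2], mmax_list=[1], device="cpu")
#   mapping.coefficient_idx(lmax=1, mmax=1)  # indices of all coefficients with l <= 1 and |m| <= 1
#   mapping.complex_idx(1)                   # (real, imaginary) coefficient indices for order m = 1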
class SO3_Embedding(torch.nn.Module):
"""
Helper functions for irreps embedding
Args:
length (int): Batch size
lmax_list (list:int): List of maximum degree of the spherical harmonics
num_channels (int): Number of channels
device: Device of the output
dtype: type of the output tensors
"""
def __init__(
self,
length,
lmax_list,
num_channels,
device,
dtype,
) -> None:
super().__init__()
self.num_channels = num_channels
self.device = device
self.dtype = dtype
self.num_resolutions = len(lmax_list)
self.num_coefficients = 0
for i in range(self.num_resolutions):
self.num_coefficients = self.num_coefficients + int(
(lmax_list[i] + 1) ** 2
)
embedding = torch.zeros(
length,
self.num_coefficients,
self.num_channels,
device=self.device,
dtype=self.dtype,
)
self.set_embedding(embedding)
self.set_lmax_mmax(lmax_list, lmax_list.copy())
# Clone an embedding of irreps
def clone(self) -> "SO3_Embedding":
clone = SO3_Embedding(
0,
self.lmax_list.copy(),
self.num_channels,
self.device,
self.dtype,
)
clone.set_embedding(self.embedding.clone())
return clone
# Initialize an embedding of irreps
def set_embedding(self, embedding) -> None:
self.length = len(embedding)
self.embedding = embedding
# Set the maximum order to be the maximum degree
def set_lmax_mmax(self, lmax_list, mmax_list) -> None:
self.lmax_list = lmax_list
self.mmax_list = mmax_list
# Expand the node embeddings to the number of edges
def _expand_edge(self, edge_index) -> None:
embedding = self.embedding[edge_index]
self.set_embedding(embedding)
# Initialize an embedding of irreps of a neighborhood
def expand_edge(self, edge_index) -> "SO3_Embedding":
x_expand = SO3_Embedding(
0,
self.lmax_list.copy(),
self.num_channels,
self.device,
self.dtype,
)
x_expand.set_embedding(self.embedding[edge_index])
return x_expand
# Compute the sum of the embeddings of the neighborhood
def _reduce_edge(self, edge_index, num_nodes: int) -> None:
new_embedding = torch.zeros(
num_nodes,
self.num_coefficients,
self.num_channels,
device=self.embedding.device,
dtype=self.embedding.dtype,
)
new_embedding.index_add_(0, edge_index, self.embedding)
self.set_embedding(new_embedding)
# Reshape the embedding l-->m
def _m_primary(self, mapping) -> None:
self.embedding = torch.einsum(
"nac,ba->nbc", self.embedding, mapping.to_m
)
# Reshape the embedding m-->l
def _l_primary(self, mapping) -> None:
self.embedding = torch.einsum(
"nac,ab->nbc", self.embedding, mapping.to_m
)
# Rotate the embedding
def _rotate(self, SO3_rotation, lmax_list, mmax_list) -> None:
embedding_rotate = torch.tensor(
[], device=self.device, dtype=self.dtype
)
offset = 0
for i in range(self.num_resolutions):
num_coefficients = int((self.lmax_list[i] + 1) ** 2)
embedding_i = self.embedding[:, offset : offset + num_coefficients]
embedding_rotate = torch.cat(
[
embedding_rotate,
SO3_rotation[i].rotate(
embedding_i, lmax_list[i], mmax_list[i]
),
],
dim=1,
)
offset = offset + num_coefficients
self.embedding = embedding_rotate
self.set_lmax_mmax(lmax_list.copy(), mmax_list.copy())
# Rotate the embedding by the inverse of the rotation matrix
def _rotate_inv(self, SO3_rotation, mappingReduced) -> None:
embedding_rotate = torch.tensor(
[], device=self.device, dtype=self.dtype
)
offset = 0
for i in range(self.num_resolutions):
num_coefficients = mappingReduced.res_size[i]
embedding_i = self.embedding[:, offset : offset + num_coefficients]
embedding_rotate = torch.cat(
[
embedding_rotate,
SO3_rotation[i].rotate_inv(
embedding_i, self.lmax_list[i], self.mmax_list[i]
),
],
dim=1,
)
offset = offset + num_coefficients
self.embedding = embedding_rotate
# Assume mmax = lmax when rotating back
for i in range(self.num_resolutions):
self.mmax_list[i] = int(self.lmax_list[i])
self.set_lmax_mmax(self.lmax_list, self.mmax_list)
# Compute point-wise spherical non-linearity
def _grid_act(self, SO3_grid, act, mappingReduced) -> None:
offset = 0
for i in range(self.num_resolutions):
num_coefficients = mappingReduced.res_size[i]
x_res = self.embedding[
:, offset : offset + num_coefficients
].contiguous()
to_grid_mat = SO3_grid[self.lmax_list[i]][
self.mmax_list[i]
].get_to_grid_mat(self.device)
from_grid_mat = SO3_grid[self.lmax_list[i]][
self.mmax_list[i]
].get_from_grid_mat(self.device)
x_grid = torch.einsum("bai,zic->zbac", to_grid_mat, x_res)
x_grid = act(x_grid)
x_res = torch.einsum("bai,zbac->zic", from_grid_mat, x_grid)
self.embedding[:, offset : offset + num_coefficients] = x_res
offset = offset + num_coefficients
# Compute a sample of the grid
def to_grid(self, SO3_grid, lmax: int = -1) -> torch.Tensor:
if lmax == -1:
lmax = max(self.lmax_list)
to_grid_mat_lmax = SO3_grid[lmax][lmax].get_to_grid_mat(self.device)
grid_mapping = SO3_grid[lmax][lmax].mapping
offset = 0
x_grid = torch.tensor([], device=self.device)
for i in range(self.num_resolutions):
num_coefficients = int((self.lmax_list[i] + 1) ** 2)
x_res = self.embedding[
:, offset : offset + num_coefficients
].contiguous()
to_grid_mat = to_grid_mat_lmax[
:,
:,
grid_mapping.coefficient_idx(
self.lmax_list[i], self.lmax_list[i]
),
]
x_grid = torch.cat(
[x_grid, torch.einsum("bai,zic->zbac", to_grid_mat, x_res)],
dim=3,
)
offset = offset + num_coefficients
return x_grid
# Compute irreps from grid representation
def _from_grid(self, x_grid, SO3_grid, lmax: int = -1) -> None:
if lmax == -1:
lmax = max(self.lmax_list)
from_grid_mat_lmax = SO3_grid[lmax][lmax].get_from_grid_mat(
self.device
)
grid_mapping = SO3_grid[lmax][lmax].mapping
offset = 0
offset_channel = 0
for i in range(self.num_resolutions):
from_grid_mat = from_grid_mat_lmax[
:,
:,
grid_mapping.coefficient_idx(
self.lmax_list[i], self.lmax_list[i]
),
]
x_res = torch.einsum(
"bai,zbac->zic",
from_grid_mat,
x_grid[
:,
:,
:,
offset_channel : offset_channel + self.num_channels,
],
)
num_coefficients = int((self.lmax_list[i] + 1) ** 2)
self.embedding[:, offset : offset + num_coefficients] = x_res
offset = offset + num_coefficients
offset_channel = offset_channel + self.num_channels
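
# Hedged usage sketch (illustration only): for lmax = 2 the embedding stores
# (2 + 1) ** 2 = 9 spherical harmonic coefficients per node and channel.
#   x = SO3_Embedding(10, [2], num_channels=8, device="cpu", dtype=torch.float32)
#   x.embedding.shape  # torch.Size([10, 9, 8])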
class SO3_Rotation(torch.nn.Module):
"""
Helper functions for Wigner-D rotations
Args:
rot_mat3x3 (tensor): Rotation matrix
lmax_list (list:int): List of maximum degree of the spherical harmonics
"""
def __init__(
self,
rot_mat3x3,
lmax,
) -> None:
super().__init__()
self.device = rot_mat3x3.device
self.dtype = rot_mat3x3.dtype
length = len(rot_mat3x3)
self.wigner = self.RotationToWignerDMatrix(rot_mat3x3, 0, lmax)
self.wigner_inv = torch.transpose(self.wigner, 1, 2).contiguous()
self.wigner = self.wigner.detach()
self.wigner_inv = self.wigner_inv.detach()
self.set_lmax(lmax)
# Initialize coefficients for reshape l<-->m
def set_lmax(self, lmax) -> None:
self.lmax = lmax
self.mapping = CoefficientMapping(
[self.lmax], [self.lmax], self.device
)
# Rotate the embedding
def rotate(self, embedding, out_lmax, out_mmax) -> torch.Tensor:
out_mask = self.mapping.coefficient_idx(out_lmax, out_mmax)
wigner = self.wigner[:, out_mask, :]
return torch.bmm(wigner, embedding)
# Rotate the embedding by the inverse of the rotation matrix
def rotate_inv(self, embedding, in_lmax, in_mmax) -> torch.Tensor:
in_mask = self.mapping.coefficient_idx(in_lmax, in_mmax)
wigner_inv = self.wigner_inv[:, :, in_mask]
return torch.bmm(wigner_inv, embedding)
# Compute Wigner matrices from rotation matrix
def RotationToWignerDMatrix(
self, edge_rot_mat, start_lmax: int, end_lmax: int
):
x = edge_rot_mat @ edge_rot_mat.new_tensor([0.0, 1.0, 0.0])
alpha, beta = o3.xyz_to_angles(x)
R = (
o3.angles_to_matrix(
alpha, beta, torch.zeros_like(alpha)
).transpose(-1, -2)
@ edge_rot_mat
)
gamma = torch.atan2(R[..., 0, 2], R[..., 0, 0])
size = (end_lmax + 1) ** 2 - (start_lmax) ** 2
wigner = torch.zeros(len(alpha), size, size, device=self.device)
start = 0
for lmax in range(start_lmax, end_lmax + 1):
block = self.wigner_D(lmax, alpha, beta, gamma)
end = start + block.size()[1]
wigner[:, start:end, start:end] = block
start = end
return wigner.detach()
# Borrowed from e3nn @ 0.4.0:
# https://github.com/e3nn/e3nn/blob/0.4.0/e3nn/o3/_wigner.py#L37
#
# In 0.5.0, e3nn shifted to torch.matrix_exp which is significantly slower:
# https://github.com/e3nn/e3nn/blob/0.5.0/e3nn/o3/_wigner.py#L92
def wigner_D(self, l, alpha, beta, gamma):
if not l < len(_Jd):
raise NotImplementedError(
f"wigner D maximum l implemented is {len(_Jd) - 1}, send us an email to ask for more"
)
alpha, beta, gamma = torch.broadcast_tensors(alpha, beta, gamma)
J = _Jd[l].to(dtype=alpha.dtype, device=alpha.device)
Xa = self._z_rot_mat(alpha, l)
Xb = self._z_rot_mat(beta, l)
Xc = self._z_rot_mat(gamma, l)
return Xa @ J @ Xb @ J @ Xc
def _z_rot_mat(self, angle, l):
shape, device, dtype = angle.shape, angle.device, angle.dtype
M = angle.new_zeros((*shape, 2 * l + 1, 2 * l + 1))
inds = torch.arange(0, 2 * l + 1, 1, device=device)
reversed_inds = torch.arange(2 * l, -1, -1, device=device)
frequencies = torch.arange(l, -l - 1, -1, dtype=dtype, device=device)
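        # Fill cosines on the diagonal and sines on the anti-diagonal: the
        # (2l+1) x (2l+1) real-basis rotation by `angle` for degree l.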
M[..., inds, reversed_inds] = torch.sin(frequencies * angle[..., None])
M[..., inds, inds] = torch.cos(frequencies * angle[..., None])
return M
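
# Hedged usage sketch (illustration only): Wigner-D matrices for a batch of
# random rotations; for lmax = 2 each block-diagonal matrix is 9 x 9.
#   rot_mats = o3.rand_matrix(5)          # five random 3x3 rotation matrices
#   so3_rot = SO3_Rotation(rot_mats, lmax=2)
#   so3_rot.wigner.shape                  # torch.Size([5, 9, 9])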
class SO3_Grid(torch.nn.Module):
"""
Helper functions for grid representation of the irreps
Args:
lmax (int): Maximum degree of the spherical harmonics
mmax (int): Maximum order of the spherical harmonics
"""
def __init__(
self,
lmax: int,
mmax: int,
) -> None:
super().__init__()
self.lmax = lmax
self.mmax = mmax
self.lat_resolution = 2 * (self.lmax + 1)
if lmax == mmax:
self.long_resolution = 2 * (self.mmax + 1) + 1
else:
self.long_resolution = 2 * (self.mmax) + 1
self.initialized = False
def _initialize(self, device) -> None:
if self.initialized is True:
return
self.mapping = CoefficientMapping([self.lmax], [self.lmax], device)
to_grid = ToS2Grid(
self.lmax,
(self.lat_resolution, self.long_resolution),
normalization="integral",
device=device,
)
self.to_grid_mat = torch.einsum(
"mbi,am->bai", to_grid.shb, to_grid.sha
).detach()
self.to_grid_mat = self.to_grid_mat[
:, :, self.mapping.coefficient_idx(self.lmax, self.mmax)
]
from_grid = FromS2Grid(
(self.lat_resolution, self.long_resolution),
self.lmax,
normalization="integral",
device=device,
)
self.from_grid_mat = torch.einsum(
"am,mbi->bai", from_grid.sha, from_grid.shb
).detach()
self.from_grid_mat = self.from_grid_mat[
:, :, self.mapping.coefficient_idx(self.lmax, self.mmax)
]
self.initialized = True
# Compute matrices to transform irreps to grid
def get_to_grid_mat(self, device):
self._initialize(device)
return self.to_grid_mat
# Compute matrices to transform grid to irreps
def get_from_grid_mat(self, device):
self._initialize(device)
return self.from_grid_mat
# Compute grid from irreps representation
def to_grid(self, embedding, lmax, mmax) -> torch.Tensor:
self._initialize(embedding.device)
to_grid_mat = self.to_grid_mat[
:, :, self.mapping.coefficient_idx(lmax, mmax)
]
grid = torch.einsum("bai,zic->zbac", to_grid_mat, embedding)
return grid
# Compute irreps from grid representation
def from_grid(self, grid, lmax, mmax) -> torch.Tensor:
self._initialize(grid.device)
from_grid_mat = self.from_grid_mat[
:, :, self.mapping.coefficient_idx(lmax, mmax)
]
embedding = torch.einsum("bai,zbac->zic", from_grid_mat, grid)
return embedding
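
# Hedged usage sketch (illustration only; shapes depend on lmax/mmax):
#   grid_helper = SO3_Grid(lmax=2, mmax=2)
#   emb = torch.randn(4, 9, 16)                           # (edges, coefficients, channels)
#   on_grid = grid_helper.to_grid(emb, lmax=2, mmax=2)    # (4, 6, 7, 16) grid samples
#   emb_back = grid_helper.from_grid(on_grid, lmax=2, mmax=2)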
| 19,050 | 32.422807 | 112 | py |
ocp | ocp-main/ocpmodels/models/escn/escn.py | """
Copyright (c) Meta, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import time
from typing import List
import numpy as np
import torch
import torch.nn as nn
from pyexpat.model import XML_CQUANT_OPT
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import conditional_grad
from ocpmodels.models.base import BaseModel
from ocpmodels.models.escn.so3 import (
CoefficientMapping,
SO3_Embedding,
SO3_Grid,
SO3_Rotation,
)
from ocpmodels.models.scn.sampling import CalcSpherePoints
from ocpmodels.models.scn.smearing import (
GaussianSmearing,
LinearSigmoidSmearing,
SigmoidSmearing,
SiLUSmearing,
)
try:
from e3nn import o3
except ImportError:
pass
@registry.register_model("escn")
class eSCN(BaseModel):
"""Equivariant Spherical Channel Network
Paper: Reducing SO(3) Convolutions to SO(2) for Efficient Equivariant GNNs
Args:
use_pbc (bool): Use periodic boundary conditions
regress_forces (bool): Compute forces
otf_graph (bool): Compute graph On The Fly (OTF)
max_neighbors (int): Maximum number of neighbors per atom
        cutoff (float): Maximum distance between neighboring atoms in Angstroms
max_num_elements (int): Maximum atomic number
num_layers (int): Number of layers in the GNN
lmax_list (int): List of maximum degree of the spherical harmonics (1 to 10)
mmax_list (int): List of maximum order of the spherical harmonics (0 to lmax)
sphere_channels (int): Number of spherical channels (one set per resolution)
hidden_channels (int): Number of hidden units in message passing
num_sphere_samples (int): Number of samples used to approximate the integration of the sphere in the output blocks
edge_channels (int): Number of channels for the edge invariant features
distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu"): Basis function used for distances
basis_width_scalar (float): Width of distance basis function
distance_resolution (float): Distance between distance basis functions in Angstroms
show_timing_info (bool): Show timing and memory info
"""
def __init__(
self,
num_atoms: int, # not used
bond_feat_dim: int, # not used
num_targets: int, # not used
use_pbc: bool = True,
regress_forces: bool = True,
otf_graph: bool = False,
max_neighbors: int = 40,
cutoff: float = 8.0,
max_num_elements: int = 90,
num_layers: int = 8,
lmax_list: List[int] = [6],
mmax_list: List[int] = [2],
sphere_channels: int = 128,
hidden_channels: int = 256,
edge_channels: int = 128,
use_grid: bool = True,
num_sphere_samples: int = 128,
distance_function: str = "gaussian",
basis_width_scalar: float = 1.0,
distance_resolution: float = 0.02,
show_timing_info: bool = False,
) -> None:
super().__init__()
import sys
if "e3nn" not in sys.modules:
logging.error(
"You need to install the e3nn library to use the SCN model"
)
raise ImportError
self.regress_forces = regress_forces
self.use_pbc = use_pbc
self.cutoff = cutoff
self.otf_graph = otf_graph
self.show_timing_info = show_timing_info
self.max_num_elements = max_num_elements
self.hidden_channels = hidden_channels
self.num_layers = num_layers
self.num_atoms = 0
self.num_sphere_samples = num_sphere_samples
self.sphere_channels = sphere_channels
self.max_neighbors = max_neighbors
self.edge_channels = edge_channels
self.distance_resolution = distance_resolution
self.grad_forces = False
self.lmax_list = lmax_list
self.mmax_list = mmax_list
self.num_resolutions = len(self.lmax_list)
self.sphere_channels_all = self.num_resolutions * self.sphere_channels
self.basis_width_scalar = basis_width_scalar
self.distance_function = distance_function
# variables used for display purposes
self.counter = 0
# non-linear activation function used throughout the network
self.act = nn.SiLU()
# Weights for message initialization
self.sphere_embedding = nn.Embedding(
self.max_num_elements, self.sphere_channels_all
)
# Initialize the function used to measure the distances between atoms
assert self.distance_function in [
"gaussian",
"sigmoid",
"linearsigmoid",
"silu",
]
self.num_gaussians = int(cutoff / self.distance_resolution)
if self.distance_function == "gaussian":
self.distance_expansion = GaussianSmearing(
0.0,
cutoff,
self.num_gaussians,
basis_width_scalar,
)
if self.distance_function == "sigmoid":
self.distance_expansion = SigmoidSmearing(
0.0,
cutoff,
self.num_gaussians,
basis_width_scalar,
)
if self.distance_function == "linearsigmoid":
self.distance_expansion = LinearSigmoidSmearing(
0.0,
cutoff,
self.num_gaussians,
basis_width_scalar,
)
if self.distance_function == "silu":
self.distance_expansion = SiLUSmearing(
0.0,
cutoff,
self.num_gaussians,
basis_width_scalar,
)
# Initialize the transformations between spherical and grid representations
self.SO3_grid = nn.ModuleList()
for l in range(max(self.lmax_list) + 1):
SO3_m_grid = nn.ModuleList()
for m in range(max(self.lmax_list) + 1):
SO3_m_grid.append(SO3_Grid(l, m))
self.SO3_grid.append(SO3_m_grid)
# Initialize the blocks for each layer of the GNN
self.layer_blocks = nn.ModuleList()
for i in range(self.num_layers):
block = LayerBlock(
i,
self.sphere_channels,
self.hidden_channels,
self.edge_channels,
self.lmax_list,
self.mmax_list,
self.distance_expansion,
self.max_num_elements,
self.SO3_grid,
self.act,
)
self.layer_blocks.append(block)
# Output blocks for energy and forces
self.energy_block = EnergyBlock(
self.sphere_channels_all, self.num_sphere_samples, self.act
)
if self.regress_forces:
self.force_block = ForceBlock(
self.sphere_channels_all, self.num_sphere_samples, self.act
)
# Create a roughly evenly distributed point sampling of the sphere for the output blocks
self.sphere_points = nn.Parameter(
CalcSpherePoints(self.num_sphere_samples), requires_grad=False
)
# For each spherical point, compute the spherical harmonic coefficient weights
sphharm_weights: List[nn.Parameter] = []
for i in range(self.num_resolutions):
sphharm_weights.append(
nn.Parameter(
o3.spherical_harmonics(
torch.arange(0, self.lmax_list[i] + 1).tolist(),
self.sphere_points,
False,
),
requires_grad=False,
)
)
self.sphharm_weights = nn.ParameterList(sphharm_weights)
@conditional_grad(torch.enable_grad())
def forward(self, data):
device = data.pos.device
self.batch_size = len(data.natoms)
self.dtype = data.pos.dtype
start_time = time.time()
atomic_numbers = data.atomic_numbers.long()
num_atoms = len(atomic_numbers)
(
edge_index,
edge_distance,
edge_distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
###############################################################
# Initialize data structures
###############################################################
# Compute 3x3 rotation matrix per edge
edge_rot_mat = self._init_edge_rot_mat(
data, edge_index, edge_distance_vec
)
# Initialize the WignerD matrices and other values for spherical harmonic calculations
self.SO3_edge_rot = nn.ModuleList()
for i in range(self.num_resolutions):
self.SO3_edge_rot.append(
SO3_Rotation(edge_rot_mat, self.lmax_list[i])
)
###############################################################
# Initialize node embeddings
###############################################################
# Init per node representations using an atomic number based embedding
offset = 0
x = SO3_Embedding(
num_atoms,
self.lmax_list,
self.sphere_channels,
device,
self.dtype,
)
offset_res = 0
offset = 0
# Initialize the l=0,m=0 coefficients for each resolution
for i in range(self.num_resolutions):
x.embedding[:, offset_res, :] = self.sphere_embedding(
atomic_numbers
)[:, offset : offset + self.sphere_channels]
offset = offset + self.sphere_channels
offset_res = offset_res + int((self.lmax_list[i] + 1) ** 2)
# This can be expensive to compute (not implemented efficiently), so only do it once and pass it along to each layer
mappingReduced = CoefficientMapping(
self.lmax_list, self.mmax_list, device
)
###############################################################
# Update spherical node embeddings
###############################################################
for i in range(self.num_layers):
if i > 0:
x_message = self.layer_blocks[i](
x,
atomic_numbers,
edge_distance,
edge_index,
self.SO3_edge_rot,
mappingReduced,
)
# Residual layer for all layers past the first
x.embedding = x.embedding + x_message.embedding
else:
# No residual for the first layer
x = self.layer_blocks[i](
x,
atomic_numbers,
edge_distance,
edge_index,
self.SO3_edge_rot,
mappingReduced,
)
# Sample the spherical channels (node embeddings) at evenly distributed points on the sphere.
# These values are fed into the output blocks.
x_pt = torch.tensor([], device=device)
offset = 0
# Compute the embedding values at every sampled point on the sphere
for i in range(self.num_resolutions):
num_coefficients = int((x.lmax_list[i] + 1) ** 2)
x_pt = torch.cat(
[
x_pt,
torch.einsum(
"abc, pb->apc",
x.embedding[:, offset : offset + num_coefficients],
self.sphharm_weights[i],
).contiguous(),
],
dim=2,
)
offset = offset + num_coefficients
x_pt = x_pt.view(-1, self.sphere_channels_all)
###############################################################
# Energy estimation
###############################################################
node_energy = self.energy_block(x_pt)
energy = torch.zeros(len(data.natoms), device=device)
energy.index_add_(0, data.batch, node_energy.view(-1))
# Scale energy to help balance numerical precision w.r.t. forces
energy = energy * 0.001
###############################################################
# Force estimation
###############################################################
if self.regress_forces:
forces = self.force_block(x_pt, self.sphere_points)
if self.show_timing_info is True:
torch.cuda.synchronize()
print(
"{} Time: {}\tMemory: {}\t{}".format(
self.counter,
time.time() - start_time,
len(data.pos),
torch.cuda.max_memory_allocated() / 1000000,
)
)
self.counter = self.counter + 1
if not self.regress_forces:
return energy
else:
return energy, forces
    # Initialize the edge rotation matrices
def _init_edge_rot_mat(self, data, edge_index, edge_distance_vec):
edge_vec_0 = edge_distance_vec
edge_vec_0_distance = torch.sqrt(torch.sum(edge_vec_0**2, dim=1))
# Make sure the atoms are far enough apart
if torch.min(edge_vec_0_distance) < 0.0001:
print(
"Error edge_vec_0_distance: {}".format(
torch.min(edge_vec_0_distance)
)
)
(minval, minidx) = torch.min(edge_vec_0_distance, 0)
print(
"Error edge_vec_0_distance: {} {} {} {} {}".format(
minidx,
edge_index[0, minidx],
edge_index[1, minidx],
data.pos[edge_index[0, minidx]],
data.pos[edge_index[1, minidx]],
)
)
norm_x = edge_vec_0 / (edge_vec_0_distance.view(-1, 1))
edge_vec_2 = torch.rand_like(edge_vec_0) - 0.5
edge_vec_2 = edge_vec_2 / (
torch.sqrt(torch.sum(edge_vec_2**2, dim=1)).view(-1, 1)
)
        # Create two rotated copies of the random vectors in case the random vector is aligned with norm_x
# With two 90 degree rotated vectors, at least one should not be aligned with norm_x
edge_vec_2b = edge_vec_2.clone()
edge_vec_2b[:, 0] = -edge_vec_2[:, 1]
edge_vec_2b[:, 1] = edge_vec_2[:, 0]
edge_vec_2c = edge_vec_2.clone()
edge_vec_2c[:, 1] = -edge_vec_2[:, 2]
edge_vec_2c[:, 2] = edge_vec_2[:, 1]
vec_dot_b = torch.abs(torch.sum(edge_vec_2b * norm_x, dim=1)).view(
-1, 1
)
vec_dot_c = torch.abs(torch.sum(edge_vec_2c * norm_x, dim=1)).view(
-1, 1
)
vec_dot = torch.abs(torch.sum(edge_vec_2 * norm_x, dim=1)).view(-1, 1)
edge_vec_2 = torch.where(
torch.gt(vec_dot, vec_dot_b), edge_vec_2b, edge_vec_2
)
vec_dot = torch.abs(torch.sum(edge_vec_2 * norm_x, dim=1)).view(-1, 1)
edge_vec_2 = torch.where(
torch.gt(vec_dot, vec_dot_c), edge_vec_2c, edge_vec_2
)
vec_dot = torch.abs(torch.sum(edge_vec_2 * norm_x, dim=1))
# Check the vectors aren't aligned
assert torch.max(vec_dot) < 0.99
norm_z = torch.cross(norm_x, edge_vec_2, dim=1)
norm_z = norm_z / (
torch.sqrt(torch.sum(norm_z**2, dim=1, keepdim=True))
)
norm_z = norm_z / (
torch.sqrt(torch.sum(norm_z**2, dim=1)).view(-1, 1)
)
norm_y = torch.cross(norm_x, norm_z, dim=1)
norm_y = norm_y / (
torch.sqrt(torch.sum(norm_y**2, dim=1, keepdim=True))
)
# Construct the 3D rotation matrix
norm_x = norm_x.view(-1, 3, 1)
norm_y = -norm_y.view(-1, 3, 1)
norm_z = norm_z.view(-1, 3, 1)
edge_rot_mat_inv = torch.cat([norm_z, norm_x, norm_y], dim=2)
edge_rot_mat = torch.transpose(edge_rot_mat_inv, 1, 2)
return edge_rot_mat.detach()
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
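
# Hedged usage sketch (illustration only; hyperparameters below are assumptions,
# not recommended settings, and `batch` stands for a torch_geometric Batch with
# pos, cell, natoms and atomic_numbers fields):
#   model = eSCN(
#       num_atoms=0, bond_feat_dim=0, num_targets=1,  # unused by the model
#       lmax_list=[4], mmax_list=[2],
#       num_layers=4, sphere_channels=64, hidden_channels=128,
#   )
#   energy, forces = model(batch)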
class LayerBlock(torch.nn.Module):
"""
Layer block: Perform one layer (message passing and aggregation) of the GNN
Args:
layer_idx (int): Layer number
sphere_channels (int): Number of spherical channels
hidden_channels (int): Number of hidden channels used during the SO(2) conv
edge_channels (int): Size of invariant edge embedding
lmax_list (list:int): List of degrees (l) for each resolution
mmax_list (list:int): List of orders (m) for each resolution
distance_expansion (func): Function used to compute distance embedding
max_num_elements (int): Maximum number of atomic numbers
        SO3_grid (SO3_grid): Class used to convert between grid and spherical harmonic representations
act (function): Non-linear activation function
"""
def __init__(
self,
layer_idx,
sphere_channels,
hidden_channels,
edge_channels,
lmax_list,
mmax_list,
distance_expansion,
max_num_elements,
SO3_grid,
act,
) -> None:
super(LayerBlock, self).__init__()
self.layer_idx = layer_idx
self.act = act
self.lmax_list = lmax_list
self.mmax_list = mmax_list
self.num_resolutions = len(lmax_list)
self.sphere_channels = sphere_channels
self.sphere_channels_all = self.num_resolutions * self.sphere_channels
self.SO3_grid = SO3_grid
# Message block
self.message_block = MessageBlock(
self.layer_idx,
self.sphere_channels,
hidden_channels,
edge_channels,
self.lmax_list,
self.mmax_list,
distance_expansion,
max_num_elements,
self.SO3_grid,
self.act,
)
        # Non-linear point-wise convolution for the aggregated messages
self.fc1_sphere = nn.Linear(
2 * self.sphere_channels_all, self.sphere_channels_all, bias=False
)
self.fc2_sphere = nn.Linear(
self.sphere_channels_all, self.sphere_channels_all, bias=False
)
self.fc3_sphere = nn.Linear(
self.sphere_channels_all, self.sphere_channels_all, bias=False
)
def forward(
self,
x,
atomic_numbers,
edge_distance,
edge_index,
SO3_edge_rot,
mappingReduced,
):
# Compute messages by performing message block
x_message = self.message_block(
x,
atomic_numbers,
edge_distance,
edge_index,
SO3_edge_rot,
mappingReduced,
)
# Compute point-wise spherical non-linearity on aggregated messages
max_lmax = max(self.lmax_list)
# Project to grid
x_grid_message = x_message.to_grid(self.SO3_grid, lmax=max_lmax)
x_grid = x.to_grid(self.SO3_grid, lmax=max_lmax)
x_grid = torch.cat([x_grid, x_grid_message], dim=3)
# Perform point-wise convolution
x_grid = self.act(self.fc1_sphere(x_grid))
x_grid = self.act(self.fc2_sphere(x_grid))
x_grid = self.fc3_sphere(x_grid)
# Project back to spherical harmonic coefficients
x_message._from_grid(x_grid, self.SO3_grid, lmax=max_lmax)
# Return aggregated messages
return x_message
class MessageBlock(torch.nn.Module):
"""
Message block: Perform message passing
Args:
layer_idx (int): Layer number
sphere_channels (int): Number of spherical channels
hidden_channels (int): Number of hidden channels used during the SO(2) conv
edge_channels (int): Size of invariant edge embedding
lmax_list (list:int): List of degrees (l) for each resolution
mmax_list (list:int): List of orders (m) for each resolution
distance_expansion (func): Function used to compute distance embedding
max_num_elements (int): Maximum number of atomic numbers
        SO3_grid (SO3_grid): Class used to convert between grid and spherical harmonic representations
act (function): Non-linear activation function
"""
def __init__(
self,
layer_idx,
sphere_channels,
hidden_channels,
edge_channels,
lmax_list,
mmax_list,
distance_expansion,
max_num_elements,
SO3_grid,
act,
) -> None:
super(MessageBlock, self).__init__()
self.layer_idx = layer_idx
self.act = act
self.hidden_channels = hidden_channels
self.sphere_channels = sphere_channels
self.SO3_grid = SO3_grid
self.num_resolutions = len(lmax_list)
self.lmax_list = lmax_list
self.mmax_list = mmax_list
self.edge_channels = edge_channels
# Create edge scalar (invariant to rotations) features
self.edge_block = EdgeBlock(
self.edge_channels,
distance_expansion,
max_num_elements,
self.act,
)
# Create SO(2) convolution blocks
self.so2_block_source = SO2Block(
self.sphere_channels,
self.hidden_channels,
self.edge_channels,
self.lmax_list,
self.mmax_list,
self.act,
)
self.so2_block_target = SO2Block(
self.sphere_channels,
self.hidden_channels,
self.edge_channels,
self.lmax_list,
self.mmax_list,
self.act,
)
def forward(
self,
x,
atomic_numbers,
edge_distance,
edge_index,
SO3_edge_rot,
mappingReduced,
):
###############################################################
# Compute messages
###############################################################
# Compute edge scalar features (invariant to rotations)
# Uses atomic numbers and edge distance as inputs
x_edge = self.edge_block(
edge_distance,
atomic_numbers[edge_index[0]], # Source atom atomic number
atomic_numbers[edge_index[1]], # Target atom atomic number
)
# Copy embeddings for each edge's source and target nodes
x_source = x.clone()
x_target = x.clone()
x_source._expand_edge(edge_index[0, :])
x_target._expand_edge(edge_index[1, :])
# Rotate the irreps to align with the edge
x_source._rotate(SO3_edge_rot, self.lmax_list, self.mmax_list)
x_target._rotate(SO3_edge_rot, self.lmax_list, self.mmax_list)
# Compute messages
x_source = self.so2_block_source(x_source, x_edge, mappingReduced)
x_target = self.so2_block_target(x_target, x_edge, mappingReduced)
# Add together the source and target results
x_target.embedding = x_source.embedding + x_target.embedding
# Point-wise spherical non-linearity
x_target._grid_act(self.SO3_grid, self.act, mappingReduced)
# Rotate back the irreps
x_target._rotate_inv(SO3_edge_rot, mappingReduced)
# Compute the sum of the incoming neighboring messages for each target node
x_target._reduce_edge(edge_index[1], len(x.embedding))
return x_target
class SO2Block(torch.nn.Module):
"""
SO(2) Block: Perform SO(2) convolutions for all m (orders)
Args:
sphere_channels (int): Number of spherical channels
hidden_channels (int): Number of hidden channels used during the SO(2) conv
edge_channels (int): Size of invariant edge embedding
lmax_list (list:int): List of degrees (l) for each resolution
mmax_list (list:int): List of orders (m) for each resolution
act (function): Non-linear activation function
"""
def __init__(
self,
sphere_channels,
hidden_channels,
edge_channels,
lmax_list,
mmax_list,
act,
) -> None:
super(SO2Block, self).__init__()
self.sphere_channels = sphere_channels
self.hidden_channels = hidden_channels
self.lmax_list = lmax_list
self.mmax_list = mmax_list
self.num_resolutions = len(lmax_list)
self.act = act
num_channels_m0 = 0
for i in range(self.num_resolutions):
num_coefficents = self.lmax_list[i] + 1
num_channels_m0 = (
num_channels_m0 + num_coefficents * self.sphere_channels
)
# SO(2) convolution for m=0
self.fc1_dist0 = nn.Linear(edge_channels, self.hidden_channels)
self.fc1_m0 = nn.Linear(
num_channels_m0, self.hidden_channels, bias=False
)
self.fc2_m0 = nn.Linear(
self.hidden_channels, num_channels_m0, bias=False
)
# SO(2) convolution for non-zero m
self.so2_conv = nn.ModuleList()
for m in range(1, max(self.mmax_list) + 1):
so2_conv = SO2Conv(
m,
self.sphere_channels,
self.hidden_channels,
edge_channels,
self.lmax_list,
self.mmax_list,
self.act,
)
self.so2_conv.append(so2_conv)
def forward(
self,
x,
x_edge,
mappingReduced,
):
num_edges = len(x_edge)
# Reshape the spherical harmonics based on m (order)
x._m_primary(mappingReduced)
# Compute m=0 coefficients separately since they only have real values (no imaginary)
# Compute edge scalar features for m=0
x_edge_0 = self.act(self.fc1_dist0(x_edge))
x_0 = x.embedding[:, 0 : mappingReduced.m_size[0]].contiguous()
x_0 = x_0.view(num_edges, -1)
x_0 = self.fc1_m0(x_0)
x_0 = x_0 * x_edge_0
x_0 = self.fc2_m0(x_0)
x_0 = x_0.view(num_edges, -1, x.num_channels)
# Update the m=0 coefficients
x.embedding[:, 0 : mappingReduced.m_size[0]] = x_0
# Compute the values for the m > 0 coefficients
offset = mappingReduced.m_size[0]
for m in range(1, max(self.mmax_list) + 1):
# Get the m order coefficients
x_m = x.embedding[
:, offset : offset + 2 * mappingReduced.m_size[m]
].contiguous()
x_m = x_m.view(num_edges, 2, -1)
# Perform SO(2) convolution
x_m = self.so2_conv[m - 1](x_m, x_edge)
x_m = x_m.view(num_edges, -1, x.num_channels)
x.embedding[
:, offset : offset + 2 * mappingReduced.m_size[m]
] = x_m
offset = offset + 2 * mappingReduced.m_size[m]
# Reshape the spherical harmonics based on l (degree)
x._l_primary(mappingReduced)
return x
class SO2Conv(torch.nn.Module):
"""
SO(2) Conv: Perform an SO(2) convolution
Args:
m (int): Order of the spherical harmonic coefficients
sphere_channels (int): Number of spherical channels
hidden_channels (int): Number of hidden channels used during the SO(2) conv
edge_channels (int): Size of invariant edge embedding
lmax_list (list:int): List of degrees (l) for each resolution
mmax_list (list:int): List of orders (m) for each resolution
act (function): Non-linear activation function
"""
def __init__(
self,
m,
sphere_channels,
hidden_channels,
edge_channels,
lmax_list,
mmax_list,
act,
) -> None:
super(SO2Conv, self).__init__()
self.hidden_channels = hidden_channels
self.lmax_list = lmax_list
self.mmax_list = mmax_list
self.sphere_channels = sphere_channels
self.num_resolutions = len(self.lmax_list)
self.m = m
self.act = act
num_channels = 0
for i in range(self.num_resolutions):
num_coefficents = 0
if self.mmax_list[i] >= m:
num_coefficents = self.lmax_list[i] - m + 1
num_channels = (
num_channels + num_coefficents * self.sphere_channels
)
assert num_channels > 0
# Embedding function of the distance
self.fc1_dist = nn.Linear(edge_channels, 2 * self.hidden_channels)
# Real weights of SO(2) convolution
self.fc1_r = nn.Linear(num_channels, self.hidden_channels, bias=False)
self.fc2_r = nn.Linear(self.hidden_channels, num_channels, bias=False)
# Imaginary weights of SO(2) convolution
self.fc1_i = nn.Linear(num_channels, self.hidden_channels, bias=False)
self.fc2_i = nn.Linear(self.hidden_channels, num_channels, bias=False)
def forward(self, x_m, x_edge) -> torch.Tensor:
# Compute edge scalar features
x_edge = self.act(self.fc1_dist(x_edge))
x_edge = x_edge.view(-1, 2, self.hidden_channels)
# Perform the complex weight multiplication
x_r = self.fc1_r(x_m)
x_r = x_r * x_edge[:, 0:1, :]
x_r = self.fc2_r(x_r)
x_i = self.fc1_i(x_m)
x_i = x_i * x_edge[:, 1:2, :]
x_i = self.fc2_i(x_i)
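        # Combine the two linear maps as one complex product:
        # (W_r + i W_i)(x_r + i x_i) = (W_r x_r - W_i x_i) + i (W_r x_i + W_i x_r)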
x_m_r = x_r[:, 0] - x_i[:, 1]
x_m_i = x_r[:, 1] + x_i[:, 0]
return torch.stack((x_m_r, x_m_i), dim=1).contiguous()
class EdgeBlock(torch.nn.Module):
"""
    Edge Block: Compute invariant edge representation from edge distances and atomic numbers
Args:
edge_channels (int): Size of invariant edge embedding
distance_expansion (func): Function used to compute distance embedding
max_num_elements (int): Maximum number of atomic numbers
act (function): Non-linear activation function
"""
def __init__(
self,
edge_channels,
distance_expansion,
max_num_elements,
act,
) -> None:
super(EdgeBlock, self).__init__()
self.in_channels = distance_expansion.num_output
self.distance_expansion = distance_expansion
self.act = act
self.edge_channels = edge_channels
self.max_num_elements = max_num_elements
# Embedding function of the distance
self.fc1_dist = nn.Linear(self.in_channels, self.edge_channels)
# Embedding function of the atomic numbers
self.source_embedding = nn.Embedding(
self.max_num_elements, self.edge_channels
)
self.target_embedding = nn.Embedding(
self.max_num_elements, self.edge_channels
)
nn.init.uniform_(self.source_embedding.weight.data, -0.001, 0.001)
nn.init.uniform_(self.target_embedding.weight.data, -0.001, 0.001)
# Embedding function of the edge
self.fc1_edge_attr = nn.Linear(
self.edge_channels,
self.edge_channels,
)
def forward(self, edge_distance, source_element, target_element):
# Compute distance embedding
x_dist = self.distance_expansion(edge_distance)
x_dist = self.fc1_dist(x_dist)
# Compute atomic number embeddings
source_embedding = self.source_embedding(source_element)
target_embedding = self.target_embedding(target_element)
# Compute invariant edge embedding
x_edge = self.act(source_embedding + target_embedding + x_dist)
x_edge = self.act(self.fc1_edge_attr(x_edge))
return x_edge
class EnergyBlock(torch.nn.Module):
"""
Energy Block: Output block computing the energy
Args:
num_channels (int): Number of channels
num_sphere_samples (int): Number of samples used to approximate the integral on the sphere
act (function): Non-linear activation function
"""
def __init__(
self,
num_channels: int,
num_sphere_samples: int,
act,
) -> None:
super(EnergyBlock, self).__init__()
self.num_channels = num_channels
self.num_sphere_samples = num_sphere_samples
self.act = act
self.fc1 = nn.Linear(self.num_channels, self.num_channels)
self.fc2 = nn.Linear(self.num_channels, self.num_channels)
self.fc3 = nn.Linear(self.num_channels, 1, bias=False)
def forward(self, x_pt) -> torch.Tensor:
# x_pt are the values of the channels sampled at different points on the sphere
x_pt = self.act(self.fc1(x_pt))
x_pt = self.act(self.fc2(x_pt))
x_pt = self.fc3(x_pt)
x_pt = x_pt.view(-1, self.num_sphere_samples, 1)
node_energy = torch.sum(x_pt, dim=1) / self.num_sphere_samples
return node_energy
class ForceBlock(torch.nn.Module):
"""
Force Block: Output block computing the per atom forces
Args:
num_channels (int): Number of channels
num_sphere_samples (int): Number of samples used to approximate the integral on the sphere
act (function): Non-linear activation function
"""
def __init__(
self,
num_channels: int,
num_sphere_samples: int,
act,
) -> None:
super(ForceBlock, self).__init__()
self.num_channels = num_channels
self.num_sphere_samples = num_sphere_samples
self.act = act
self.fc1 = nn.Linear(self.num_channels, self.num_channels)
self.fc2 = nn.Linear(self.num_channels, self.num_channels)
self.fc3 = nn.Linear(self.num_channels, 1, bias=False)
def forward(self, x_pt, sphere_points) -> torch.Tensor:
# x_pt are the values of the channels sampled at different points on the sphere
x_pt = self.act(self.fc1(x_pt))
x_pt = self.act(self.fc2(x_pt))
x_pt = self.fc3(x_pt)
x_pt = x_pt.view(-1, self.num_sphere_samples, 1)
forces = x_pt * sphere_points.view(1, self.num_sphere_samples, 3)
forces = torch.sum(forces, dim=1) / self.num_sphere_samples
return forces
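
# Hedged usage sketch (illustration only): both output blocks take per-point
# samples of shape (num_atoms * num_sphere_samples, num_channels) and average
# over the sphere samples, approximating an integral over the sphere.
#   energy_block = EnergyBlock(num_channels=16, num_sphere_samples=8, act=nn.SiLU())
#   node_energy = energy_block(torch.randn(3 * 8, 16))  # -> shape (3, 1), one value per atom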
| 34,881 | 33.951904 | 126 | py |
ocp | ocp-main/ocpmodels/models/escn/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/scn/scn.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch_geometric.nn import radius_graph
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.models.base import BaseModel
from ocpmodels.models.scn.sampling import CalcSpherePoints
from ocpmodels.models.scn.smearing import (
GaussianSmearing,
LinearSigmoidSmearing,
SigmoidSmearing,
SiLUSmearing,
)
from ocpmodels.models.scn.spherical_harmonics import SphericalHarmonicsHelper
try:
import e3nn
from e3nn import o3
except ImportError:
pass
@registry.register_model("scn")
class SphericalChannelNetwork(BaseModel):
"""Spherical Channel Network
Paper: Spherical Channels for Modeling Atomic Interactions
Args:
use_pbc (bool): Use periodic boundary conditions
regress_forces (bool): Compute forces
otf_graph (bool): Compute graph On The Fly (OTF)
max_num_neighbors (int): Maximum number of neighbors per atom
        cutoff (float): Maximum distance between neighboring atoms in Angstroms
max_num_elements (int): Maximum atomic number
num_interactions (int): Number of layers in the GNN
lmax (int): Maximum degree of the spherical harmonics (1 to 10)
mmax (int): Maximum order of the spherical harmonics (0 or 1)
        num_resolutions (int): Number of resolutions used to compute messages; farther-away atoms have lower resolution (1 or 2)
sphere_channels (int): Number of spherical channels
sphere_channels_reduce (int): Number of spherical channels used during message passing (downsample or upsample)
hidden_channels (int): Number of hidden units in message passing
num_taps (int): Number of taps or rotations used during message passing (1 or otherwise set automatically based on mmax)
use_grid (bool): Use non-linear pointwise convolution during aggregation
num_bands (int): Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)
num_sphere_samples (int): Number of samples used to approximate the integration of the sphere in the output blocks
num_basis_functions (int): Number of basis functions used for distance and atomic number blocks
distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu"): Basis function used for distances
basis_width_scalar (float): Width of distance basis function
distance_resolution (float): Distance between distance basis functions in Angstroms
show_timing_info (bool): Show timing and memory info
"""
def __init__(
self,
num_atoms: int, # not used
bond_feat_dim: int, # not used
num_targets: int, # not used
use_pbc: bool = True,
regress_forces: bool = True,
otf_graph: bool = False,
max_num_neighbors: int = 20,
cutoff: float = 8.0,
max_num_elements: int = 90,
num_interactions: int = 8,
lmax: int = 6,
mmax: int = 1,
num_resolutions: int = 2,
sphere_channels: int = 128,
sphere_channels_reduce: int = 128,
hidden_channels: int = 256,
num_taps: int = -1,
use_grid: bool = True,
num_bands: int = 1,
num_sphere_samples: int = 128,
num_basis_functions: int = 128,
distance_function: str = "gaussian",
basis_width_scalar: float = 1.0,
distance_resolution: float = 0.02,
show_timing_info: bool = False,
direct_forces: bool = True,
) -> None:
super().__init__()
if "e3nn" not in sys.modules:
logging.error(
"You need to install e3nn v0.2.6 to use the SCN model"
)
raise ImportError
assert e3nn.__version__ == "0.2.6"
self.regress_forces = regress_forces
self.use_pbc = use_pbc
self.cutoff = cutoff
self.otf_graph = otf_graph
self.show_timing_info = show_timing_info
self.max_num_elements = max_num_elements
self.hidden_channels = hidden_channels
self.num_interactions = num_interactions
self.num_atoms = 0
self.num_sphere_samples = num_sphere_samples
self.sphere_channels = sphere_channels
self.sphere_channels_reduce = sphere_channels_reduce
self.max_num_neighbors = self.max_neighbors = max_num_neighbors
self.num_basis_functions = num_basis_functions
self.distance_resolution = distance_resolution
self.grad_forces = False
self.lmax = lmax
self.mmax = mmax
self.basis_width_scalar = basis_width_scalar
self.sphere_basis = (self.lmax + 1) ** 2
self.use_grid = use_grid
self.distance_function = distance_function
# variables used for display purposes
self.counter = 0
self.act = nn.SiLU()
# Weights for message initialization
self.sphere_embedding = nn.Embedding(
self.max_num_elements, self.sphere_channels
)
assert self.distance_function in [
"gaussian",
"sigmoid",
"linearsigmoid",
"silu",
]
self.num_gaussians = int(cutoff / self.distance_resolution)
if self.distance_function == "gaussian":
self.distance_expansion = GaussianSmearing(
0.0,
cutoff,
self.num_gaussians,
basis_width_scalar,
)
if self.distance_function == "sigmoid":
self.distance_expansion = SigmoidSmearing(
0.0,
cutoff,
self.num_gaussians,
basis_width_scalar,
)
if self.distance_function == "linearsigmoid":
self.distance_expansion = LinearSigmoidSmearing(
0.0,
cutoff,
self.num_gaussians,
basis_width_scalar,
)
if self.distance_function == "silu":
self.distance_expansion = SiLUSmearing(
0.0,
cutoff,
self.num_gaussians,
basis_width_scalar,
)
if num_resolutions == 1:
self.num_resolutions = 1
self.hidden_channels_list = torch.tensor([self.hidden_channels])
self.lmax_list = torch.tensor(
[self.lmax, -1]
) # always end with -1
self.cutoff_list = torch.tensor([self.max_num_neighbors - 0.01])
if num_resolutions == 2:
self.num_resolutions = 2
self.hidden_channels_list = torch.tensor(
[self.hidden_channels, self.hidden_channels // 4]
)
self.lmax_list = torch.tensor([self.lmax, max(4, self.lmax - 2)])
self.cutoff_list = torch.tensor(
[12 - 0.01, self.max_num_neighbors - 0.01]
)
self.sphharm_list = []
for i in range(self.num_resolutions):
self.sphharm_list.append(
SphericalHarmonicsHelper(
self.lmax_list[i],
self.mmax,
num_taps,
num_bands,
)
)
self.edge_blocks = nn.ModuleList()
for _ in range(self.num_interactions):
block = EdgeBlock(
self.num_resolutions,
self.sphere_channels_reduce,
self.hidden_channels_list,
self.cutoff_list,
self.sphharm_list,
self.sphere_channels,
self.distance_expansion,
self.max_num_elements,
self.num_basis_functions,
self.num_gaussians,
self.use_grid,
self.act,
)
self.edge_blocks.append(block)
# Energy estimation
self.energy_fc1 = nn.Linear(self.sphere_channels, self.sphere_channels)
self.energy_fc2 = nn.Linear(
self.sphere_channels, self.sphere_channels_reduce
)
self.energy_fc3 = nn.Linear(self.sphere_channels_reduce, 1)
# Force estimation
if self.regress_forces:
self.force_fc1 = nn.Linear(
self.sphere_channels, self.sphere_channels
)
self.force_fc2 = nn.Linear(
self.sphere_channels, self.sphere_channels_reduce
)
self.force_fc3 = nn.Linear(self.sphere_channels_reduce, 1)
@conditional_grad(torch.enable_grad())
def forward(self, data):
self.device = data.pos.device
self.num_atoms = len(data.batch)
self.batch_size = len(data.natoms)
# torch.autograd.set_detect_anomaly(True)
start_time = time.time()
outputs = self._forward_helper(
data,
)
if self.show_timing_info is True:
torch.cuda.synchronize()
print(
"{} Time: {}\tMemory: {}\t{}".format(
self.counter,
time.time() - start_time,
len(data.pos),
torch.cuda.max_memory_allocated() / 1000000,
)
)
self.counter = self.counter + 1
return outputs
# restructure forward helper for conditional grad
def _forward_helper(self, data):
atomic_numbers = data.atomic_numbers.long()
num_atoms = len(atomic_numbers)
pos = data.pos
(
edge_index,
edge_distance,
edge_distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
###############################################################
# Initialize data structures
###############################################################
# Calculate which message block each edge should use. Based on edge distance rank.
edge_rank = self._rank_edge_distances(
edge_distance, edge_index, self.max_num_neighbors
)
# Reorder edges so that they are grouped by distance rank (lowest to highest)
last_cutoff = -0.1
message_block_idx = torch.zeros(len(edge_distance), device=pos.device)
edge_distance_reorder = torch.tensor([], device=self.device)
edge_index_reorder = torch.tensor([], device=self.device)
edge_distance_vec_reorder = torch.tensor([], device=self.device)
cutoff_index = torch.tensor([0], device=self.device)
for i in range(self.num_resolutions):
mask = torch.logical_and(
edge_rank.gt(last_cutoff), edge_rank.le(self.cutoff_list[i])
)
last_cutoff = self.cutoff_list[i]
message_block_idx.masked_fill_(mask, i)
edge_distance_reorder = torch.cat(
[
edge_distance_reorder,
torch.masked_select(edge_distance, mask),
],
dim=0,
)
edge_index_reorder = torch.cat(
[
edge_index_reorder,
torch.masked_select(
edge_index, mask.view(1, -1).repeat(2, 1)
).view(2, -1),
],
dim=1,
)
edge_distance_vec_mask = torch.masked_select(
edge_distance_vec, mask.view(-1, 1).repeat(1, 3)
).view(-1, 3)
edge_distance_vec_reorder = torch.cat(
[edge_distance_vec_reorder, edge_distance_vec_mask], dim=0
)
cutoff_index = torch.cat(
[
cutoff_index,
torch.tensor(
[len(edge_distance_reorder)], device=self.device
),
],
dim=0,
)
edge_index = edge_index_reorder.long()
edge_distance = edge_distance_reorder
edge_distance_vec = edge_distance_vec_reorder
# Compute 3x3 rotation matrix per edge
edge_rot_mat = self._init_edge_rot_mat(
data, edge_index, edge_distance_vec
)
# Initialize the WignerD matrices and other values for spherical harmonic calculations
for i in range(self.num_resolutions):
self.sphharm_list[i].InitWignerDMatrix(
edge_rot_mat[cutoff_index[i] : cutoff_index[i + 1]],
)
###############################################################
# Initialize node embeddings
###############################################################
# Init per node representations using an atomic number based embedding
x = torch.zeros(
num_atoms,
self.sphere_basis,
self.sphere_channels,
device=pos.device,
)
x[:, 0, :] = self.sphere_embedding(atomic_numbers)
###############################################################
# Update spherical node embeddings
###############################################################
for i, interaction in enumerate(self.edge_blocks):
if i > 0:
x = x + interaction(
x, atomic_numbers, edge_distance, edge_index, cutoff_index
)
else:
x = interaction(
x, atomic_numbers, edge_distance, edge_index, cutoff_index
)
###############################################################
# Estimate energy and forces using the node embeddings
###############################################################
# Create a roughly evenly distributed point sampling of the sphere
sphere_points = CalcSpherePoints(
self.num_sphere_samples, x.device
).detach()
sphharm_weights = o3.spherical_harmonics(
torch.arange(0, self.lmax + 1).tolist(), sphere_points, False
).detach()
# Energy estimation
node_energy = torch.einsum(
"abc, pb->apc", x, sphharm_weights
).contiguous()
node_energy = node_energy.view(-1, self.sphere_channels)
node_energy = self.act(self.energy_fc1(node_energy))
node_energy = self.act(self.energy_fc2(node_energy))
node_energy = self.energy_fc3(node_energy)
node_energy = node_energy.view(-1, self.num_sphere_samples, 1)
node_energy = torch.sum(node_energy, dim=1) / self.num_sphere_samples
energy = torch.zeros(len(data.natoms), device=pos.device)
energy.index_add_(0, data.batch, node_energy.view(-1))
# Force estimation
if self.regress_forces:
forces = torch.einsum(
"abc, pb->apc", x, sphharm_weights
).contiguous()
forces = forces.view(-1, self.sphere_channels)
forces = self.act(self.force_fc1(forces))
forces = self.act(self.force_fc2(forces))
forces = self.force_fc3(forces)
forces = forces.view(-1, self.num_sphere_samples, 1)
forces = forces * sphere_points.view(1, self.num_sphere_samples, 3)
forces = torch.sum(forces, dim=1) / self.num_sphere_samples
if not self.regress_forces:
return energy
else:
return energy, forces
def _init_edge_rot_mat(self, data, edge_index, edge_distance_vec):
edge_vec_0 = edge_distance_vec
edge_vec_0_distance = torch.sqrt(torch.sum(edge_vec_0**2, dim=1))
if torch.min(edge_vec_0_distance) < 0.0001:
print(
"Error edge_vec_0_distance: {}".format(
torch.min(edge_vec_0_distance)
)
)
(minval, minidx) = torch.min(edge_vec_0_distance, 0)
print(
"Error edge_vec_0_distance: {} {} {} {} {}".format(
minidx,
edge_index[0, minidx],
edge_index[1, minidx],
data.pos[edge_index[0, minidx]],
data.pos[edge_index[1, minidx]],
)
)
norm_x = edge_vec_0 / (edge_vec_0_distance.view(-1, 1))
edge_vec_2 = torch.rand_like(edge_vec_0) - 0.5
edge_vec_2 = edge_vec_2 / (
torch.sqrt(torch.sum(edge_vec_2**2, dim=1)).view(-1, 1)
)
        # Create two rotated copies of the random vectors in case the random vector is aligned with norm_x
# With two 90 degree rotated vectors, at least one should not be aligned with norm_x
edge_vec_2b = edge_vec_2.clone()
edge_vec_2b[:, 0] = -edge_vec_2[:, 1]
edge_vec_2b[:, 1] = edge_vec_2[:, 0]
edge_vec_2c = edge_vec_2.clone()
edge_vec_2c[:, 1] = -edge_vec_2[:, 2]
edge_vec_2c[:, 2] = edge_vec_2[:, 1]
vec_dot_b = torch.abs(torch.sum(edge_vec_2b * norm_x, dim=1)).view(
-1, 1
)
vec_dot_c = torch.abs(torch.sum(edge_vec_2c * norm_x, dim=1)).view(
-1, 1
)
vec_dot = torch.abs(torch.sum(edge_vec_2 * norm_x, dim=1)).view(-1, 1)
edge_vec_2 = torch.where(
torch.gt(vec_dot, vec_dot_b), edge_vec_2b, edge_vec_2
)
vec_dot = torch.abs(torch.sum(edge_vec_2 * norm_x, dim=1)).view(-1, 1)
edge_vec_2 = torch.where(
torch.gt(vec_dot, vec_dot_c), edge_vec_2c, edge_vec_2
)
vec_dot = torch.abs(torch.sum(edge_vec_2 * norm_x, dim=1))
# Check the vectors aren't aligned
assert torch.max(vec_dot) < 0.99
norm_z = torch.cross(norm_x, edge_vec_2, dim=1)
norm_z = norm_z / (
torch.sqrt(torch.sum(norm_z**2, dim=1, keepdim=True))
)
norm_z = norm_z / (
torch.sqrt(torch.sum(norm_z**2, dim=1)).view(-1, 1)
)
norm_y = torch.cross(norm_x, norm_z, dim=1)
norm_y = norm_y / (
torch.sqrt(torch.sum(norm_y**2, dim=1, keepdim=True))
)
norm_x = norm_x.view(-1, 3, 1)
norm_y = -norm_y.view(-1, 3, 1)
norm_z = norm_z.view(-1, 3, 1)
edge_rot_mat_inv = torch.cat([norm_z, norm_x, norm_y], dim=2)
edge_rot_mat = torch.transpose(edge_rot_mat_inv, 1, 2)
return edge_rot_mat.detach()
def _rank_edge_distances(
self, edge_distance, edge_index, max_num_neighbors: int
) -> torch.Tensor:
device = edge_distance.device
        # Create an index map to map distances from atom_distance to distance_sort
        # index_sort_map assumes edges are sorted (grouped) by their target index, edge_index[1]
output, num_neighbors = torch.unique(edge_index[1], return_counts=True)
index_neighbor_offset = (
torch.cumsum(num_neighbors, dim=0) - num_neighbors
)
index_neighbor_offset_expand = torch.repeat_interleave(
index_neighbor_offset, num_neighbors
)
index_sort_map = (
edge_index[1] * max_num_neighbors
+ torch.arange(len(edge_distance), device=device)
- index_neighbor_offset_expand
)
num_atoms = int(torch.max(edge_index)) + 1
distance_sort = torch.full(
[num_atoms * max_num_neighbors], np.inf, device=device
)
distance_sort.index_copy_(0, index_sort_map, edge_distance)
distance_sort = distance_sort.view(num_atoms, max_num_neighbors)
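        # Each row now holds the (inf-padded) neighbor distances of one atom;
        # sorting each row and inverting that permutation below gives every edge
        # its distance rank among the neighbors of its target atom.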
no_op, index_sort = torch.sort(distance_sort, dim=1)
index_map = (
torch.arange(max_num_neighbors, device=device)
.view(1, -1)
.repeat(num_atoms, 1)
.view(-1)
)
index_sort = index_sort + (
torch.arange(num_atoms, device=device) * max_num_neighbors
).view(-1, 1).repeat(1, max_num_neighbors)
edge_rank = torch.zeros_like(index_map)
edge_rank.index_copy_(0, index_sort.view(-1), index_map)
edge_rank = edge_rank.view(num_atoms, max_num_neighbors)
index_sort_mask = distance_sort.lt(1000.0)
edge_rank = torch.masked_select(edge_rank, index_sort_mask)
return edge_rank
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
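
# Hedged usage sketch (illustration only; hyperparameters below are assumptions,
# `batch` stands for a torch_geometric Batch, and the e3nn version assert above applies):
#   model = SphericalChannelNetwork(
#       num_atoms=0, bond_feat_dim=0, num_targets=1,  # unused by the model
#       lmax=4, mmax=1, num_resolutions=2,
#       sphere_channels=64, hidden_channels=128,
#   )
#   energy, forces = model(batch)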
class EdgeBlock(torch.nn.Module):
def __init__(
self,
num_resolutions: int,
sphere_channels_reduce,
hidden_channels_list,
cutoff_list,
sphharm_list,
sphere_channels,
distance_expansion,
max_num_elements: int,
num_basis_functions: int,
num_gaussians: int,
use_grid: bool,
act,
) -> None:
super(EdgeBlock, self).__init__()
self.num_resolutions = num_resolutions
self.act = act
self.hidden_channels_list = hidden_channels_list
self.sphere_channels = sphere_channels
self.sphere_channels_reduce = sphere_channels_reduce
self.distance_expansion = distance_expansion
self.cutoff_list = cutoff_list
self.sphharm_list = sphharm_list
self.max_num_elements = max_num_elements
self.num_basis_functions = num_basis_functions
self.use_grid = use_grid
self.num_gaussians = num_gaussians
# Edge features
self.dist_block = DistanceBlock(
self.num_gaussians,
self.num_basis_functions,
self.distance_expansion,
self.max_num_elements,
self.act,
)
# Create a message block for each cutoff
self.message_blocks = nn.ModuleList()
for i in range(self.num_resolutions):
block = MessageBlock(
self.sphere_channels_reduce,
int(self.hidden_channels_list[i]),
self.num_basis_functions,
self.sphharm_list[i],
self.act,
)
self.message_blocks.append(block)
# Downsampling number of sphere channels
        # Make sure bias is False; otherwise equivariance is lost
if self.sphere_channels != self.sphere_channels_reduce:
self.downsample = nn.Linear(
self.sphere_channels,
self.sphere_channels_reduce,
bias=False,
)
self.upsample = nn.Linear(
self.sphere_channels_reduce,
self.sphere_channels,
bias=False,
)
# Use non-linear message aggregation?
if self.use_grid:
# Network for each node to combine edge messages
self.fc1_sphere = nn.Linear(
self.sphharm_list[0].num_bands
* 2
* self.sphere_channels_reduce,
self.sphharm_list[0].num_bands
* 2
* self.sphere_channels_reduce,
)
self.fc2_sphere = nn.Linear(
self.sphharm_list[0].num_bands
* 2
* self.sphere_channels_reduce,
2 * self.sphere_channels_reduce,
)
self.fc3_sphere = nn.Linear(
2 * self.sphere_channels_reduce, self.sphere_channels_reduce
)
def forward(
self,
x,
atomic_numbers,
edge_distance,
edge_index,
cutoff_index,
):
###############################################################
# Update spherical node embeddings
###############################################################
x_edge = self.dist_block(
edge_distance,
atomic_numbers[edge_index[0]],
atomic_numbers[edge_index[1]],
)
x_new = torch.zeros(
len(x),
self.sphharm_list[0].sphere_basis,
self.sphere_channels_reduce,
dtype=x.dtype,
device=x.device,
)
if self.sphere_channels != self.sphere_channels_reduce:
x_down = self.downsample(x.view(-1, self.sphere_channels))
else:
x_down = x
x_down = x_down.view(
-1, self.sphharm_list[0].sphere_basis, self.sphere_channels_reduce
)
for i, interaction in enumerate(self.message_blocks):
start_idx = cutoff_index[i]
end_idx = cutoff_index[i + 1]
x_message = interaction(
x_down[:, 0 : self.sphharm_list[i].sphere_basis, :],
x_edge[start_idx:end_idx],
edge_index[:, start_idx:end_idx],
)
# Sum all incoming edges to the target nodes
x_new[:, 0 : self.sphharm_list[i].sphere_basis, :].index_add_(
0, edge_index[1, start_idx:end_idx], x_message.to(x_new.dtype)
)
if self.use_grid:
# Feed in the spherical functions from the previous time step
x_grid = self.sphharm_list[0].ToGrid(
x_down, self.sphere_channels_reduce
)
x_grid = torch.cat(
[
x_grid,
self.sphharm_list[0].ToGrid(
x_new, self.sphere_channels_reduce
),
],
dim=1,
)
x_grid = self.act(self.fc1_sphere(x_grid))
x_grid = self.act(self.fc2_sphere(x_grid))
x_grid = self.fc3_sphere(x_grid)
x_new = self.sphharm_list[0].FromGrid(
x_grid, self.sphere_channels_reduce
)
if self.sphere_channels != self.sphere_channels_reduce:
x_new = x_new.view(-1, self.sphere_channels_reduce)
x_new = self.upsample(x_new)
x_new = x_new.view(
-1, self.sphharm_list[0].sphere_basis, self.sphere_channels
)
return x_new
class MessageBlock(torch.nn.Module):
def __init__(
self,
sphere_channels_reduce,
hidden_channels,
num_basis_functions,
sphharm,
act,
) -> None:
super(MessageBlock, self).__init__()
self.act = act
self.hidden_channels = hidden_channels
self.sphere_channels_reduce = sphere_channels_reduce
self.sphharm = sphharm
self.fc1_dist = nn.Linear(num_basis_functions, self.hidden_channels)
# Network for each edge to compute edge messages
self.fc1_edge_proj = nn.Linear(
2 * self.sphharm.sphere_basis_reduce * self.sphere_channels_reduce,
self.hidden_channels,
)
self.fc1_edge = nn.Linear(self.hidden_channels, self.hidden_channels)
self.fc2_edge = nn.Linear(
self.hidden_channels,
self.sphharm.sphere_basis_reduce * self.sphere_channels_reduce,
)
def forward(
self,
x,
x_edge,
edge_index,
):
###############################################################
# Compute messages
###############################################################
x_edge = self.act(self.fc1_dist(x_edge))
x_source = x[edge_index[0, :]]
x_target = x[edge_index[1, :]]
# Rotate the spherical harmonic basis functions to align with the edge
x_msg_source = self.sphharm.Rotate(x_source)
x_msg_target = self.sphharm.Rotate(x_target)
# Compute messages
x_message = torch.cat([x_msg_source, x_msg_target], dim=1)
x_message = self.act(self.fc1_edge_proj(x_message))
x_message = (
x_message.view(
-1, self.sphharm.num_y_rotations, self.hidden_channels
)
) * x_edge.view(-1, 1, self.hidden_channels)
x_message = x_message.view(-1, self.hidden_channels)
x_message = self.act(self.fc1_edge(x_message))
x_message = self.act(self.fc2_edge(x_message))
# Combine the rotated versions of the messages
x_message = x_message.view(-1, self.sphere_channels_reduce)
x_message = self.sphharm.CombineYRotations(x_message)
# Rotate the spherical harmonic basis functions back to global coordinate frame
x_message = self.sphharm.RotateInv(x_message)
return x_message
class DistanceBlock(torch.nn.Module):
def __init__(
self,
in_channels,
num_basis_functions: int,
distance_expansion,
max_num_elements: int,
act,
) -> None:
super(DistanceBlock, self).__init__()
self.in_channels = in_channels
self.distance_expansion = distance_expansion
self.act = act
self.num_basis_functions = num_basis_functions
self.max_num_elements = max_num_elements
self.num_edge_channels = self.num_basis_functions
self.fc1_dist = nn.Linear(self.in_channels, self.num_basis_functions)
self.source_embedding = nn.Embedding(
self.max_num_elements, self.num_basis_functions
)
self.target_embedding = nn.Embedding(
self.max_num_elements, self.num_basis_functions
)
nn.init.uniform_(self.source_embedding.weight.data, -0.001, 0.001)
nn.init.uniform_(self.target_embedding.weight.data, -0.001, 0.001)
self.fc1_edge_attr = nn.Linear(
self.num_edge_channels,
self.num_edge_channels,
)
def forward(self, edge_distance, source_element, target_element):
x_dist = self.distance_expansion(edge_distance)
x_dist = self.fc1_dist(x_dist)
source_embedding = self.source_embedding(source_element)
target_embedding = self.target_embedding(target_element)
x_edge = self.act(source_embedding + target_embedding + x_dist)
x_edge = self.act(self.fc1_edge_attr(x_edge))
return x_edge
| 30,218 | 35.060859 | 136 | py |
ocp | ocp-main/ocpmodels/models/scn/smearing.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
# Different encodings for the atom distance embeddings
class GaussianSmearing(torch.nn.Module):
def __init__(
self,
start: float = -5.0,
stop: float = 5.0,
num_gaussians: int = 50,
basis_width_scalar: float = 1.0,
) -> None:
super(GaussianSmearing, self).__init__()
self.num_output = num_gaussians
offset = torch.linspace(start, stop, num_gaussians)
self.coeff = (
-0.5 / (basis_width_scalar * (offset[1] - offset[0])).item() ** 2
)
self.register_buffer("offset", offset)
def forward(self, dist) -> torch.Tensor:
dist = dist.view(-1, 1) - self.offset.view(1, -1)
return torch.exp(self.coeff * torch.pow(dist, 2))
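# Illustrative usage sketch (the variable names here are hypothetical): expand
# scalar edge distances into a fixed-size Gaussian radial basis.
#   dist = torch.tensor([0.9, 1.7, 3.2])
#   smearing = GaussianSmearing(start=0.0, stop=5.0, num_gaussians=50)
#   feats = smearing(dist)  # shape (3, 50)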
class SigmoidSmearing(torch.nn.Module):
def __init__(
self, start=-5.0, stop=5.0, num_sigmoid=50, basis_width_scalar=1.0
) -> None:
super(SigmoidSmearing, self).__init__()
self.num_output = num_sigmoid
offset = torch.linspace(start, stop, num_sigmoid)
self.coeff = (basis_width_scalar / (offset[1] - offset[0])).item()
self.register_buffer("offset", offset)
def forward(self, dist) -> torch.Tensor:
exp_dist = self.coeff * (dist.view(-1, 1) - self.offset.view(1, -1))
return torch.sigmoid(exp_dist)
class LinearSigmoidSmearing(torch.nn.Module):
def __init__(
self,
start: float = -5.0,
stop: float = 5.0,
num_sigmoid: int = 50,
basis_width_scalar: float = 1.0,
) -> None:
super(LinearSigmoidSmearing, self).__init__()
self.num_output = num_sigmoid
offset = torch.linspace(start, stop, num_sigmoid)
self.coeff = (basis_width_scalar / (offset[1] - offset[0])).item()
self.register_buffer("offset", offset)
def forward(self, dist) -> torch.Tensor:
exp_dist = self.coeff * (dist.view(-1, 1) - self.offset.view(1, -1))
x_dist = torch.sigmoid(exp_dist) + 0.001 * exp_dist
return x_dist
class SiLUSmearing(torch.nn.Module):
def __init__(
self,
start: float = -5.0,
stop: float = 5.0,
num_output: int = 50,
basis_width_scalar: float = 1.0,
) -> None:
super(SiLUSmearing, self).__init__()
self.num_output = num_output
self.fc1 = nn.Linear(2, num_output)
self.act = nn.SiLU()
def forward(self, dist):
x_dist = dist.view(-1, 1)
x_dist = torch.cat([x_dist, torch.ones_like(x_dist)], dim=1)
x_dist = self.act(self.fc1(x_dist))
return x_dist
| 2,803 | 31.229885 | 77 | py |
ocp | ocp-main/ocpmodels/models/scn/spherical_harmonics.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import math
import os
import torch
try:
from e3nn import o3
from e3nn.o3 import FromS2Grid, ToS2Grid
# Borrowed from e3nn @ 0.4.0:
# https://github.com/e3nn/e3nn/blob/0.4.0/e3nn/o3/_wigner.py#L10
# _Jd is a list of tensors of shape (2l+1, 2l+1)
_Jd = torch.load(os.path.join(os.path.dirname(__file__), "Jd.pt"))
except (ImportError, FileNotFoundError):
logging.error(
"Invalid setup for SCN. Either the e3nn library or Jd.pt is missing."
)
pass
class SphericalHarmonicsHelper:
"""
Helper functions for spherical harmonics calculations and representations
Args:
lmax (int): Maximum degree of the spherical harmonics
mmax (int): Maximum order of the spherical harmonics
num_taps (int): Number of taps or rotations (1 or otherwise set automatically based on mmax)
num_bands (int): Number of bands used during message aggregation for the 1x1 pointwise convolution (1 or 2)
"""
def __init__(
self,
lmax: int,
mmax: int,
num_taps: int,
num_bands: int,
) -> None:
import sys
if "e3nn" not in sys.modules:
logging.error(
"You need to install the e3nn library to use Spherical Harmonics"
)
raise ImportError
super().__init__()
self.lmax = lmax
self.mmax = mmax
self.num_taps = num_taps
self.num_bands = num_bands
# Make sure lmax is large enough to support the num_bands
assert self.lmax - (self.num_bands - 1) >= 0
self.sphere_basis = (self.lmax + 1) ** 2
self.sphere_basis = int(self.sphere_basis)
self.sphere_basis_reduce = self.lmax + 1
for i in range(1, self.mmax + 1):
self.sphere_basis_reduce = self.sphere_basis_reduce + 2 * (
self.lmax + 1 - i
)
self.sphere_basis_reduce = int(self.sphere_basis_reduce)
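        # Worked example (for illustration): with lmax=6 and mmax=2 this gives
        # sphere_basis = 49 and sphere_basis_reduce = 7 + 2*6 + 2*5 = 29.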
def InitWignerDMatrix(self, edge_rot_mat) -> None:
self.device = edge_rot_mat.device
# Initialize matrix to combine the y-axis rotations during message passing
self.mapping_y_rot, self.y_rotations = self.InitYRotMapping()
self.num_y_rotations = len(self.y_rotations)
        # Conversion from basis to grid representations
self.grid_res = (self.lmax + 1) * 2
self.to_grid_shb = torch.tensor([], device=self.device)
self.to_grid_sha = torch.tensor([], device=self.device)
for b in range(self.num_bands):
l = self.lmax - b # noqa: E741
togrid = ToS2Grid(
l,
(self.grid_res, self.grid_res + 1),
normalization="integral",
device=self.device,
)
shb = togrid.shb
sha = togrid.sha
padding = torch.zeros(
shb.size()[0],
shb.size()[1],
self.sphere_basis - shb.size()[2],
device=self.device,
)
shb = torch.cat([shb, padding], dim=2)
self.to_grid_shb = torch.cat([self.to_grid_shb, shb], dim=0)
if b == 0:
self.to_grid_sha = sha
else:
self.to_grid_sha = torch.block_diag(self.to_grid_sha, sha)
self.to_grid_sha = self.to_grid_sha.view(
self.num_bands, self.grid_res + 1, -1
)
self.to_grid_sha = torch.transpose(self.to_grid_sha, 0, 1).contiguous()
self.to_grid_sha = self.to_grid_sha.view(
(self.grid_res + 1) * self.num_bands, -1
)
self.to_grid_shb = self.to_grid_shb.detach()
self.to_grid_sha = self.to_grid_sha.detach()
self.from_grid = FromS2Grid(
(self.grid_res, self.grid_res + 1),
self.lmax,
normalization="integral",
device=self.device,
)
for p in self.from_grid.parameters():
p.detach()
# Compute subsets of Wigner matrices to use for messages
wigner = torch.tensor([], device=self.device)
wigner_inv = torch.tensor([], device=self.device)
for y_rot in self.y_rotations:
# Compute rotation about y-axis
y_rot_mat = self.RotationMatrix(0, y_rot, 0)
y_rot_mat = y_rot_mat.repeat(len(edge_rot_mat), 1, 1)
# Add additional rotation about y-axis
rot_mat = torch.bmm(y_rot_mat, edge_rot_mat)
# Compute Wigner matrices corresponding to the 3x3 rotation matrices
wignerD = self.RotationToWignerDMatrix(rot_mat, 0, self.lmax)
basis_in = torch.tensor([], device=self.device)
basis_out = torch.tensor([], device=self.device)
start_l = 0
end_l = self.lmax + 1
for l in range(start_l, end_l): # noqa: E741
offset = l**2
basis_in = torch.cat(
[
basis_in,
torch.arange(2 * l + 1, device=self.device) + offset,
],
dim=0,
)
m_max = min(l, self.mmax)
basis_out = torch.cat(
[
basis_out,
torch.arange(-m_max, m_max + 1, device=self.device)
+ offset
+ l,
],
dim=0,
)
# Only keep the rows/columns of the matrices used given lmax and mmax
wignerD_reduce = wignerD[:, basis_out.long(), :]
wignerD_reduce = wignerD_reduce[:, :, basis_in.long()]
if y_rot == 0.0:
wigner_inv = (
torch.transpose(wignerD_reduce, 1, 2).contiguous().detach()
)
wigner = torch.cat([wigner, wignerD_reduce.unsqueeze(1)], dim=1)
wigner = wigner.view(-1, self.sphere_basis_reduce, self.sphere_basis)
self.wigner = wigner.detach()
self.wigner_inv = wigner_inv.detach()
# If num_taps is greater than 1, calculate how to combine the different samples.
    # Note that the e3nn code swaps the y- and z-axes relative to the description in the SCN paper.
def InitYRotMapping(self):
if self.mmax == 0:
y_rotations = torch.tensor([0.0], device=self.device)
num_y_rotations = 1
mapping_y_rot = torch.eye(
self.sphere_basis_reduce, device=self.device
)
if self.mmax == 1:
if self.num_taps == 1:
y_rotations = torch.tensor([0.0], device=self.device)
num_y_rotations = len(y_rotations)
mapping_y_rot = torch.eye(
len(y_rotations) * self.sphere_basis_reduce,
self.sphere_basis_reduce,
device=self.device,
)
else:
y_rotations = torch.tensor(
[0.0, 0.5 * math.pi, math.pi, 1.5 * math.pi],
device=self.device,
)
num_y_rotations = len(y_rotations)
mapping_y_rot = torch.zeros(
len(y_rotations) * self.sphere_basis_reduce,
self.sphere_basis_reduce,
device=self.device,
)
# m = 0
for l in range(0, self.lmax + 1): # noqa: E741
offset = (l - 1) * 3 + 2
if l == 0: # noqa: E741
offset = 0
for y in range(num_y_rotations):
mapping_y_rot[
offset + y * self.sphere_basis_reduce, offset
] = (1.0 / num_y_rotations)
# m = -1
for l in range(1, self.lmax + 1): # noqa: E741
offset = (l - 1) * 3 + 1
for y in range(num_y_rotations):
mapping_y_rot[
offset + y * self.sphere_basis_reduce, offset
] = (math.cos(y_rotations[y]) / num_y_rotations)
mapping_y_rot[
(offset + 2) + y * self.sphere_basis_reduce, offset
] = (math.sin(y_rotations[y]) / num_y_rotations)
# m = 1
for l in range(1, self.lmax + 1): # noqa: E741
offset = (l - 1) * 3 + 3
for y in range(num_y_rotations):
mapping_y_rot[
offset + y * self.sphere_basis_reduce, offset
] = (math.cos(y_rotations[y]) / num_y_rotations)
mapping_y_rot[
offset - 2 + y * self.sphere_basis_reduce, offset
] = (-math.sin(y_rotations[y]) / num_y_rotations)
return mapping_y_rot.detach(), y_rotations
# Simplified version of function from e3nn
def ToGrid(self, x, channels) -> torch.Tensor:
x = x.view(-1, self.sphere_basis, channels)
x_grid = torch.einsum("mbi,zic->zbmc", self.to_grid_shb, x)
x_grid = torch.einsum(
"am,zbmc->zbac", self.to_grid_sha, x_grid
).contiguous()
x_grid = x_grid.view(-1, self.num_bands * channels)
return x_grid
# Simplified version of function from e3nn
def FromGrid(self, x_grid, channels) -> torch.Tensor:
x_grid = x_grid.view(-1, self.grid_res, (self.grid_res + 1), channels)
x = torch.einsum("am,zbac->zbmc", self.from_grid.sha, x_grid)
x = torch.einsum("mbi,zbmc->zic", self.from_grid.shb, x).contiguous()
x = x.view(-1, channels)
return x
def CombineYRotations(self, x) -> torch.Tensor:
num_channels = x.size()[-1]
x = x.view(
-1, self.num_y_rotations * self.sphere_basis_reduce, num_channels
)
x = torch.einsum("abc, bd->adc", x, self.mapping_y_rot).contiguous()
return x
def Rotate(self, x) -> torch.Tensor:
num_channels = x.size()[2]
x = x.view(-1, 1, self.sphere_basis, num_channels).repeat(
1, self.num_y_rotations, 1, 1
)
x = x.view(-1, self.sphere_basis, num_channels)
# print('{} {}'.format(self.wigner.size(), x.size()))
x_rot = torch.bmm(self.wigner, x)
x_rot = x_rot.view(-1, self.sphere_basis_reduce * num_channels)
return x_rot
def FlipGrid(self, grid, num_channels: int) -> torch.Tensor:
        # Grid axes are (latitude, longitude): roll by half along longitude, then flip latitude
long_res = self.grid_res
grid = grid.view(-1, self.grid_res, self.grid_res, num_channels)
grid = torch.roll(grid, int(long_res // 2), 2)
flip_grid = torch.flip(grid, [1])
return flip_grid.view(-1, num_channels)
def RotateInv(self, x) -> torch.Tensor:
x_rot = torch.bmm(self.wigner_inv, x)
return x_rot
def RotateWigner(self, x, wigner) -> torch.Tensor:
x_rot = torch.bmm(wigner, x)
return x_rot
def RotationMatrix(
self, rot_x: float, rot_y: float, rot_z: float
) -> torch.Tensor:
m1, m2, m3 = (
torch.eye(3, device=self.device),
torch.eye(3, device=self.device),
torch.eye(3, device=self.device),
)
if rot_x:
degree = rot_x
sin, cos = math.sin(degree), math.cos(degree)
m1 = torch.tensor(
[[1, 0, 0], [0, cos, sin], [0, -sin, cos]], device=self.device
)
if rot_y:
degree = rot_y
sin, cos = math.sin(degree), math.cos(degree)
m2 = torch.tensor(
[[cos, 0, -sin], [0, 1, 0], [sin, 0, cos]], device=self.device
)
if rot_z:
degree = rot_z
sin, cos = math.sin(degree), math.cos(degree)
m3 = torch.tensor(
[[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]], device=self.device
)
matrix = torch.mm(torch.mm(m1, m2), m3)
matrix = matrix.view(1, 3, 3)
return matrix
def RotationToWignerDMatrix(self, edge_rot_mat, start_lmax, end_lmax):
x = edge_rot_mat @ edge_rot_mat.new_tensor([0.0, 1.0, 0.0])
alpha, beta = o3.xyz_to_angles(x)
R = (
o3.angles_to_matrix(
alpha, beta, torch.zeros_like(alpha)
).transpose(-1, -2)
@ edge_rot_mat
)
gamma = torch.atan2(R[..., 0, 2], R[..., 0, 0])
size = (end_lmax + 1) ** 2 - (start_lmax) ** 2
wigner = torch.zeros(len(alpha), size, size, device=self.device)
start = 0
for lmax in range(start_lmax, end_lmax + 1):
block = wigner_D(lmax, alpha, beta, gamma)
end = start + block.size()[1]
wigner[:, start:end, start:end] = block
start = end
return wigner.detach()
# Borrowed from e3nn @ 0.4.0:
# https://github.com/e3nn/e3nn/blob/0.4.0/e3nn/o3/_wigner.py#L37
#
# In 0.5.0, e3nn shifted to torch.matrix_exp which is significantly slower:
# https://github.com/e3nn/e3nn/blob/0.5.0/e3nn/o3/_wigner.py#L92
def wigner_D(l, alpha, beta, gamma):
if not l < len(_Jd):
raise NotImplementedError(
f"wigner D maximum l implemented is {len(_Jd) - 1}, send us an email to ask for more"
)
alpha, beta, gamma = torch.broadcast_tensors(alpha, beta, gamma)
J = _Jd[l].to(dtype=alpha.dtype, device=alpha.device)
Xa = _z_rot_mat(alpha, l)
Xb = _z_rot_mat(beta, l)
Xc = _z_rot_mat(gamma, l)
return Xa @ J @ Xb @ J @ Xc
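# Illustrative sanity check (not asserted anywhere in this file): wigner_D
# returns a batch of (2l+1) x (2l+1) rotation matrices in the spherical-harmonic
# basis. For zero Euler angles every z-rotation block is the identity and the
# J matrices cancel, so the result is the identity matrix.
#   alpha = beta = gamma = torch.zeros(1)
#   torch.allclose(wigner_D(1, alpha, beta, gamma), torch.eye(3))  # True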
def _z_rot_mat(angle, l):
shape, device, dtype = angle.shape, angle.device, angle.dtype
M = angle.new_zeros((*shape, 2 * l + 1, 2 * l + 1))
inds = torch.arange(0, 2 * l + 1, 1, device=device)
reversed_inds = torch.arange(2 * l, -1, -1, device=device)
frequencies = torch.arange(l, -l - 1, -1, dtype=dtype, device=device)
M[..., inds, reversed_inds] = torch.sin(frequencies * angle[..., None])
M[..., inds, inds] = torch.cos(frequencies * angle[..., None])
return M
| 14,397 | 36.397403 | 122 | py |
ocp | ocp-main/ocpmodels/models/scn/sampling.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch
### Methods for sampling points on a sphere
def CalcSpherePoints(num_points: int, device: str = "cpu") -> torch.Tensor:
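    # Golden-spiral (Fibonacci) construction: place num_points approximately
    # uniformly on the unit sphere, then reweight each point by an inverse
    # local-density estimate (computed below).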
goldenRatio = (1 + 5**0.5) / 2
i = torch.arange(num_points, device=device).view(-1, 1)
theta = 2 * math.pi * i / goldenRatio
phi = torch.arccos(1 - 2 * (i + 0.5) / num_points)
points = torch.cat(
[
torch.cos(theta) * torch.sin(phi),
torch.sin(theta) * torch.sin(phi),
torch.cos(phi),
],
dim=1,
)
    # weight the points by the inverse of their local density
pt_cross = points.view(1, -1, 3) - points.view(-1, 1, 3)
pt_cross = torch.sum(pt_cross**2, dim=2)
pt_cross = torch.exp(-pt_cross / (0.5 * 0.3))
scalar = 1.0 / torch.sum(pt_cross, dim=1)
scalar = num_points * scalar / torch.sum(scalar)
return points * (scalar.view(-1, 1))
def CalcSpherePointsRandom(num_points: int, device) -> torch.Tensor:
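    # Rejection sampling: draw points uniformly in the cube [-1, 1]^3 and
    # redraw any that fall (nearly) outside the unit ball. Note that `radius`
    # holds the squared norm, so the final division rescales by 1 / |p|^2.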
pts = 2.0 * (torch.rand(num_points, 3, device=device) - 0.5)
radius = torch.sum(pts**2, dim=1)
while torch.max(radius) > 1.0:
replace_pts = 2.0 * (torch.rand(num_points, 3, device=device) - 0.5)
replace_mask = radius.gt(0.99)
pts.masked_scatter_(replace_mask.view(-1, 1).repeat(1, 3), replace_pts)
radius = torch.sum(pts**2, dim=1)
return pts / radius.view(-1, 1)
| 1,527 | 31.510638 | 79 | py |
ocp | ocp-main/ocpmodels/models/scn/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/gemnet_gp/initializers.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
def _standardize(kernel):
"""
Makes sure that N*Var(W) = 1 and E[W] = 0
"""
eps = 1e-6
if len(kernel.shape) == 3:
axis = [0, 1] # last dimension is output dimension
else:
axis = 1
var, mean = torch.var_mean(kernel, dim=axis, unbiased=True, keepdim=True)
kernel = (kernel - mean) / (var + eps) ** 0.5
return kernel
def he_orthogonal_init(tensor):
"""
Generate a weight matrix with variance according to He (Kaiming) initialization.
    Based on a random (semi-)orthogonal matrix, since neural networks are
    expected to learn better when features are decorrelated, as stated in e.g.
    "Reducing overfitting in deep networks by decorrelating representations",
    "Dropout: a simple way to prevent neural networks from overfitting",
    "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks".
"""
tensor = torch.nn.init.orthogonal_(tensor)
if len(tensor.shape) == 3:
fan_in = tensor.shape[:-1].numel()
else:
fan_in = tensor.shape[1]
with torch.no_grad():
tensor.data = _standardize(tensor.data)
tensor.data *= (1 / fan_in) ** 0.5
return tensor
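# Illustrative usage sketch (the layer below is hypothetical): initialize a
# linear layer's weight in place with this scheme.
#   lin = torch.nn.Linear(128, 64)
#   he_orthogonal_init(lin.weight)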
| 1,385 | 27.875 | 92 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/gemnet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Optional
import numpy as np
import torch
from torch_cluster import radius_graph
from torch_scatter import scatter
from torch_sparse import SparseTensor
from ocpmodels.common import distutils, gp_utils
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
compute_neighbors,
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
from ocpmodels.models.base import BaseModel
from ocpmodels.modules.scaling.compat import load_scales_compat
from .layers.atom_update_block import OutputBlock
from .layers.base_layers import Dense
from .layers.efficient import EfficientInteractionDownProjection
from .layers.embedding_block import AtomEmbedding, EdgeEmbedding
from .layers.interaction_block import InteractionBlockTripletsOnly
from .layers.radial_basis import RadialBasis
from .layers.spherical_basis import CircularBasisLayer
from .utils import (
inner_product_normalized,
mask_neighbors,
ragged_range,
repeat_blocks,
)
@registry.register_model("gp_gemnet_t")
class GraphParallelGemNetT(BaseModel):
"""
GemNet-T, triplets-only variant of GemNet
Parameters
----------
num_atoms (int): Unused argument
bond_feat_dim (int): Unused argument
num_targets: int
Number of prediction targets.
num_spherical: int
Controls maximum frequency.
num_radial: int
Controls maximum frequency.
num_blocks: int
Number of building blocks to be stacked.
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
emb_size_trip: int
(Down-projected) Embedding size in the triplet message passing block.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
emb_size_bil_trip: int
Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.
num_before_skip: int
Number of residual blocks before the first skip connection.
num_after_skip: int
Number of residual blocks after the first skip connection.
num_concat: int
Number of residual blocks after the concatenation.
num_atom: int
Number of residual blocks in the atom embedding blocks.
regress_forces: bool
Whether to predict forces. Default: True
direct_forces: bool
If True predict forces based on aggregation of interatomic directions.
If False predict forces based on negative gradient of energy potential.
cutoff: float
Embedding cutoff for interactomic directions in Angstrom.
rbf: dict
Name and hyperparameters of the radial basis function.
envelope: dict
Name and hyperparameters of the envelope function.
cbf: dict
Name and hyperparameters of the cosine basis function.
extensive: bool
Whether the output should be extensive (proportional to the number of atoms)
output_init: str
Initialization method for the final dense layer.
activation: str
Name of the activation function.
scale_file: str
Path to the json file containing the scaling factors.
"""
def __init__(
self,
num_atoms: Optional[int],
bond_feat_dim: int,
num_targets: int,
num_spherical: int,
num_radial: int,
num_blocks: int,
emb_size_atom: int,
emb_size_edge: int,
emb_size_trip: int,
emb_size_rbf: int,
emb_size_cbf: int,
emb_size_bil_trip: int,
num_before_skip: int,
num_after_skip: int,
num_concat: int,
num_atom: int,
regress_forces: bool = True,
direct_forces: bool = False,
cutoff: float = 6.0,
max_neighbors: int = 50,
rbf: dict = {"name": "gaussian"},
envelope: dict = {"name": "polynomial", "exponent": 5},
cbf: dict = {"name": "spherical_harmonics"},
extensive: bool = True,
otf_graph: bool = False,
use_pbc: bool = True,
output_init: str = "HeOrthogonal",
activation: str = "swish",
scale_num_blocks: bool = False,
scatter_atoms: bool = True,
scale_file: Optional[str] = None,
):
super().__init__()
self.num_targets = num_targets
assert num_blocks > 0
self.num_blocks = num_blocks
self.extensive = extensive
self.scale_num_blocks = scale_num_blocks
self.scatter_atoms = scatter_atoms
self.cutoff = cutoff
assert self.cutoff <= 6 or otf_graph
self.max_neighbors = max_neighbors
assert self.max_neighbors == 50 or otf_graph
self.regress_forces = regress_forces
self.otf_graph = otf_graph
self.use_pbc = use_pbc
# GemNet variants
self.direct_forces = direct_forces
### ---------------------------------- Basis Functions ---------------------------------- ###
self.radial_basis = RadialBasis(
num_radial=num_radial,
cutoff=cutoff,
rbf=rbf,
envelope=envelope,
)
radial_basis_cbf3 = RadialBasis(
num_radial=num_radial,
cutoff=cutoff,
rbf=rbf,
envelope=envelope,
)
self.cbf_basis3 = CircularBasisLayer(
num_spherical,
radial_basis=radial_basis_cbf3,
cbf=cbf,
efficient=True,
)
### ------------------------------------------------------------------------------------- ###
### ------------------------------- Share Down Projections ------------------------------ ###
# Share down projection across all interaction blocks
self.mlp_rbf3 = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_cbf3 = EfficientInteractionDownProjection(
num_spherical, num_radial, emb_size_cbf
)
        # Share the dense layer of the atom embedding block across the interaction blocks
self.mlp_rbf_h = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_rbf_out = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
### ------------------------------------------------------------------------------------- ###
# Embedding block
self.atom_emb = AtomEmbedding(emb_size_atom)
self.edge_emb = EdgeEmbedding(
emb_size_atom, num_radial, emb_size_edge, activation=activation
)
out_blocks = []
int_blocks = []
# Interaction Blocks
interaction_block = InteractionBlockTripletsOnly # GemNet-(d)T
for i in range(num_blocks):
int_blocks.append(
interaction_block(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_trip=emb_size_trip,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
emb_size_bil_trip=emb_size_bil_trip,
num_before_skip=num_before_skip,
num_after_skip=num_after_skip,
num_concat=num_concat,
num_atom=num_atom,
activation=activation,
name=f"IntBlock_{i+1}",
)
)
for i in range(num_blocks + 1):
out_blocks.append(
OutputBlock(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=num_atom,
num_targets=num_targets,
activation=activation,
output_init=output_init,
direct_forces=direct_forces,
name=f"OutBlock_{i}",
)
)
self.out_blocks = torch.nn.ModuleList(out_blocks)
self.int_blocks = torch.nn.ModuleList(int_blocks)
load_scales_compat(self, scale_file)
def get_triplets(self, edge_index, num_atoms):
"""
Get all b->a for each edge c->a.
It is possible that b=c, as long as the edges are distinct.
Returns
-------
id3_ba: torch.Tensor, shape (num_triplets,)
Indices of input edge b->a of each triplet b->a<-c
id3_ca: torch.Tensor, shape (num_triplets,)
Indices of output edge c->a of each triplet b->a<-c
id3_ragged_idx: torch.Tensor, shape (num_triplets,)
Indices enumerating the copies of id3_ca for creating a padded matrix
"""
idx_s, idx_t = edge_index # c->a (source=c, target=a)
value = torch.arange(
idx_s.size(0), device=idx_s.device, dtype=idx_s.dtype
)
# Possibly contains multiple copies of the same edge (for periodic interactions)
adj = SparseTensor(
row=idx_t,
col=idx_s,
value=value,
sparse_sizes=(num_atoms, num_atoms),
)
adj_edges = adj[idx_t]
# Edge indices (b->a, c->a) for triplets.
id3_ba = adj_edges.storage.value()
id3_ca = adj_edges.storage.row()
# Remove self-loop triplets
# Compare edge indices, not atom indices to correctly handle periodic interactions
mask = id3_ba != id3_ca
id3_ba = id3_ba[mask]
id3_ca = id3_ca[mask]
# Get indices to reshape the neighbor indices b->a into a dense matrix.
# id3_ca has to be sorted for this to work.
num_triplets = torch.bincount(id3_ca, minlength=idx_s.size(0))
id3_ragged_idx = ragged_range(num_triplets)
return id3_ba, id3_ca, id3_ragged_idx
def select_symmetric_edges(self, tensor, mask, reorder_idx, inverse_neg):
# Mask out counter-edges
tensor_directed = tensor[mask]
# Concatenate counter-edges after normal edges
sign = 1 - 2 * inverse_neg
tensor_cat = torch.cat([tensor_directed, sign * tensor_directed])
# Reorder everything so the edges of every image are consecutive
tensor_ordered = tensor_cat[reorder_idx]
return tensor_ordered
def reorder_symmetric_edges(
self, edge_index, cell_offsets, neighbors, edge_dist, edge_vector
):
"""
Reorder edges to make finding counter-directional edges easier.
Some edges are only present in one direction in the data,
since every atom has a maximum number of neighbors. Since we only use i->j
edges here, we lose some j->i edges and add others by
making it symmetric.
We could fix this by merging edge_index with its counter-edges,
including the cell_offsets, and then running torch.unique.
But this does not seem worth it.
"""
# Generate mask
mask_sep_atoms = edge_index[0] < edge_index[1]
# Distinguish edges between the same (periodic) atom by ordering the cells
cell_earlier = (
(cell_offsets[:, 0] < 0)
| ((cell_offsets[:, 0] == 0) & (cell_offsets[:, 1] < 0))
| (
(cell_offsets[:, 0] == 0)
& (cell_offsets[:, 1] == 0)
& (cell_offsets[:, 2] < 0)
)
)
mask_same_atoms = edge_index[0] == edge_index[1]
mask_same_atoms &= cell_earlier
mask = mask_sep_atoms | mask_same_atoms
# Mask out counter-edges
edge_index_new = edge_index[mask[None, :].expand(2, -1)].view(2, -1)
# Concatenate counter-edges after normal edges
edge_index_cat = torch.cat(
[
edge_index_new,
torch.stack([edge_index_new[1], edge_index_new[0]], dim=0),
],
dim=1,
)
# Count remaining edges per image
neighbors = neighbors.to(edge_index.device)
batch_edge = torch.repeat_interleave(
torch.arange(neighbors.size(0), device=edge_index.device),
neighbors,
)
batch_edge = batch_edge[mask]
neighbors_new = 2 * torch.bincount(
batch_edge, minlength=neighbors.size(0)
)
# Create indexing array
edge_reorder_idx = repeat_blocks(
neighbors_new // 2,
repeats=2,
continuous_indexing=True,
repeat_inc=edge_index_new.size(1),
)
# Reorder everything so the edges of every image are consecutive
edge_index_new = edge_index_cat[:, edge_reorder_idx]
cell_offsets_new = self.select_symmetric_edges(
cell_offsets, mask, edge_reorder_idx, True
)
edge_dist_new = self.select_symmetric_edges(
edge_dist, mask, edge_reorder_idx, False
)
edge_vector_new = self.select_symmetric_edges(
edge_vector, mask, edge_reorder_idx, True
)
return (
edge_index_new,
cell_offsets_new,
neighbors_new,
edge_dist_new,
edge_vector_new,
)
def select_edges(
self,
data,
edge_index,
cell_offsets,
neighbors,
edge_dist,
edge_vector,
cutoff=None,
):
if cutoff is not None:
edge_mask = edge_dist <= cutoff
edge_index = edge_index[:, edge_mask]
cell_offsets = cell_offsets[edge_mask]
neighbors = mask_neighbors(neighbors, edge_mask)
edge_dist = edge_dist[edge_mask]
edge_vector = edge_vector[edge_mask]
empty_image = neighbors == 0
if torch.any(empty_image):
raise ValueError(
f"An image has no neighbors: id={data.id[empty_image]}, "
f"sid={data.sid[empty_image]}, fid={data.fid[empty_image]}"
)
return edge_index, cell_offsets, neighbors, edge_dist, edge_vector
def generate_interaction_graph(self, data):
num_atoms = data.atomic_numbers.size(0)
(
edge_index,
D_st,
distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(data)
# These vectors actually point in the opposite direction.
# But we want to use col as idx_t for efficient aggregation.
V_st = -distance_vec / D_st[:, None]
# Mask interaction edges if required
if self.otf_graph or np.isclose(self.cutoff, 6):
select_cutoff = None
else:
select_cutoff = self.cutoff
(edge_index, cell_offsets, neighbors, D_st, V_st,) = self.select_edges(
data=data,
edge_index=edge_index,
cell_offsets=cell_offsets,
neighbors=neighbors,
edge_dist=D_st,
edge_vector=V_st,
cutoff=select_cutoff,
)
(
edge_index,
cell_offsets,
neighbors,
D_st,
V_st,
) = self.reorder_symmetric_edges(
edge_index, cell_offsets, neighbors, D_st, V_st
)
# Indices for swapping c->a and a->c (for symmetric MP)
block_sizes = neighbors // 2
id_swap = repeat_blocks(
block_sizes,
repeats=2,
continuous_indexing=False,
start_idx=block_sizes[0],
block_inc=block_sizes[:-1] + block_sizes[1:],
repeat_inc=-block_sizes,
)
id3_ba, id3_ca, id3_ragged_idx = self.get_triplets(
edge_index, num_atoms=num_atoms
)
return (
edge_index,
neighbors,
D_st,
V_st,
id_swap,
id3_ba,
id3_ca,
id3_ragged_idx,
)
@conditional_grad(torch.enable_grad())
def forward(self, data):
pos = data.pos
batch = data.batch
atomic_numbers = data.atomic_numbers.long()
if self.regress_forces and not self.direct_forces:
pos.requires_grad_(True)
(
edge_index,
neighbors,
D_st,
V_st,
id_swap,
id3_ba,
id3_ca,
id3_ragged_idx,
) = self.generate_interaction_graph(data)
idx_s, idx_t = edge_index
# Graph Parallel: Precompute Kmax so all processes have the same value
Kmax = torch.max(
torch.max(id3_ragged_idx) + 1,
torch.tensor(0).to(id3_ragged_idx.device),
)
# Graph Parallel: Scatter triplets (consistent with edge splits)
edge_partition = gp_utils.scatter_to_model_parallel_region(
torch.arange(edge_index.size(1))
)
triplet_partition = torch.where(
torch.logical_and(
id3_ca >= edge_partition.min(), id3_ca <= edge_partition.max()
)
)[0]
id3_ba = id3_ba[triplet_partition]
id3_ca = id3_ca[triplet_partition]
id3_ragged_idx = id3_ragged_idx[triplet_partition]
edge_offset = edge_partition.min()
# Calculate triplet angles
cosφ_cab = inner_product_normalized(V_st[id3_ca], V_st[id3_ba])
rad_cbf3, cbf3 = self.cbf_basis3(D_st, cosφ_cab, id3_ca)
# TODO: Only do this for the partitioned edges
cbf3 = self.mlp_cbf3(rad_cbf3, cbf3, id3_ca, id3_ragged_idx, Kmax)
        # Graph Parallel: Scatter edges
D_st = gp_utils.scatter_to_model_parallel_region(D_st, dim=0)
cbf3 = (
gp_utils.scatter_to_model_parallel_region(cbf3[0], dim=0),
gp_utils.scatter_to_model_parallel_region(cbf3[1], dim=0),
)
idx_s = gp_utils.scatter_to_model_parallel_region(idx_s, dim=0)
idx_t_full = idx_t
idx_t = gp_utils.scatter_to_model_parallel_region(idx_t, dim=0)
rbf = self.radial_basis(D_st)
        # Graph Parallel: Scatter nodes
nAtoms = atomic_numbers.shape[0]
if self.scatter_atoms:
atomic_numbers = gp_utils.scatter_to_model_parallel_region(
atomic_numbers, dim=0
)
# Embedding block
h = self.atom_emb(atomic_numbers)
# (nAtoms, emb_size_atom)
m = self.edge_emb(h, rbf, idx_s, idx_t) # (nEdges, emb_size_edge)
rbf3 = self.mlp_rbf3(rbf)
rbf_h = self.mlp_rbf_h(rbf)
rbf_out = self.mlp_rbf_out(rbf)
E_t, F_st = self.out_blocks[0](nAtoms, m, rbf_out, idx_t)
# (nAtoms, num_targets), (nEdges, num_targets)
for i in range(self.num_blocks):
# Interaction block
h, m = self.int_blocks[i](
h=h,
m=m,
rbf3=rbf3,
cbf3=cbf3,
id3_ragged_idx=id3_ragged_idx,
id_swap=id_swap,
id3_ba=id3_ba,
id3_ca=id3_ca,
rbf_h=rbf_h,
idx_s=idx_s,
idx_t=idx_t,
edge_offset=edge_offset,
Kmax=Kmax,
nAtoms=nAtoms,
) # (nAtoms, emb_size_atom), (nEdges, emb_size_edge)
E, F = self.out_blocks[i + 1](nAtoms, m, rbf_out, idx_t)
# (nAtoms, num_targets), (nEdges, num_targets)
F_st += F
E_t += E
if self.scale_num_blocks:
F_st = F_st / (self.num_blocks + 1)
E_t = E_t / (self.num_blocks + 1)
# Graph Parallel: Gather F_st
F_st = gp_utils.gather_from_model_parallel_region(F_st, dim=0)
nMolecules = torch.max(batch) + 1
if self.extensive:
E_t = gp_utils.gather_from_model_parallel_region(E_t, dim=0)
E_t = scatter(
E_t, batch, dim=0, dim_size=nMolecules, reduce="add"
) # (nMolecules, num_targets)
else:
E_t = scatter(
E_t, batch, dim=0, dim_size=nMolecules, reduce="mean"
) # (nMolecules, num_targets)
if self.regress_forces:
if self.direct_forces:
# map forces in edge directions
F_st_vec = F_st[:, :, None] * V_st[:, None, :]
# (nEdges, num_targets, 3)
F_t = scatter(
F_st_vec,
idx_t_full,
dim=0,
dim_size=data.atomic_numbers.size(0),
reduce="add",
) # (nAtoms, num_targets, 3)
F_t = F_t.squeeze(1) # (nAtoms, 3)
else:
if self.num_targets > 1:
forces = []
for i in range(self.num_targets):
# maybe this can be solved differently
forces += [
-torch.autograd.grad(
E_t[:, i].sum(), pos, create_graph=True
)[0]
]
F_t = torch.stack(forces, dim=1)
# (nAtoms, num_targets, 3)
else:
F_t = -torch.autograd.grad(
E_t.sum(), pos, create_graph=True
)[0]
# (nAtoms, 3)
return E_t, F_t # (nMolecules, num_targets), (nAtoms, 3)
else:
return E_t
@property
def num_params(self):
return sum(p.numel() for p in self.parameters())
| 22,203 | 33.16 | 118 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import json
from typing import Optional, Tuple
import torch
from torch_scatter import segment_csr
def read_json(path: str):
""""""
if not path.endswith(".json"):
raise UserWarning(f"Path {path} is not a json-path.")
with open(path, "r") as f:
content = json.load(f)
return content
def update_json(path: str, data) -> None:
""""""
if not path.endswith(".json"):
raise UserWarning(f"Path {path} is not a json-path.")
content = read_json(path)
content.update(data)
write_json(path, content)
def write_json(path: str, data) -> None:
""""""
if not path.endswith(".json"):
raise UserWarning(f"Path {path} is not a json-path.")
with open(path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def read_value_json(path: str, key):
""""""
content = read_json(path)
if key in content.keys():
return content[key]
else:
return None
def ragged_range(sizes):
"""Multiple concatenated ranges.
Examples
--------
sizes = [1 4 2 3]
Return: [0 0 1 2 3 0 1 0 1 2]
"""
assert sizes.dim() == 1
if sizes.sum() == 0:
return sizes.new_empty(0)
# Remove 0 sizes
sizes_nonzero = sizes > 0
if not torch.all(sizes_nonzero):
sizes = torch.masked_select(sizes, sizes_nonzero)
# Initialize indexing array with ones as we need to setup incremental indexing
# within each group when cumulatively summed at the final stage.
id_steps = torch.ones(sizes.sum(), dtype=torch.long, device=sizes.device)
id_steps[0] = 0
insert_index = sizes[:-1].cumsum(0)
insert_val = (1 - sizes)[:-1]
# Assign index-offsetting values
id_steps[insert_index] = insert_val
# Finally index into input array for the group repeated o/p
res = id_steps.cumsum(0)
return res
def repeat_blocks(
sizes: torch.Tensor,
repeats,
continuous_indexing: bool = True,
start_idx: int = 0,
block_inc: int = 0,
repeat_inc: int = 0,
) -> torch.Tensor:
"""Repeat blocks of indices.
Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements
continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block,
either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition,
either global or per block
Examples
--------
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
repeat_inc = 4
Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
start_idx = 5
Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
block_inc = 1
Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
Return: [0 1 0 1 5 6 5 6]
"""
assert sizes.dim() == 1
assert all(sizes >= 0)
# Remove 0 sizes
sizes_nonzero = sizes > 0
if not torch.all(sizes_nonzero):
assert block_inc == 0 # Implementing this is not worth the effort
sizes = torch.masked_select(sizes, sizes_nonzero)
if isinstance(repeats, torch.Tensor):
repeats = torch.masked_select(repeats, sizes_nonzero)
if isinstance(repeat_inc, torch.Tensor):
repeat_inc = torch.masked_select(repeat_inc, sizes_nonzero)
if isinstance(repeats, torch.Tensor):
assert all(repeats >= 0)
insert_dummy = repeats[0] == 0
if insert_dummy:
one = sizes.new_ones(1)
zero = sizes.new_zeros(1)
sizes = torch.cat((one, sizes))
repeats = torch.cat((one, repeats))
if isinstance(block_inc, torch.Tensor):
block_inc = torch.cat((zero, block_inc))
if isinstance(repeat_inc, torch.Tensor):
repeat_inc = torch.cat((zero, repeat_inc))
else:
assert repeats >= 0
insert_dummy = False
# Get repeats for each group using group lengths/sizes
r1 = torch.repeat_interleave(
torch.arange(len(sizes), device=sizes.device), repeats
)
# Get total size of output array, as needed to initialize output indexing array
N = int((sizes * repeats).sum().item())
# Initialize indexing array with ones as we need to setup incremental indexing
# within each group when cumulatively summed at the final stage.
# Two steps here:
# 1. Within each group, we have multiple sequences, so setup the offsetting
# at each sequence lengths by the seq. lengths preceding those.
id_ar = torch.ones(N, dtype=torch.long, device=sizes.device)
id_ar[0] = 0
insert_index = sizes[r1[:-1]].cumsum(0)
insert_val = (1 - sizes)[r1[:-1]]
if isinstance(repeats, torch.Tensor) and torch.any(repeats == 0):
diffs = r1[1:] - r1[:-1]
indptr = torch.cat((sizes.new_zeros(1), diffs.cumsum(0)))
if continuous_indexing:
# If a group was skipped (repeats=0) we need to add its size
insert_val += segment_csr(sizes[: r1[-1]], indptr, reduce="sum")
# Add block increments
if isinstance(block_inc, torch.Tensor):
insert_val += segment_csr(
block_inc[: r1[-1]], indptr, reduce="sum"
)
else:
insert_val += block_inc * (indptr[1:] - indptr[:-1])
if insert_dummy:
insert_val[0] -= block_inc
else:
idx = r1[1:] != r1[:-1]
if continuous_indexing:
# 2. For each group, make sure the indexing starts from the next group's
# first element. So, simply assign 1s there.
insert_val[idx] = 1
# Add block increments
insert_val[idx] += block_inc
# Add repeat_inc within each group
if isinstance(repeat_inc, torch.Tensor):
insert_val += repeat_inc[r1[:-1]]
if isinstance(repeats, torch.Tensor):
repeat_inc_inner = repeat_inc[repeats > 0][:-1]
else:
repeat_inc_inner = repeat_inc[:-1]
else:
insert_val += repeat_inc
repeat_inc_inner = repeat_inc
# Subtract the increments between groups
if isinstance(repeats, torch.Tensor):
repeats_inner = repeats[repeats > 0][:-1]
else:
repeats_inner = repeats
insert_val[r1[1:] != r1[:-1]] -= repeat_inc_inner * repeats_inner
# Assign index-offsetting values
id_ar[insert_index] = insert_val
if insert_dummy:
id_ar = id_ar[1:]
if continuous_indexing:
id_ar[0] -= 1
# Set start index now, in case of insertion due to leading repeats=0
id_ar[0] += start_idx
# Finally index into input array for the group repeated o/p
res = id_ar.cumsum(0)
return res
def calculate_interatomic_vectors(
R: torch.Tensor,
id_s: torch.Tensor,
id_t: torch.Tensor,
offsets_st: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Calculate the vectors connecting the given atom pairs,
considering offsets from periodic boundary conditions (PBC).
Parameters
----------
R: Tensor, shape = (nAtoms, 3)
Atom positions.
id_s: Tensor, shape = (nEdges,)
Indices of the source atom of the edges.
id_t: Tensor, shape = (nEdges,)
Indices of the target atom of the edges.
    offsets_st: Tensor, shape = (nEdges, 3)
PBC offsets of the edges.
Subtract this from the correct direction.
Returns
-------
(D_st, V_st): tuple
D_st: Tensor, shape = (nEdges,)
Distance from atom t to s.
        V_st: Tensor, shape = (nEdges, 3)
Unit direction from atom t to s.
"""
Rs = R[id_s]
Rt = R[id_t]
if offsets_st is None:
V_st = Rt - Rs # s -> t
else:
V_st = Rt - Rs + offsets_st # s -> t
D_st = torch.sqrt(torch.sum(V_st**2, dim=1))
V_st = V_st / D_st[..., None]
return D_st, V_st
def inner_product_normalized(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Calculate the inner product between the given normalized vectors,
giving a result between -1 and 1.
"""
return torch.sum(x * y, dim=-1).clamp(min=-1, max=1)
def mask_neighbors(neighbors, edge_mask):
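    # Recompute per-image neighbor (edge) counts after an edge mask has been
    # applied, by summing the boolean mask within each image's edge segment.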
neighbors_old_indptr = torch.cat([neighbors.new_zeros(1), neighbors])
neighbors_old_indptr = torch.cumsum(neighbors_old_indptr, dim=0)
neighbors = segment_csr(edge_mask.long(), neighbors_old_indptr)
return neighbors
| 9,439 | 32.006993 | 128 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/base_layers.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import Optional
import torch
from ..initializers import he_orthogonal_init
class Dense(torch.nn.Module):
"""
Combines dense layer with scaling for swish activation.
Parameters
----------
units: int
Output embedding size.
activation: str
Name of the activation function to use.
bias: bool
True if use bias.
"""
def __init__(
self,
num_in_features: int,
num_out_features: int,
bias: bool = False,
activation: Optional[str] = None,
) -> None:
super().__init__()
self.linear = torch.nn.Linear(
num_in_features, num_out_features, bias=bias
)
self.reset_parameters()
if isinstance(activation, str):
activation = activation.lower()
if activation in ["swish", "silu"]:
self._activation = ScaledSiLU()
elif activation == "siqu":
self._activation = SiQU()
elif activation is None:
self._activation = torch.nn.Identity()
else:
raise NotImplementedError(
"Activation function not implemented for GemNet (yet)."
)
def reset_parameters(self, initializer=he_orthogonal_init) -> None:
initializer(self.linear.weight)
if self.linear.bias is not None:
self.linear.bias.data.fill_(0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.linear(x)
x = self._activation(x)
return x
class ScaledSiLU(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.scale_factor = 1 / 0.6
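        # 1/0.6 approximately restores the activation scale: for roughly
        # standard-normal inputs, SiLU outputs have a magnitude of about 0.6
        # (a heuristic note, not a guarantee).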
self._activation = torch.nn.SiLU()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._activation(x) * self.scale_factor
class SiQU(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self._activation = torch.nn.SiLU()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x * self._activation(x)
class ResidualLayer(torch.nn.Module):
"""
Residual block with output scaled by 1/sqrt(2).
Parameters
----------
units: int
Output embedding size.
nLayers: int
Number of dense layers.
layer_kwargs: str
Keyword arguments for initializing the layers.
"""
def __init__(
self, units: int, nLayers: int = 2, layer=Dense, **layer_kwargs
) -> None:
super().__init__()
self.dense_mlp = torch.nn.Sequential(
*[
layer(
in_features=units,
out_features=units,
bias=False,
**layer_kwargs
)
for _ in range(nLayers)
]
)
self.inv_sqrt_2 = 1 / math.sqrt(2)
def forward(self, input: torch.Tensor) -> torch.Tensor:
x = self.dense_mlp(input)
x = input + x
x = x * self.inv_sqrt_2
return x
| 3,247 | 25.406504 | 71 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/atom_update_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Optional
import torch
from torch_scatter import scatter
from torch_scatter.utils import broadcast
from ocpmodels.common import gp_utils
from ocpmodels.modules.scaling import ScaleFactor
from ..initializers import he_orthogonal_init
from .base_layers import Dense, ResidualLayer
def scatter_sum(
src: torch.Tensor,
index: torch.Tensor,
dim: int = -1,
out: Optional[torch.Tensor] = None,
dim_size: Optional[int] = None,
) -> torch.Tensor:
"""
Clone of torch_scatter.scatter_sum but without in-place operations
"""
index = broadcast(index, src, dim)
if out is None:
size = list(src.size())
if dim_size is not None:
size[dim] = dim_size
elif index.numel() == 0:
size[dim] = 0
else:
size[dim] = int(index.max()) + 1
out = torch.zeros(size, dtype=src.dtype, device=src.device)
return torch.scatter_add(out, dim, index, src)
else:
return out.scatter_add(dim, index, src)
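# Illustrative usage sketch (tensor values are hypothetical):
#   src = torch.tensor([1.0, 2.0, 3.0])
#   index = torch.tensor([0, 0, 1])
#   scatter_sum(src, index, dim=0)  # -> tensor([3., 3.])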
class AtomUpdateBlock(torch.nn.Module):
"""
Aggregate the message embeddings of the atoms
Parameters
----------
emb_size_atom: int
Embedding size of the atoms.
    emb_size_edge: int
Embedding size of the edges.
nHidden: int
Number of residual blocks.
activation: callable/str
Name of the activation function to use in the dense layers.
"""
def __init__(
self,
emb_size_atom: int,
emb_size_edge: int,
emb_size_rbf: int,
nHidden: int,
activation: Optional[str] = None,
name: str = "atom_update",
) -> None:
super().__init__()
self.name = name
self.dense_rbf = Dense(
emb_size_rbf, emb_size_edge, activation=None, bias=False
)
self.scale_sum = ScaleFactor(name + "_sum")
self.layers = self.get_mlp(
emb_size_edge, emb_size_atom, nHidden, activation
)
def get_mlp(
self,
units_in: int,
units: int,
nHidden: int,
activation: Optional[str],
):
dense1 = Dense(units_in, units, activation=activation, bias=False)
mlp = [dense1]
res = [
ResidualLayer(units, nLayers=2, activation=activation)
for i in range(nHidden)
]
mlp = mlp + res
return torch.nn.ModuleList(mlp)
    def forward(self, nAtoms: int, m: torch.Tensor, rbf, id_j):
"""
Returns
-------
h: torch.Tensor, shape=(nAtoms, emb_size_atom)
Atom embedding.
"""
mlp_rbf = self.dense_rbf(rbf) # (nEdges, emb_size_edge)
x = m * mlp_rbf
# Graph Parallel: Local node aggregation
x2 = scatter(x, id_j, dim=0, dim_size=nAtoms, reduce="sum")
# Graph Parallel: Global node aggregation
x2 = gp_utils.reduce_from_model_parallel_region(x2)
x2 = gp_utils.scatter_to_model_parallel_region(x2, dim=0)
# (nAtoms, emb_size_edge)
x = self.scale_sum(x2, ref=m)
for layer in self.layers:
x = layer(x) # (nAtoms, emb_size_atom)
return x
class OutputBlock(AtomUpdateBlock):
"""
Combines the atom update block and subsequent final dense layer.
Parameters
----------
emb_size_atom: int
Embedding size of the atoms.
    emb_size_edge: int
Embedding size of the edges.
nHidden: int
Number of residual blocks.
num_targets: int
Number of targets.
activation: str
Name of the activation function to use in the dense layers except for the final dense layer.
direct_forces: bool
If true directly predict forces without taking the gradient of the energy potential.
    output_init: str
Kernel initializer of the final dense layer.
"""
def __init__(
self,
emb_size_atom: int,
emb_size_edge: int,
emb_size_rbf: int,
nHidden: int,
num_targets: int,
activation: Optional[str] = None,
direct_forces: bool = True,
output_init: str = "HeOrthogonal",
name: str = "output",
**kwargs,
) -> None:
super().__init__(
name=name,
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=nHidden,
activation=activation,
**kwargs,
)
assert isinstance(output_init, str)
self.output_init = output_init.lower()
self.direct_forces = direct_forces
self.seq_energy = self.layers # inherited from parent class
self.out_energy = Dense(
emb_size_atom, num_targets, bias=False, activation=None
)
if self.direct_forces:
self.scale_rbf_F = ScaleFactor(name + "_had")
self.seq_forces = self.get_mlp(
emb_size_edge, emb_size_edge, nHidden, activation
)
self.out_forces = Dense(
emb_size_edge, num_targets, bias=False, activation=None
)
self.dense_rbf_F = Dense(
emb_size_rbf, emb_size_edge, activation=None, bias=False
)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.output_init == "heorthogonal":
self.out_energy.reset_parameters(he_orthogonal_init)
if self.direct_forces:
self.out_forces.reset_parameters(he_orthogonal_init)
elif self.output_init == "zeros":
self.out_energy.reset_parameters(torch.nn.init.zeros_)
if self.direct_forces:
self.out_forces.reset_parameters(torch.nn.init.zeros_)
else:
raise UserWarning(f"Unknown output_init: {self.output_init}")
def forward(self, nAtoms, m, rbf, id_j):
"""
Returns
-------
(E, F): tuple
- E: torch.Tensor, shape=(nAtoms, num_targets)
- F: torch.Tensor, shape=(nEdges, num_targets)
Energy and force prediction
"""
# -------------------------------------- Energy Prediction -------------------------------------- #
rbf_emb_E = self.dense_rbf(rbf) # (nEdges, emb_size_edge)
x = m * rbf_emb_E
# Graph Parallel: Local Node aggregation
x_E = scatter(x, id_j, dim=0, dim_size=nAtoms, reduce="sum")
# Graph Parallel: Global Node aggregation
x_E = gp_utils.reduce_from_model_parallel_region(x_E)
x_E = gp_utils.scatter_to_model_parallel_region(x_E, dim=0)
# (nAtoms, emb_size_edge)
x_E = self.scale_sum(x_E, ref=m)
for layer in self.seq_energy:
x_E = layer(x_E) # (nAtoms, emb_size_atom)
x_E = self.out_energy(x_E) # (nAtoms, num_targets)
# --------------------------------------- Force Prediction -------------------------------------- #
if self.direct_forces:
x_F = m
for _, layer in enumerate(self.seq_forces):
x_F = layer(x_F) # (nEdges, emb_size_edge)
rbf_emb_F = self.dense_rbf_F(rbf) # (nEdges, emb_size_edge)
x_F_rbf = x_F * rbf_emb_F
x_F = self.scale_rbf_F(x_F_rbf, ref=x_F)
x_F = self.out_forces(x_F) # (nEdges, num_targets)
else:
x_F = 0
# ----------------------------------------------------------------------------------------------- #
return x_E, x_F
| 7,813 | 30.256 | 107 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/embedding_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Optional
import numpy as np
import torch
from ocpmodels.common import gp_utils
from .base_layers import Dense
class AtomEmbedding(torch.nn.Module):
"""
Initial atom embeddings based on the atom type
Parameters
----------
emb_size: int
Atom embeddings size
"""
def __init__(self, emb_size: int) -> None:
super().__init__()
self.emb_size = emb_size
# Atom embeddings: We go up to Bi (83).
self.embeddings = torch.nn.Embedding(83, emb_size)
# init by uniform distribution
torch.nn.init.uniform_(
self.embeddings.weight, a=-np.sqrt(3), b=np.sqrt(3)
)
def forward(self, Z):
"""
Returns
-------
h: torch.Tensor, shape=(nAtoms, emb_size)
Atom embeddings.
"""
h = self.embeddings(Z - 1) # -1 because Z.min()=1 (==Hydrogen)
return h
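# Illustrative usage sketch (atomic numbers below are arbitrary examples):
#   emb = AtomEmbedding(emb_size=128)
#   Z = torch.tensor([1, 8, 6])  # H, O, C
#   h = emb(Z)                   # shape (3, 128)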
class EdgeEmbedding(torch.nn.Module):
"""
Edge embedding based on the concatenation of atom embeddings and subsequent dense layer.
Parameters
----------
emb_size: int
Embedding size after the dense layer.
activation: str
Activation function used in the dense layer.
"""
def __init__(
self,
atom_features: int,
edge_features: int,
num_out_features: int,
activation: Optional[str] = None,
) -> None:
super().__init__()
in_features = 2 * atom_features + edge_features
self.dense = Dense(
in_features, num_out_features, activation=activation, bias=False
)
def forward(
self,
h,
m_rbf,
idx_s,
idx_t,
):
"""
Arguments
---------
h
m_rbf: shape (nEdges, nFeatures)
in embedding block: m_rbf = rbf ; In interaction block: m_rbf = m_st
idx_s
idx_t
Returns
-------
m_st: torch.Tensor, shape=(nEdges, emb_size)
Edge embeddings.
"""
h = gp_utils.gather_from_model_parallel_region(h, dim=0)
h_s = h[idx_s] # shape=(nEdges, emb_size)
h_t = h[idx_t] # shape=(nEdges, emb_size)
m_st = torch.cat(
[h_s, h_t, m_rbf], dim=-1
) # (nEdges, 2*emb_size+nFeatures)
m_st = self.dense(m_st) # (nEdges, emb_size)
return m_st
| 2,621 | 23.735849 | 92 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/radial_basis.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import Dict, Union
import numpy as np
import torch
from scipy.special import binom
from ocpmodels.common.typing import assert_is_instance
from torch_geometric.nn.models.schnet import GaussianSmearing
class PolynomialEnvelope(torch.nn.Module):
"""
Polynomial envelope function that ensures a smooth cutoff.
Parameters
----------
exponent: int
Exponent of the envelope function.
"""
def __init__(self, exponent) -> None:
super().__init__()
assert exponent > 0
self.p = exponent
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
env_val = (
1
+ self.a * d_scaled**self.p
+ self.b * d_scaled ** (self.p + 1)
+ self.c * d_scaled ** (self.p + 2)
)
return torch.where(d_scaled < 1, env_val, torch.zeros_like(d_scaled))
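# Illustrative sketch (added, not part of the original file): with exponent p=5 the
# envelope equals 1 at d_scaled=0, decays smoothly, and is exactly 0 for d_scaled >= 1.
def _demo_polynomial_envelope() -> None:
    env = PolynomialEnvelope(exponent=5)
    d_scaled = torch.tensor([0.0, 0.5, 1.2])
    values = env(d_scaled)
    assert values[0] == 1.0  # no damping at zero distance
    assert values[-1] == 0.0  # hard zero beyond the cutoff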
class ExponentialEnvelope(torch.nn.Module):
"""
Exponential envelope function that ensures a smooth cutoff,
as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021.
SpookyNet: Learning Force Fields with Electronic Degrees of Freedom
and Nonlocal Effects
"""
def __init__(self) -> None:
super().__init__()
def forward(self, d_scaled) -> torch.Tensor:
env_val = torch.exp(
-(d_scaled**2) / ((1 - d_scaled) * (1 + d_scaled))
)
return torch.where(d_scaled < 1, env_val, torch.zeros_like(d_scaled))
class SphericalBesselBasis(torch.nn.Module):
"""
1D spherical Bessel basis
Parameters
----------
num_radial: int
Controls maximum frequency.
cutoff: float
Cutoff distance in Angstrom.
"""
def __init__(
self,
num_radial: int,
cutoff: float,
) -> None:
super().__init__()
self.norm_const = math.sqrt(2 / (cutoff**3))
# cutoff ** 3 to counteract dividing by d_scaled = d / cutoff
# Initialize frequencies at canonical positions
self.frequencies = torch.nn.Parameter(
data=torch.tensor(
np.pi * np.arange(1, num_radial + 1, dtype=np.float32)
),
requires_grad=True,
)
def forward(self, d_scaled):
return (
self.norm_const
/ d_scaled[:, None]
* torch.sin(self.frequencies * d_scaled[:, None])
) # (num_edges, num_radial)
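# Illustrative sketch (added, not part of the original file): each scaled distance is
# expanded into num_radial features proportional to sin(n*pi*d)/d, one row per edge.
# The sizes below are arbitrary example values.
def _demo_spherical_bessel_basis() -> None:
    basis = SphericalBesselBasis(num_radial=4, cutoff=6.0)
    d_scaled = torch.tensor([0.1, 0.5, 0.9])
    assert basis(d_scaled).shape == (3, 4)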
class BernsteinBasis(torch.nn.Module):
"""
Bernstein polynomial basis,
as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021.
SpookyNet: Learning Force Fields with Electronic Degrees of Freedom
and Nonlocal Effects
Parameters
----------
num_radial: int
Controls maximum frequency.
pregamma_initial: float
Initial value of exponential coefficient gamma.
Default: gamma = 0.5 * a_0**-1 = 0.94486,
        inverse softplus -> pregamma = log(e**gamma - 1) = 0.45264
"""
def __init__(
self,
num_radial: int,
pregamma_initial: float = 0.45264,
) -> None:
super().__init__()
prefactor = binom(num_radial - 1, np.arange(num_radial))
self.register_buffer(
"prefactor",
torch.tensor(prefactor, dtype=torch.float),
persistent=False,
)
self.pregamma = torch.nn.Parameter(
data=torch.tensor(pregamma_initial, dtype=torch.float),
requires_grad=True,
)
self.softplus = torch.nn.Softplus()
exp1 = torch.arange(num_radial)
self.register_buffer("exp1", exp1[None, :], persistent=False)
exp2 = num_radial - 1 - exp1
self.register_buffer("exp2", exp2[None, :], persistent=False)
def forward(self, d_scaled) -> torch.Tensor:
gamma = self.softplus(self.pregamma) # constrain to positive
exp_d = torch.exp(-gamma * d_scaled)[:, None]
return (
self.prefactor * (exp_d**self.exp1) * ((1 - exp_d) ** self.exp2)
)
class RadialBasis(torch.nn.Module):
"""
Parameters
----------
num_radial: int
Controls maximum frequency.
cutoff: float
Cutoff distance in Angstrom.
rbf: dict = {"name": "gaussian"}
Basis function and its hyperparameters.
envelope: dict = {"name": "polynomial", "exponent": 5}
Envelope function and its hyperparameters.
"""
def __init__(
self,
num_radial: int,
cutoff: float,
rbf: Dict[str, str] = {"name": "gaussian"},
envelope: Dict[str, Union[str, int]] = {
"name": "polynomial",
"exponent": 5,
},
) -> None:
super().__init__()
self.inv_cutoff = 1 / cutoff
env_name = assert_is_instance(envelope["name"], str).lower()
env_hparams = envelope.copy()
del env_hparams["name"]
if env_name == "polynomial":
self.envelope = PolynomialEnvelope(**env_hparams)
elif env_name == "exponential":
self.envelope = ExponentialEnvelope(**env_hparams)
else:
raise ValueError(f"Unknown envelope function '{env_name}'.")
rbf_name = rbf["name"].lower()
rbf_hparams = rbf.copy()
del rbf_hparams["name"]
# RBFs get distances scaled to be in [0, 1]
if rbf_name == "gaussian":
self.rbf = GaussianSmearing(
start=0, stop=1, num_gaussians=num_radial, **rbf_hparams
)
elif rbf_name == "spherical_bessel":
self.rbf = SphericalBesselBasis(
num_radial=num_radial, cutoff=cutoff, **rbf_hparams
)
elif rbf_name == "bernstein":
self.rbf = BernsteinBasis(num_radial=num_radial, **rbf_hparams)
else:
raise ValueError(f"Unknown radial basis function '{rbf_name}'.")
def forward(self, d):
d_scaled = d * self.inv_cutoff
env = self.envelope(d_scaled)
return env[:, None] * self.rbf(d_scaled) # (nEdges, num_radial)
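# Minimal usage sketch (added, not part of the original file): distances in Angstrom are
# expanded into num_radial features and multiplied by the envelope, which zeroes all
# features for distances beyond the cutoff. The sizes below are arbitrary example values.
def _demo_radial_basis() -> None:
    rb = RadialBasis(num_radial=16, cutoff=6.0)
    d = torch.tensor([0.5, 3.0, 7.0])  # the last distance lies beyond the cutoff
    out = rb(d)
    assert out.shape == (3, 16)
    assert torch.all(out[-1] == 0)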
| 6,381 | 29.103774 | 77 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/basis_utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import numpy as np
import sympy as sym
from scipy import special as sp
from scipy.optimize import brentq
def Jn(r, n):
"""
numerical spherical bessel functions of order n
"""
return sp.spherical_jn(n, r)
def Jn_zeros(n: int, k: int):
"""
Compute the first k zeros of the spherical bessel functions up to order n (excluded)
"""
zerosj = np.zeros((n, k), dtype="float32")
zerosj[0] = np.arange(1, k + 1) * np.pi
points = np.arange(1, k + n) * np.pi
racines = np.zeros(k + n - 1, dtype="float32")
for i in range(1, n):
for j in range(k + n - 1 - i):
foo = brentq(Jn, points[j], points[j + 1], (i,))
racines[j] = foo
points = racines
zerosj[i][:k] = racines[:k]
return zerosj
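# Illustrative check (added, not part of the original file): the zeros of
# j_0(r) = sin(r)/r are exactly the multiples of pi, which the first row of
# Jn_zeros reproduces numerically.
def _demo_jn_zeros() -> None:
    zeros = Jn_zeros(n=2, k=3)
    assert zeros.shape == (2, 3)
    assert np.allclose(zeros[0], np.arange(1, 4) * np.pi)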
def spherical_bessel_formulas(n):
"""
Computes the sympy formulas for the spherical bessel functions up to order n (excluded)
"""
x = sym.symbols("x")
    # j_i = (-x)^i * (1/x * d/dx)^i * sin(x)/x
j = [sym.sin(x) / x] # j_0
a = sym.sin(x) / x
for i in range(1, n):
b = sym.diff(a, x) / x
j += [sym.simplify(b * (-x) ** i)]
a = sym.simplify(b)
return j
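# Illustrative check (added, not part of the original file): the symbolic j_1 produced
# by the recursion matches scipy's numerical spherical Bessel function at a sample point.
def _demo_spherical_bessel_formulas() -> None:
    x = sym.symbols("x")
    j = spherical_bessel_formulas(2)
    assert abs(float(j[1].subs(x, 1.0)) - sp.spherical_jn(1, 1.0)) < 1e-8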
def bessel_basis(n, k):
"""
Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to
order n (excluded) and maximum frequency k (excluded).
Returns:
bess_basis: list
Bessel basis formulas taking in a single argument x.
Has length n where each element has length k. -> In total n*k many.
"""
zeros = Jn_zeros(n, k)
normalizer = []
for order in range(n):
normalizer_tmp = []
for i in range(k):
normalizer_tmp += [0.5 * Jn(zeros[order, i], order + 1) ** 2]
normalizer_tmp = (
1 / np.array(normalizer_tmp) ** 0.5
) # sqrt(2/(j_l+1)**2) , sqrt(1/c**3) not taken into account yet
normalizer += [normalizer_tmp]
f = spherical_bessel_formulas(n)
x = sym.symbols("x")
bess_basis = []
for order in range(n):
bess_basis_tmp = []
for i in range(k):
bess_basis_tmp += [
sym.simplify(
normalizer[order][i]
* f[order].subs(x, zeros[order, i] * x)
)
]
bess_basis += [bess_basis_tmp]
return bess_basis
def sph_harm_prefactor(l_degree: int, m_order: int) -> float:
"""Computes the constant pre-factor for the spherical harmonic of degree l and order m.
Parameters
----------
l_degree: int
Degree of the spherical harmonic. l >= 0
m_order: int
Order of the spherical harmonic. -l <= m <= l
Returns
-------
factor: float
"""
# sqrt((2*l+1)/4*pi * (l-m)!/(l+m)! )
return (
(2 * l_degree + 1)
/ (4 * np.pi)
* math.factorial(l_degree - abs(m_order))
/ math.factorial(l_degree + abs(m_order))
) ** 0.5
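# Illustrative check (added, not part of the original file): the prefactor reproduces
# the familiar normalizations N_0^0 = sqrt(1/(4*pi)) and N_1^0 = sqrt(3/(4*pi)).
def _demo_sph_harm_prefactor() -> None:
    assert abs(sph_harm_prefactor(0, 0) - (1 / (4 * np.pi)) ** 0.5) < 1e-12
    assert abs(sph_harm_prefactor(1, 0) - (3 / (4 * np.pi)) ** 0.5) < 1e-12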
def associated_legendre_polynomials(
L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True
):
"""Computes string formulas of the associated legendre polynomials up to degree L (excluded).
Parameters
----------
L_maxdegree: int
Degree up to which to calculate the associated legendre polynomials (degree L is excluded).
        zero_m_only: bool
            If True only calculate the polynomials where m=0.
        pos_m_only: bool
            If True only calculate the polynomials where m>=0. Overwritten by zero_m_only.
Returns
-------
polynomials: list
Contains the sympy functions of the polynomials (in total L many if zero_m_only is True else L^2 many).
"""
# calculations from http://web.cmb.usc.edu/people/alber/Software/tomominer/docs/cpp/group__legendre__polynomials.html
z = sym.symbols("z")
P_l_m = [
[0] * (2 * l_degree + 1) for l_degree in range(L_maxdegree)
] # for order l: -l <= m <= l
P_l_m[0][0] = 1
if L_maxdegree > 0:
if zero_m_only:
# m = 0
P_l_m[1][0] = z
for l_degree in range(2, L_maxdegree):
P_l_m[l_degree][0] = sym.simplify(
(
(2 * l_degree - 1) * z * P_l_m[l_degree - 1][0]
- (l_degree - 1) * P_l_m[l_degree - 2][0]
)
/ l_degree
)
return P_l_m
else:
# for m >= 0
for l_degree in range(1, L_maxdegree):
P_l_m[l_degree][l_degree] = sym.simplify(
(1 - 2 * l_degree)
* (1 - z**2) ** 0.5
* P_l_m[l_degree - 1][l_degree - 1]
) # P_00, P_11, P_22, P_33
for m_order in range(0, L_maxdegree - 1):
P_l_m[m_order + 1][m_order] = sym.simplify(
(2 * m_order + 1) * z * P_l_m[m_order][m_order]
) # P_10, P_21, P_32, P_43
for l_degree in range(2, L_maxdegree):
for m_order in range(l_degree - 1): # P_20, P_30, P_31
P_l_m[l_degree][m_order] = sym.simplify(
(
(2 * l_degree - 1)
* z
* P_l_m[l_degree - 1][m_order]
- (l_degree + m_order - 1)
* P_l_m[l_degree - 2][m_order]
)
/ (l_degree - m_order)
)
if not pos_m_only:
# for m < 0: P_l(-m) = (-1)^m * (l-m)!/(l+m)! * P_lm
for l_degree in range(1, L_maxdegree):
for m_order in range(
1, l_degree + 1
): # P_1(-1), P_2(-1) P_2(-2)
P_l_m[l_degree][-m_order] = sym.simplify(
(-1) ** m_order
* math.factorial(l_degree - m_order)
/ math.factorial(l_degree + m_order)
* P_l_m[l_degree][m_order]
)
return P_l_m
def real_sph_harm(
L_maxdegree: int,
use_theta: bool,
use_phi: bool = True,
zero_m_only: bool = True,
):
"""
    Computes formula strings of the real part of the spherical harmonics up to degree L (excluded).
    Variables are either spherical coordinates phi and theta or cartesian coordinates x,y,z on the UNIT SPHERE.
Parameters
----------
L_maxdegree: int
Degree up to which to calculate the spherical harmonics (degree L is excluded).
use_theta: bool
- True: Expects the input of the formula strings to contain theta.
- False: Expects the input of the formula strings to contain z.
use_phi: bool
- True: Expects the input of the formula strings to contain phi.
- False: Expects the input of the formula strings to contain x and y.
Does nothing if zero_m_only is True
zero_m_only: bool
If True only calculate the harmonics where m=0.
Returns
-------
Y_lm_real: list
            Computes formula strings of the real part of the spherical harmonics up
            to degree L (degree L is excluded).
            In total L^2 many sph harm exist up to degree L (excluded). However, if zero_m_only is True then
the total count is reduced to be only L many.
"""
z = sym.symbols("z")
P_l_m = associated_legendre_polynomials(L_maxdegree, zero_m_only)
if zero_m_only:
# for all m != 0: Y_lm = 0
Y_l_m = [[0] for l_degree in range(L_maxdegree)]
else:
Y_l_m = [
[0] * (2 * l_degree + 1) for l_degree in range(L_maxdegree)
] # for order l: -l <= m <= l
    # convert expressions to spherical coordinates
if use_theta:
# replace z by cos(theta)
theta = sym.symbols("theta")
for l_degree in range(L_maxdegree):
for m_order in range(len(P_l_m[l_degree])):
if not isinstance(P_l_m[l_degree][m_order], int):
P_l_m[l_degree][m_order] = P_l_m[l_degree][m_order].subs(
z, sym.cos(theta)
)
## calculate Y_lm
# Y_lm = N * P_lm(cos(theta)) * exp(i*m*phi)
# { sqrt(2) * (-1)^m * N * P_l|m| * sin(|m|*phi) if m < 0
# Y_lm_real = { Y_lm if m = 0
# { sqrt(2) * (-1)^m * N * P_lm * cos(m*phi) if m > 0
for l_degree in range(L_maxdegree):
Y_l_m[l_degree][0] = sym.simplify(
sph_harm_prefactor(l_degree, 0) * P_l_m[l_degree][0]
) # Y_l0
if not zero_m_only:
phi = sym.symbols("phi")
for l_degree in range(1, L_maxdegree):
# m > 0
for m_order in range(1, l_degree + 1):
Y_l_m[l_degree][m_order] = sym.simplify(
2**0.5
* (-1) ** m_order
* sph_harm_prefactor(l_degree, m_order)
* P_l_m[l_degree][m_order]
* sym.cos(m_order * phi)
)
# m < 0
for m_order in range(1, l_degree + 1):
Y_l_m[l_degree][-m_order] = sym.simplify(
2**0.5
* (-1) ** m_order
* sph_harm_prefactor(l_degree, -m_order)
* P_l_m[l_degree][m_order]
* sym.sin(m_order * phi)
)
# convert expressions to cartesian coordinates
if not use_phi:
# replace phi by atan2(y,x)
x = sym.symbols("x")
y = sym.symbols("y")
for l_degree in range(L_maxdegree):
for m_order in range(len(Y_l_m[l_degree])):
Y_l_m[l_degree][m_order] = sym.simplify(
Y_l_m[l_degree][m_order].subs(phi, sym.atan2(y, x))
)
return Y_l_m
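# Illustrative check (added, not part of the original file): with zero_m_only=True the
# lowest harmonics reduce to Y_00 = 1/(2*sqrt(pi)) and Y_10 = sqrt(3/(4*pi)) * z.
def _demo_real_sph_harm() -> None:
    z = sym.symbols("z")
    Y = real_sph_harm(2, use_theta=False, zero_m_only=True)
    assert abs(float(Y[0][0]) - 0.5 / np.pi**0.5) < 1e-10
    assert abs(float(Y[1][0].subs(z, 1.0)) - (3 / (4 * np.pi)) ** 0.5) < 1e-10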
| 10,397 | 34.128378 | 121 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/spherical_basis.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import sympy as sym
import torch
from torch_geometric.nn.models.schnet import GaussianSmearing
from ocpmodels.common.typing import assert_is_instance
from .basis_utils import real_sph_harm
from .radial_basis import RadialBasis
class CircularBasisLayer(torch.nn.Module):
"""
2D Fourier Bessel Basis
Parameters
----------
num_spherical: int
Controls maximum frequency.
radial_basis: RadialBasis
Radial basis functions
cbf: dict
Name and hyperparameters of the cosine basis function
efficient: bool
Whether to use the "efficient" summation order
"""
def __init__(
self,
num_spherical: int,
radial_basis: RadialBasis,
cbf,
efficient: bool = False,
) -> None:
super().__init__()
self.radial_basis = radial_basis
self.efficient = efficient
cbf_name = assert_is_instance(cbf["name"], str).lower()
cbf_hparams = cbf.copy()
del cbf_hparams["name"]
if cbf_name == "gaussian":
self.cosφ_basis = GaussianSmearing(
start=-1, stop=1, num_gaussians=num_spherical, **cbf_hparams
)
elif cbf_name == "spherical_harmonics":
Y_lm = real_sph_harm(
num_spherical, use_theta=False, zero_m_only=True
)
sph_funcs = [] # (num_spherical,)
            # convert sympy expressions to torch functions
z = sym.symbols("z")
modules = {"sin": torch.sin, "cos": torch.cos, "sqrt": torch.sqrt}
m_order = 0 # only single angle
for l_degree in range(len(Y_lm)): # num_spherical
if (
l_degree == 0
): # Y_00 is only a constant -> function returns value and not tensor
first_sph = sym.lambdify(
[z], Y_lm[l_degree][m_order], modules
)
sph_funcs.append(
lambda z: torch.zeros_like(z) + first_sph(z)
)
else:
sph_funcs.append(
sym.lambdify([z], Y_lm[l_degree][m_order], modules)
)
self.cosφ_basis = lambda cosφ: torch.stack(
[f(cosφ) for f in sph_funcs], dim=1
)
else:
raise ValueError(f"Unknown cosine basis function '{cbf_name}'.")
def forward(self, D_ca, cosφ_cab, id3_ca):
rbf = self.radial_basis(D_ca) # (num_edges, num_radial)
cbf = self.cosφ_basis(cosφ_cab) # (num_triplets, num_spherical)
if not self.efficient:
rbf = rbf[id3_ca] # (num_triplets, num_radial)
out = (rbf[:, None, :] * cbf[:, :, None]).view(
-1, rbf.shape[-1] * cbf.shape[-1]
)
return (out,)
# (num_triplets, num_radial * num_spherical)
else:
return (rbf[None, :, :], cbf)
# (1, num_edges, num_radial), (num_edges, num_spherical)
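# Minimal usage sketch (added, not part of the original file): two edges and three
# triplets referencing them via id3_ca. In the non-efficient mode the radial and cosine
# bases are combined into one feature per triplet of size num_radial * num_spherical.
# All sizes and values are arbitrary example inputs.
def _demo_circular_basis_layer() -> None:
    rb = RadialBasis(num_radial=6, cutoff=6.0)
    cbl = CircularBasisLayer(
        num_spherical=7, radial_basis=rb, cbf={"name": "gaussian"}, efficient=False
    )
    D_ca = torch.tensor([1.0, 2.0])
    cos_cab = torch.tensor([0.5, -0.2, 0.9])
    id3_ca = torch.tensor([0, 0, 1])
    (out,) = cbl(D_ca, cos_cab, id3_ca)
    assert out.shape == (3, 6 * 7)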
| 3,221 | 32.216495 | 86 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/interaction_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import Optional
import torch
from ocpmodels.common import gp_utils
from ocpmodels.modules.scaling import ScaleFactor
from .atom_update_block import AtomUpdateBlock
from .base_layers import Dense, ResidualLayer
from .efficient import EfficientInteractionBilinear
from .embedding_block import EdgeEmbedding
class InteractionBlockTripletsOnly(torch.nn.Module):
"""
Interaction block for GemNet-T/dT.
Parameters
----------
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
emb_size_trip: int
(Down-projected) Embedding size in the triplet message passing block.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
emb_size_bil_trip: int
Embedding size of the edge embeddings in the triplet-based message passing block after the bilinear layer.
num_before_skip: int
Number of residual blocks before the first skip connection.
num_after_skip: int
Number of residual blocks after the first skip connection.
num_concat: int
Number of residual blocks after the concatenation.
num_atom: int
Number of residual blocks in the atom embedding blocks.
activation: str
Name of the activation function to use in the dense layers except for the final dense layer.
"""
def __init__(
self,
emb_size_atom: int,
emb_size_edge: int,
emb_size_trip: int,
emb_size_rbf: int,
emb_size_cbf: int,
emb_size_bil_trip: int,
num_before_skip: int,
num_after_skip: int,
num_concat: int,
num_atom: int,
activation: Optional[str] = None,
name: str = "Interaction",
) -> None:
super().__init__()
self.name = name
block_nr = name.split("_")[-1]
## -------------------------------------------- Message Passing ------------------------------------------- ##
# Dense transformation of skip connection
self.dense_ca = Dense(
emb_size_edge,
emb_size_edge,
activation=activation,
bias=False,
)
# Triplet Interaction
self.trip_interaction = TripletInteraction(
emb_size_edge=emb_size_edge,
emb_size_trip=emb_size_trip,
emb_size_bilinear=emb_size_bil_trip,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
activation=activation,
name=f"TripInteraction_{block_nr}",
)
## ---------------------------------------- Update Edge Embeddings ---------------------------------------- ##
# Residual layers before skip connection
self.layers_before_skip = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_edge,
activation=activation,
)
for _ in range(num_before_skip)
]
)
# Residual layers after skip connection
self.layers_after_skip = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_edge,
activation=activation,
)
for _ in range(num_after_skip)
]
)
## ---------------------------------------- Update Atom Embeddings ---------------------------------------- ##
self.atom_update = AtomUpdateBlock(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=num_atom,
activation=activation,
name=f"AtomUpdate_{block_nr}",
)
## ------------------------------ Update Edge Embeddings with Atom Embeddings ----------------------------- ##
self.concat_layer = EdgeEmbedding(
emb_size_atom,
emb_size_edge,
emb_size_edge,
activation=activation,
)
self.residual_m = torch.nn.ModuleList(
[
ResidualLayer(emb_size_edge, activation=activation)
for _ in range(num_concat)
]
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
h: torch.Tensor,
m: torch.Tensor,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
rbf_h,
idx_s,
idx_t,
edge_offset,
Kmax,
nAtoms,
):
"""
Returns
-------
            h: torch.Tensor, shape=(nAtoms, emb_size_atom)
Atom embeddings.
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings (c->a).
Node: h
Edge: m, rbf3, id_swap, rbf_h, idx_s, idx_t, cbf3[0], cbf3[1] (dense)
Triplet: id3_ragged_idx, id3_ba, id3_ca
"""
# Initial transformation
x_ca_skip = self.dense_ca(m) # (nEdges, emb_size_edge)
x3 = self.trip_interaction(
m,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
edge_offset,
Kmax,
)
## ----------------------------- Merge Embeddings after Triplet Interaction ------------------------------ ##
x = x_ca_skip + x3 # (nEdges, emb_size_edge)
x = x * self.inv_sqrt_2
## ---------------------------------------- Update Edge Embeddings --------------------------------------- ##
# Transformations before skip connection
for _, layer in enumerate(self.layers_before_skip):
x = layer(x) # (nEdges, emb_size_edge)
# Skip connection
m = m + x # (nEdges, emb_size_edge)
m = m * self.inv_sqrt_2
# Transformations after skip connection
for _, layer in enumerate(self.layers_after_skip):
m = layer(m) # (nEdges, emb_size_edge)
## ---------------------------------------- Update Atom Embeddings --------------------------------------- ##
h2 = self.atom_update(nAtoms, m, rbf_h, idx_t)
# Skip connection
h = h + h2 # (nAtoms, emb_size_atom)
h = h * self.inv_sqrt_2
## ----------------------------- Update Edge Embeddings with Atom Embeddings ----------------------------- ##
m2 = self.concat_layer(h, m, idx_s, idx_t) # (nEdges, emb_size_edge)
for _, layer in enumerate(self.residual_m):
m2 = layer(m2) # (nEdges, emb_size_edge)
# Skip connection
m = m + m2 # (nEdges, emb_size_edge)
m = m * self.inv_sqrt_2
return h, m
class TripletInteraction(torch.nn.Module):
"""
Triplet-based message passing block.
Parameters
----------
emb_size_edge: int
Embedding size of the edges.
emb_size_trip: int
(Down-projected) Embedding size of the edge embeddings after the hadamard product with rbf.
emb_size_bilinear: int
Embedding size of the edge embeddings after the bilinear layer.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
activation: str
Name of the activation function to use in the dense layers except for the final dense layer.
"""
def __init__(
self,
emb_size_edge: int,
emb_size_trip: int,
emb_size_bilinear: int,
emb_size_rbf: int,
emb_size_cbf: int,
activation: Optional[str] = None,
name: str = "TripletInteraction",
**kwargs,
) -> None:
super().__init__()
self.name = name
# Dense transformation
self.dense_ba = Dense(
emb_size_edge,
emb_size_edge,
activation=activation,
bias=False,
)
# Up projections of basis representations, bilinear layer and scaling factors
self.mlp_rbf = Dense(
emb_size_rbf,
emb_size_edge,
activation=None,
bias=False,
)
self.scale_rbf = ScaleFactor(name + "_had_rbf")
self.mlp_cbf = EfficientInteractionBilinear(
emb_size_trip, emb_size_cbf, emb_size_bilinear
)
# combines scaling for bilinear layer and summation
self.scale_cbf_sum = ScaleFactor(name + "_sum_cbf")
# Down and up projections
self.down_projection = Dense(
emb_size_edge,
emb_size_trip,
activation=activation,
bias=False,
)
self.up_projection_ca = Dense(
emb_size_bilinear,
emb_size_edge,
activation=activation,
bias=False,
)
self.up_projection_ac = Dense(
emb_size_bilinear,
emb_size_edge,
activation=activation,
bias=False,
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
m: torch.Tensor,
rbf3,
cbf3,
id3_ragged_idx,
id_swap,
id3_ba,
id3_ca,
edge_offset,
Kmax,
):
"""
Returns
-------
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings (c->a).
"""
# Dense transformation
x_ba = self.dense_ba(m) # (nEdges, emb_size_edge)
# Transform via radial bessel basis
rbf_emb = self.mlp_rbf(rbf3) # (nEdges, emb_size_edge)
x_ba2 = x_ba * rbf_emb
x_ba = self.scale_rbf(x_ba2, ref=x_ba)
x_ba = self.down_projection(x_ba) # (nEdges, emb_size_trip)
# Graph Parallel: Gather x_ba from all nodes
x_ba = gp_utils.gather_from_model_parallel_region(x_ba, dim=0)
# Transform via circular spherical basis
x_ba = x_ba[id3_ba]
# Efficient bilinear layer
x = self.mlp_cbf(cbf3, x_ba, id3_ca, id3_ragged_idx, edge_offset, Kmax)
# (nEdges, emb_size_quad)
x = self.scale_cbf_sum(x, ref=x_ba)
# =>
# rbf(d_ba)
# cbf(d_ca, angle_cab)
# Up project embeddings
x_ca = self.up_projection_ca(x) # (nEdges, emb_size_edge)
x_ac = self.up_projection_ac(x) # (nEdges, emb_size_edge)
# Graph Parallel: Gather x_ac from all nodes
x_ac = gp_utils.gather_from_model_parallel_region(x_ac, dim=0)
# Merge interaction of c->a and a->c
x_ac = x_ac[id_swap] # swap to add to edge a->c and not c->a
x_ac = gp_utils.scatter_to_model_parallel_region(x_ac, dim=0)
x3 = x_ca + x_ac
x3 = x3 * self.inv_sqrt_2
return x3
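# Illustrative sketch (added, not part of the original file): the residual merges above
# multiply by 1/sqrt(2) because summing two approximately independent unit-variance
# signals doubles the variance; the rescaling keeps activations on a similar scale.
def _demo_inv_sqrt_2_scaling() -> None:
    a, b = torch.randn(100_000), torch.randn(100_000)
    merged = (a + b) / math.sqrt(2.0)
    assert abs(merged.var().item() - 1.0) < 0.05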
| 11,227 | 30.016575 | 118 | py |
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/gemnet_gp/layers/efficient.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Tuple
import torch
from ..initializers import he_orthogonal_init
class EfficientInteractionDownProjection(torch.nn.Module):
"""
Down projection in the efficient reformulation.
Parameters
----------
    num_spherical: int
        Number of spherical basis functions.
    num_radial: int
        Number of radial basis functions.
    emb_size_interm: int
        Intermediate embedding size (down-projection size).
"""
def __init__(
self,
num_spherical: int,
num_radial: int,
emb_size_interm: int,
) -> None:
super().__init__()
self.num_spherical = num_spherical
self.num_radial = num_radial
self.emb_size_interm = emb_size_interm
self.reset_parameters()
def reset_parameters(self) -> None:
self.weight = torch.nn.Parameter(
torch.empty(
(self.num_spherical, self.num_radial, self.emb_size_interm)
),
requires_grad=True,
)
he_orthogonal_init(self.weight)
def forward(
self,
rbf: torch.Tensor,
sph: torch.Tensor,
id_ca,
id_ragged_idx,
Kmax: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Arguments
---------
rbf: torch.Tensor, shape=(1, nEdges, num_radial)
        sph: torch.Tensor, shape=(num_triplets, num_spherical)
id_ca
id_ragged_idx
Returns
-------
rbf_W1: torch.Tensor, shape=(nEdges, emb_size_interm, num_spherical)
        sph: torch.Tensor, shape=(nEdges, num_spherical, Kmax)
Kmax = maximum number of neighbors of the edges
"""
num_edges = rbf.shape[1]
# MatMul: mul + sum over num_radial
rbf_W1 = torch.matmul(rbf, self.weight)
# (num_spherical, nEdges , emb_size_interm)
rbf_W1 = rbf_W1.permute(1, 2, 0)
# (nEdges, emb_size_interm, num_spherical)
# Zero padded dense matrix
# maximum number of neighbors, catch empty id_ca with maximum
if sph.shape[0] == 0:
Kmax = 0
sph2 = sph.new_zeros(num_edges, Kmax, self.num_spherical)
sph2[id_ca, id_ragged_idx] = sph
sph2 = torch.transpose(sph2, 1, 2)
# (nEdges, num_spherical/emb_size_interm, Kmax)
return rbf_W1, sph2
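# Shape sketch (added, not part of the original file): 4 edges, 5 triplets, and at most
# Kmax=3 triplet neighbors per edge. id_ca maps each triplet to its output edge and
# id_ragged_idx enumerates the triplets within each edge; all sizes are arbitrary.
def _demo_down_projection_shapes() -> None:
    proj = EfficientInteractionDownProjection(
        num_spherical=7, num_radial=6, emb_size_interm=16
    )
    rbf = torch.rand(1, 4, 6)
    sph = torch.rand(5, 7)
    id_ca = torch.tensor([0, 0, 1, 2, 2])
    id_ragged_idx = torch.tensor([0, 1, 0, 0, 1])
    rbf_W1, sph2 = proj(rbf, sph, id_ca, id_ragged_idx, Kmax=3)
    assert rbf_W1.shape == (4, 16, 7)
    assert sph2.shape == (4, 7, 3)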
class EfficientInteractionBilinear(torch.nn.Module):
"""
Efficient reformulation of the bilinear layer and subsequent summation.
Parameters
----------
    emb_size: int
        (Down-projected) embedding size of the input edge embeddings m.
    emb_size_interm: int
        Intermediate embedding size of the basis representation.
    units_out: int
        Embedding output size of the bilinear layer.
"""
def __init__(
self,
emb_size: int,
emb_size_interm: int,
units_out: int,
) -> None:
super().__init__()
self.emb_size = emb_size
self.emb_size_interm = emb_size_interm
self.units_out = units_out
self.reset_parameters()
def reset_parameters(self) -> None:
self.weight = torch.nn.Parameter(
torch.empty(
(self.emb_size, self.emb_size_interm, self.units_out),
requires_grad=True,
)
)
he_orthogonal_init(self.weight)
def forward(
self,
basis: Tuple[torch.Tensor, torch.Tensor],
m,
id_reduce,
id_ragged_idx,
edge_offset,
Kmax: int,
) -> torch.Tensor:
"""
Arguments
---------
basis
m: quadruplets: m = m_db , triplets: m = m_ba
id_reduce
id_ragged_idx
Returns
-------
m_ca: torch.Tensor, shape=(nEdges, units_out)
Edge embeddings.
"""
# num_spherical is actually num_spherical**2 for quadruplets
(rbf_W1, sph) = basis
# (nEdges, emb_size_interm, num_spherical), (nEdges, num_spherical, Kmax)
nEdges = rbf_W1.shape[0]
# Create (zero-padded) dense matrix of the neighboring edge embeddings.
# maximum number of neighbors, catch empty id_reduce_ji with maximum
m2 = m.new_zeros(nEdges, Kmax, self.emb_size)
m2[id_reduce - edge_offset, id_ragged_idx] = m
# (num_quadruplets or num_triplets, emb_size) -> (nEdges, Kmax, emb_size)
sum_k = torch.matmul(sph, m2) # (nEdges, num_spherical, emb_size)
# MatMul: mul + sum over num_spherical
rbf_W1_sum_k = torch.matmul(rbf_W1, sum_k)
# (nEdges, emb_size_interm, emb_size)
# Bilinear: Sum over emb_size_interm and emb_size
m_ca = torch.matmul(rbf_W1_sum_k.permute(2, 0, 1), self.weight)
# (emb_size, nEdges, units_out)
m_ca = torch.sum(m_ca, dim=0)
# (nEdges, units_out)
return m_ca
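# Shape sketch (added, not part of the original file), chained with the down projection
# above: the padded bases and the per-triplet edge embeddings m are contracted into one
# output row per edge. emb_size=8, emb_size_interm=16 and units_out=32 are arbitrary.
def _demo_bilinear_shapes() -> None:
    bilinear = EfficientInteractionBilinear(
        emb_size=8, emb_size_interm=16, units_out=32
    )
    rbf_W1 = torch.rand(4, 16, 7)  # (nEdges, emb_size_interm, num_spherical)
    sph = torch.rand(4, 7, 3)  # (nEdges, num_spherical, Kmax)
    m = torch.rand(5, 8)  # one embedding per triplet
    id_reduce = torch.tensor([0, 0, 1, 2, 2])
    id_ragged_idx = torch.tensor([0, 1, 0, 0, 1])
    m_ca = bilinear((rbf_W1, sph), m, id_reduce, id_ragged_idx, edge_offset=0, Kmax=3)
    assert m_ca.shape == (4, 32)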
| 4,957 | 27.170455 | 81 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/initializers.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from functools import partial
import torch
def _standardize(kernel):
"""
Makes sure that N*Var(W) = 1 and E[W] = 0
"""
eps = 1e-6
if len(kernel.shape) == 3:
axis = [0, 1] # last dimension is output dimension
else:
axis = 1
var, mean = torch.var_mean(kernel, dim=axis, unbiased=True, keepdim=True)
kernel = (kernel - mean) / (var + eps) ** 0.5
return kernel
def he_orthogonal_init(tensor):
"""
Generate a weight matrix with variance according to He (Kaiming) initialization.
    Based on a random (semi-)orthogonal matrix, neural networks
    are expected to learn better when features are decorrelated
    (as stated by e.g. "Reducing overfitting in deep networks by decorrelating representations",
"Dropout: a simple way to prevent neural networks from overfitting",
"Exact solutions to the nonlinear dynamics of learning in deep linear neural networks")
"""
tensor = torch.nn.init.orthogonal_(tensor)
if len(tensor.shape) == 3:
fan_in = tensor.shape[:-1].numel()
else:
fan_in = tensor.shape[1]
with torch.no_grad():
tensor.data = _standardize(tensor.data)
tensor.data *= (1 / fan_in) ** 0.5
return tensor
def grid_init(tensor, start: int = -1, end: int = 1):
"""
Generate a weight matrix so that each input value corresponds to one value on a regular grid between start and end.
"""
fan_in = tensor.shape[1]
with torch.no_grad():
data = torch.linspace(
start, end, fan_in, device=tensor.device, dtype=tensor.dtype
).expand_as(tensor)
tensor.copy_(data)
return tensor
def log_grid_init(tensor, start: int = -4, end: int = 0):
"""
Generate a weight matrix so that each input value corresponds to one value on a regular logarithmic grid between 10^start and 10^end.
"""
fan_in = tensor.shape[1]
with torch.no_grad():
data = torch.logspace(
start, end, fan_in, device=tensor.device, dtype=tensor.dtype
).expand_as(tensor)
tensor.copy_(data)
return tensor
def get_initializer(name, **init_kwargs):
name = name.lower()
if name == "heorthogonal":
initializer = he_orthogonal_init
elif name == "zeros":
initializer = torch.nn.init.zeros_
elif name == "grid":
initializer = grid_init
elif name == "loggrid":
initializer = log_grid_init
else:
raise UserWarning(f"Unknown initializer: {name}")
initializer = partial(initializer, **init_kwargs)
return initializer
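# Minimal usage sketch (added, not part of the original file): initializers are looked
# up by (case-insensitive) name and returned as partials, so extra hyperparameters can
# be bound at lookup time. The tensor sizes below are arbitrary example values.
def _demo_get_initializer() -> None:
    init_fn = get_initializer("loggrid", start=-2, end=0)
    w = torch.empty(4, 8)
    init_fn(w)
    assert torch.allclose(w[0], torch.logspace(-2, 0, 8))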
| 2,765 | 27.8125 | 137 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/interaction_indices.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch_scatter import segment_coo
from torch_sparse import SparseTensor
from .utils import get_inner_idx, masked_select_sparsetensor_flat
def get_triplets(graph, num_atoms: int):
"""
Get all input edges b->a for each output edge c->a.
It is possible that b=c, as long as the edges are distinct
(i.e. atoms b and c stem from different unit cells).
Arguments
---------
graph: dict of torch.Tensor
Contains the graph's edge_index.
num_atoms: int
Total number of atoms.
Returns
-------
Dictionary containing the entries:
in: torch.Tensor, shape (num_triplets,)
Indices of input edge b->a of each triplet b->a<-c
out: torch.Tensor, shape (num_triplets,)
Indices of output edge c->a of each triplet b->a<-c
out_agg: torch.Tensor, shape (num_triplets,)
Indices enumerating the intermediate edges of each output edge.
Used for creating a padded matrix and aggregating via matmul.
"""
idx_s, idx_t = graph["edge_index"] # c->a (source=c, target=a)
num_edges = idx_s.size(0)
value = torch.arange(num_edges, device=idx_s.device, dtype=idx_s.dtype)
# Possibly contains multiple copies of the same edge (for periodic interactions)
adj = SparseTensor(
row=idx_t,
col=idx_s,
value=value,
sparse_sizes=(num_atoms, num_atoms),
)
adj_edges = adj[idx_t]
# Edge indices (b->a, c->a) for triplets.
idx = {}
idx["in"] = adj_edges.storage.value()
idx["out"] = adj_edges.storage.row()
# Remove self-loop triplets
# Compare edge indices, not atom indices to correctly handle periodic interactions
mask = idx["in"] != idx["out"]
idx["in"] = idx["in"][mask]
idx["out"] = idx["out"][mask]
# idx['out'] has to be sorted for this
idx["out_agg"] = get_inner_idx(idx["out"], dim_size=num_edges)
return idx
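# Illustrative sketch (added, not part of the original file): a 3-atom chain 0-1-2 with
# edges in both directions. Only atom 1 has two incoming edges (0->1 and 2->1), so
# exactly two triplets b->a<-c remain after removing self-loop triplets.
def _demo_get_triplets() -> None:
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])  # (source, target)
    idx = get_triplets({"edge_index": edge_index}, num_atoms=3)
    assert idx["in"].numel() == 2
    assert idx["out"].numel() == 2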
def get_mixed_triplets(
graph_in,
graph_out,
num_atoms,
to_outedge=False,
return_adj=False,
return_agg_idx=False,
):
"""
Get all output edges (ingoing or outgoing) for each incoming edge.
It is possible that in atom=out atom, as long as the edges are distinct
(i.e. they stem from different unit cells). In edges and out edges stem
from separate graphs (hence "mixed") with shared atoms.
Arguments
---------
graph_in: dict of torch.Tensor
Contains the input graph's edge_index and cell_offset.
graph_out: dict of torch.Tensor
Contains the output graph's edge_index and cell_offset.
Input and output graphs use the same atoms, but different edges.
num_atoms: int
Total number of atoms.
to_outedge: bool
Whether to map the output to the atom's outgoing edges a->c
instead of the ingoing edges c->a.
return_adj: bool
Whether to output the adjacency (incidence) matrix between output
edges and atoms adj_edges.
return_agg_idx: bool
Whether to output the indices enumerating the intermediate edges
of each output edge.
Returns
-------
Dictionary containing the entries:
in: torch.Tensor, shape (num_triplets,)
Indices of input edges
out: torch.Tensor, shape (num_triplets,)
Indices of output edges
adj_edges: SparseTensor, shape (num_edges, num_atoms)
Adjacency (incidence) matrix between output edges and atoms,
with values specifying the input edges.
Only returned if return_adj is True.
out_agg: torch.Tensor, shape (num_triplets,)
Indices enumerating the intermediate edges of each output edge.
Used for creating a padded matrix and aggregating via matmul.
Only returned if return_agg_idx is True.
"""
idx_out_s, idx_out_t = graph_out["edge_index"]
# c->a (source=c, target=a)
idx_in_s, idx_in_t = graph_in["edge_index"]
num_edges = idx_out_s.size(0)
value_in = torch.arange(
idx_in_s.size(0), device=idx_in_s.device, dtype=idx_in_s.dtype
)
# This exploits that SparseTensor can have multiple copies of the same edge!
adj_in = SparseTensor(
row=idx_in_t,
col=idx_in_s,
value=value_in,
sparse_sizes=(num_atoms, num_atoms),
)
if to_outedge:
adj_edges = adj_in[idx_out_s]
else:
adj_edges = adj_in[idx_out_t]
# Edge indices (b->a, c->a) for triplets.
idx_in = adj_edges.storage.value()
idx_out = adj_edges.storage.row()
# Remove self-loop triplets c->a<-c or c<-a<-c
# Check atom as well as cell offset
if to_outedge:
idx_atom_in = idx_in_s[idx_in]
idx_atom_out = idx_out_t[idx_out]
cell_offsets_sum = (
graph_out["cell_offset"][idx_out] + graph_in["cell_offset"][idx_in]
)
else:
idx_atom_in = idx_in_s[idx_in]
idx_atom_out = idx_out_s[idx_out]
cell_offsets_sum = (
graph_out["cell_offset"][idx_out] - graph_in["cell_offset"][idx_in]
)
mask = (idx_atom_in != idx_atom_out) | torch.any(
cell_offsets_sum != 0, dim=-1
)
idx = {}
if return_adj:
idx["adj_edges"] = masked_select_sparsetensor_flat(adj_edges, mask)
idx["in"] = idx["adj_edges"].storage.value().clone()
idx["out"] = idx["adj_edges"].storage.row()
else:
idx["in"] = idx_in[mask]
idx["out"] = idx_out[mask]
if return_agg_idx:
# idx['out'] has to be sorted
idx["out_agg"] = get_inner_idx(idx["out"], dim_size=num_edges)
return idx
def get_quadruplets(
main_graph,
qint_graph,
num_atoms,
):
"""
    Get all d->b for each edge c->a and connection b->a.
    Careful about periodic images!
    A separate interaction cutoff is not supported.
Arguments
---------
main_graph: dict of torch.Tensor
Contains the main graph's edge_index and cell_offset.
The main graph defines which edges are embedded.
qint_graph: dict of torch.Tensor
Contains the quadruplet interaction graph's edge_index and
cell_offset. main_graph and qint_graph use the same atoms,
but different edges.
num_atoms: int
Total number of atoms.
Returns
-------
Dictionary containing the entries:
triplet_in['in']: torch.Tensor, shape (nTriplets,)
Indices of input edge d->b in triplet d->b->a.
triplet_in['out']: torch.Tensor, shape (nTriplets,)
Interaction indices of output edge b->a in triplet d->b->a.
triplet_out['in']: torch.Tensor, shape (nTriplets,)
Interaction indices of input edge b->a in triplet c->a<-b.
triplet_out['out']: torch.Tensor, shape (nTriplets,)
Indices of output edge c->a in triplet c->a<-b.
out: torch.Tensor, shape (nQuadruplets,)
Indices of output edge c->a in quadruplet
trip_in_to_quad: torch.Tensor, shape (nQuadruplets,)
Indices to map from input triplet d->b->a
to quadruplet d->b->a<-c.
trip_out_to_quad: torch.Tensor, shape (nQuadruplets,)
Indices to map from output triplet c->a<-b
to quadruplet d->b->a<-c.
out_agg: torch.Tensor, shape (num_triplets,)
Indices enumerating the intermediate edges of each output edge.
Used for creating a padded matrix and aggregating via matmul.
"""
idx_s, _ = main_graph["edge_index"]
idx_qint_s, _ = qint_graph["edge_index"]
# c->a (source=c, target=a)
num_edges = idx_s.size(0)
idx = {}
idx["triplet_in"] = get_mixed_triplets(
main_graph,
qint_graph,
num_atoms,
to_outedge=True,
return_adj=True,
)
# Input triplets d->b->a
idx["triplet_out"] = get_mixed_triplets(
qint_graph,
main_graph,
num_atoms,
to_outedge=False,
)
# Output triplets c->a<-b
# ---------------- Quadruplets -----------------
# Repeat indices by counting the number of input triplets per
# intermediate edge ba. segment_coo assumes sorted idx['triplet_in']['out']
ones = (
idx["triplet_in"]["out"]
.new_ones(1)
.expand_as(idx["triplet_in"]["out"])
)
num_trip_in_per_inter = segment_coo(
ones, idx["triplet_in"]["out"], dim_size=idx_qint_s.size(0)
)
num_trip_out_per_inter = num_trip_in_per_inter[idx["triplet_out"]["in"]]
idx["out"] = torch.repeat_interleave(
idx["triplet_out"]["out"], num_trip_out_per_inter
)
idx_inter = torch.repeat_interleave(
idx["triplet_out"]["in"], num_trip_out_per_inter
)
idx["trip_out_to_quad"] = torch.repeat_interleave(
torch.arange(
len(idx["triplet_out"]["out"]),
device=idx_s.device,
dtype=idx_s.dtype,
),
num_trip_out_per_inter,
)
# Generate input indices by using the adjacency
# matrix idx['triplet_in']['adj_edges']
idx["triplet_in"]["adj_edges"].set_value_(
torch.arange(
len(idx["triplet_in"]["in"]),
device=idx_s.device,
dtype=idx_s.dtype,
),
layout="coo",
)
adj_trip_in_per_trip_out = idx["triplet_in"]["adj_edges"][
idx["triplet_out"]["in"]
]
# Rows in adj_trip_in_per_trip_out are intermediate edges ba
idx["trip_in_to_quad"] = adj_trip_in_per_trip_out.storage.value()
idx_in = idx["triplet_in"]["in"][idx["trip_in_to_quad"]]
# Remove quadruplets with c == d
# Triplets should already ensure that a != d and b != c
# Compare atom indices and cell offsets
idx_atom_c = idx_s[idx["out"]]
idx_atom_d = idx_s[idx_in]
cell_offset_cd = (
main_graph["cell_offset"][idx_in]
+ qint_graph["cell_offset"][idx_inter]
- main_graph["cell_offset"][idx["out"]]
)
mask_cd = (idx_atom_c != idx_atom_d) | torch.any(
cell_offset_cd != 0, dim=-1
)
idx["out"] = idx["out"][mask_cd]
idx["trip_out_to_quad"] = idx["trip_out_to_quad"][mask_cd]
idx["trip_in_to_quad"] = idx["trip_in_to_quad"][mask_cd]
# idx['out'] has to be sorted for this
idx["out_agg"] = get_inner_idx(idx["out"], dim_size=num_edges)
return idx
| 10,507 | 32.787781 | 86 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/gemnet_oc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from typing import Dict, Optional, Union
import numpy as np
import torch
from torch_geometric.nn import radius_graph
from torch_scatter import scatter, segment_coo
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
compute_neighbors,
conditional_grad,
get_max_neighbors_mask,
get_pbc_distances,
radius_graph_pbc,
scatter_det,
)
from ocpmodels.models.base import BaseModel
from ocpmodels.modules.scaling.compat import load_scales_compat
from .initializers import get_initializer
from .interaction_indices import (
get_mixed_triplets,
get_quadruplets,
get_triplets,
)
from .layers.atom_update_block import OutputBlock
from .layers.base_layers import Dense, ResidualLayer
from .layers.efficient import BasisEmbedding
from .layers.embedding_block import AtomEmbedding, EdgeEmbedding
from .layers.force_scaler import ForceScaler
from .layers.interaction_block import InteractionBlock
from .layers.radial_basis import RadialBasis
from .layers.spherical_basis import CircularBasisLayer, SphericalBasisLayer
from .utils import (
get_angle,
get_edge_id,
get_inner_idx,
inner_product_clamped,
mask_neighbors,
repeat_blocks,
)
@registry.register_model("gemnet_oc")
class GemNetOC(BaseModel):
"""
Arguments
---------
num_atoms (int): Unused argument
bond_feat_dim (int): Unused argument
num_targets: int
Number of prediction targets.
num_spherical: int
Controls maximum frequency.
num_radial: int
Controls maximum frequency.
num_blocks: int
Number of building blocks to be stacked.
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
    emb_size_trip_in: int
        (Down-projected) embedding size of the triplet edge embeddings
        before the bilinear layer.
    emb_size_trip_out: int
        (Down-projected) embedding size of the triplet edge embeddings
        after the bilinear layer.
emb_size_quad_in: int
(Down-projected) embedding size of the quadruplet edge embeddings
before the bilinear layer.
emb_size_quad_out: int
(Down-projected) embedding size of the quadruplet edge embeddings
after the bilinear layer.
emb_size_aint_in: int
Embedding size in the atom interaction before the bilinear layer.
emb_size_aint_out: int
Embedding size in the atom interaction after the bilinear layer.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
emb_size_sbf: int
Embedding size of the spherical basis transformation (two angles).
num_before_skip: int
Number of residual blocks before the first skip connection.
num_after_skip: int
Number of residual blocks after the first skip connection.
num_concat: int
Number of residual blocks after the concatenation.
num_atom: int
Number of residual blocks in the atom embedding blocks.
num_output_afteratom: int
Number of residual blocks in the output blocks
after adding the atom embedding.
num_atom_emb_layers: int
Number of residual blocks for transforming atom embeddings.
num_global_out_layers: int
Number of final residual blocks before the output.
regress_forces: bool
Whether to predict forces. Default: True
direct_forces: bool
If True predict forces based on aggregation of interatomic directions.
If False predict forces based on negative gradient of energy potential.
use_pbc: bool
Whether to use periodic boundary conditions.
scale_backprop_forces: bool
Whether to scale up the energy and then scales down the forces
to prevent NaNs and infs in backpropagated forces.
cutoff: float
Embedding cutoff for interatomic connections and embeddings in Angstrom.
cutoff_qint: float
Quadruplet interaction cutoff in Angstrom.
Optional. Uses cutoff per default.
cutoff_aeaint: float
Edge-to-atom and atom-to-edge interaction cutoff in Angstrom.
Optional. Uses cutoff per default.
cutoff_aint: float
Atom-to-atom interaction cutoff in Angstrom.
Optional. Uses maximum of all other cutoffs per default.
max_neighbors: int
Maximum number of neighbors for interatomic connections and embeddings.
max_neighbors_qint: int
Maximum number of quadruplet interactions per embedding.
Optional. Uses max_neighbors per default.
max_neighbors_aeaint: int
Maximum number of edge-to-atom and atom-to-edge interactions per embedding.
Optional. Uses max_neighbors per default.
max_neighbors_aint: int
Maximum number of atom-to-atom interactions per atom.
Optional. Uses maximum of all other neighbors per default.
    enforce_max_neighbors_strictly: bool
        When subselecting edges based on the max_neighbors arguments, arbitrarily
        select among degenerate edges so that exactly the correct number is kept.
rbf: dict
Name and hyperparameters of the radial basis function.
rbf_spherical: dict
Name and hyperparameters of the radial basis function used as part of the
circular and spherical bases.
Optional. Uses rbf per default.
envelope: dict
Name and hyperparameters of the envelope function.
cbf: dict
Name and hyperparameters of the circular basis function.
sbf: dict
Name and hyperparameters of the spherical basis function.
extensive: bool
Whether the output should be extensive (proportional to the number of atoms)
forces_coupled: bool
If True, enforce that |F_st| = |F_ts|. No effect if direct_forces is False.
output_init: str
Initialization method for the final dense layer.
activation: str
Name of the activation function.
scale_file: str
Path to the pytorch file containing the scaling factors.
quad_interaction: bool
Whether to use quadruplet interactions (with dihedral angles)
atom_edge_interaction: bool
Whether to use atom-to-edge interactions
edge_atom_interaction: bool
Whether to use edge-to-atom interactions
atom_interaction: bool
Whether to use atom-to-atom interactions
scale_basis: bool
Whether to use a scaling layer in the raw basis function for better
numerical stability.
qint_tags: list
Which atom tags to use quadruplet interactions for.
0=sub-surface bulk, 1=surface, 2=adsorbate atoms.
"""
def __init__(
self,
num_atoms: Optional[int],
bond_feat_dim: int,
num_targets: int,
num_spherical: int,
num_radial: int,
num_blocks: int,
emb_size_atom: int,
emb_size_edge: int,
emb_size_trip_in: int,
emb_size_trip_out: int,
emb_size_quad_in: int,
emb_size_quad_out: int,
emb_size_aint_in: int,
emb_size_aint_out: int,
emb_size_rbf: int,
emb_size_cbf: int,
emb_size_sbf: int,
num_before_skip: int,
num_after_skip: int,
num_concat: int,
num_atom: int,
num_output_afteratom: int,
num_atom_emb_layers: int = 0,
num_global_out_layers: int = 2,
regress_forces: bool = True,
direct_forces: bool = False,
use_pbc: bool = True,
scale_backprop_forces: bool = False,
cutoff: float = 6.0,
cutoff_qint: Optional[float] = None,
cutoff_aeaint: Optional[float] = None,
cutoff_aint: Optional[float] = None,
max_neighbors: int = 50,
max_neighbors_qint: Optional[int] = None,
max_neighbors_aeaint: Optional[int] = None,
max_neighbors_aint: Optional[int] = None,
enforce_max_neighbors_strictly: bool = True,
rbf: Dict[str, str] = {"name": "gaussian"},
rbf_spherical: Optional[dict] = None,
envelope: Dict[str, Union[str, int]] = {
"name": "polynomial",
"exponent": 5,
},
cbf: Dict[str, str] = {"name": "spherical_harmonics"},
sbf: Dict[str, str] = {"name": "spherical_harmonics"},
extensive: bool = True,
forces_coupled: bool = False,
output_init: str = "HeOrthogonal",
activation: str = "silu",
quad_interaction: bool = False,
atom_edge_interaction: bool = False,
edge_atom_interaction: bool = False,
atom_interaction: bool = False,
scale_basis: bool = False,
qint_tags: list = [0, 1, 2],
num_elements: int = 83,
otf_graph: bool = False,
scale_file: Optional[str] = None,
**kwargs, # backwards compatibility with deprecated arguments
) -> None:
super().__init__()
if len(kwargs) > 0:
logging.warning(f"Unrecognized arguments: {list(kwargs.keys())}")
self.num_targets = num_targets
assert num_blocks > 0
self.num_blocks = num_blocks
self.extensive = extensive
self.atom_edge_interaction = atom_edge_interaction
self.edge_atom_interaction = edge_atom_interaction
self.atom_interaction = atom_interaction
self.quad_interaction = quad_interaction
self.qint_tags = torch.tensor(qint_tags)
self.otf_graph = otf_graph
if not rbf_spherical:
rbf_spherical = rbf
self.set_cutoffs(cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint)
self.set_max_neighbors(
max_neighbors,
max_neighbors_qint,
max_neighbors_aeaint,
max_neighbors_aint,
)
self.enforce_max_neighbors_strictly = enforce_max_neighbors_strictly
self.use_pbc = use_pbc
self.direct_forces = direct_forces
self.forces_coupled = forces_coupled
self.regress_forces = regress_forces
self.force_scaler = ForceScaler(enabled=scale_backprop_forces)
self.init_basis_functions(
num_radial,
num_spherical,
rbf,
rbf_spherical,
envelope,
cbf,
sbf,
scale_basis,
)
self.init_shared_basis_layers(
num_radial, num_spherical, emb_size_rbf, emb_size_cbf, emb_size_sbf
)
# Embedding blocks
self.atom_emb = AtomEmbedding(emb_size_atom, num_elements)
self.edge_emb = EdgeEmbedding(
emb_size_atom, num_radial, emb_size_edge, activation=activation
)
# Interaction Blocks
int_blocks = []
for _ in range(num_blocks):
int_blocks.append(
InteractionBlock(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_trip_in=emb_size_trip_in,
emb_size_trip_out=emb_size_trip_out,
emb_size_quad_in=emb_size_quad_in,
emb_size_quad_out=emb_size_quad_out,
emb_size_a2a_in=emb_size_aint_in,
emb_size_a2a_out=emb_size_aint_out,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
emb_size_sbf=emb_size_sbf,
num_before_skip=num_before_skip,
num_after_skip=num_after_skip,
num_concat=num_concat,
num_atom=num_atom,
num_atom_emb_layers=num_atom_emb_layers,
quad_interaction=quad_interaction,
atom_edge_interaction=atom_edge_interaction,
edge_atom_interaction=edge_atom_interaction,
atom_interaction=atom_interaction,
activation=activation,
)
)
self.int_blocks = torch.nn.ModuleList(int_blocks)
out_blocks = []
for _ in range(num_blocks + 1):
out_blocks.append(
OutputBlock(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=num_atom,
nHidden_afteratom=num_output_afteratom,
activation=activation,
direct_forces=direct_forces,
)
)
self.out_blocks = torch.nn.ModuleList(out_blocks)
out_mlp_E = [
Dense(
emb_size_atom * (num_blocks + 1),
emb_size_atom,
activation=activation,
)
] + [
ResidualLayer(
emb_size_atom,
activation=activation,
)
for _ in range(num_global_out_layers)
]
self.out_mlp_E = torch.nn.Sequential(*out_mlp_E)
self.out_energy = Dense(
emb_size_atom, num_targets, bias=False, activation=None
)
if direct_forces:
out_mlp_F = [
Dense(
emb_size_edge * (num_blocks + 1),
emb_size_edge,
activation=activation,
)
] + [
ResidualLayer(
emb_size_edge,
activation=activation,
)
for _ in range(num_global_out_layers)
]
self.out_mlp_F = torch.nn.Sequential(*out_mlp_F)
self.out_forces = Dense(
emb_size_edge, num_targets, bias=False, activation=None
)
out_initializer = get_initializer(output_init)
self.out_energy.reset_parameters(out_initializer)
if direct_forces:
self.out_forces.reset_parameters(out_initializer)
load_scales_compat(self, scale_file)
def set_cutoffs(self, cutoff, cutoff_qint, cutoff_aeaint, cutoff_aint):
self.cutoff = cutoff
if (
not (self.atom_edge_interaction or self.edge_atom_interaction)
or cutoff_aeaint is None
):
self.cutoff_aeaint = self.cutoff
else:
self.cutoff_aeaint = cutoff_aeaint
if not self.quad_interaction or cutoff_qint is None:
self.cutoff_qint = self.cutoff
else:
self.cutoff_qint = cutoff_qint
if not self.atom_interaction or cutoff_aint is None:
self.cutoff_aint = max(
self.cutoff,
self.cutoff_aeaint,
self.cutoff_qint,
)
else:
self.cutoff_aint = cutoff_aint
assert self.cutoff <= self.cutoff_aint
assert self.cutoff_aeaint <= self.cutoff_aint
assert self.cutoff_qint <= self.cutoff_aint
def set_max_neighbors(
self,
max_neighbors,
max_neighbors_qint,
max_neighbors_aeaint,
max_neighbors_aint,
):
self.max_neighbors = max_neighbors
if (
not (self.atom_edge_interaction or self.edge_atom_interaction)
or max_neighbors_aeaint is None
):
self.max_neighbors_aeaint = self.max_neighbors
else:
self.max_neighbors_aeaint = max_neighbors_aeaint
if not self.quad_interaction or max_neighbors_qint is None:
self.max_neighbors_qint = self.max_neighbors
else:
self.max_neighbors_qint = max_neighbors_qint
if not self.atom_interaction or max_neighbors_aint is None:
self.max_neighbors_aint = max(
self.max_neighbors,
self.max_neighbors_aeaint,
self.max_neighbors_qint,
)
else:
self.max_neighbors_aint = max_neighbors_aint
assert self.max_neighbors <= self.max_neighbors_aint
assert self.max_neighbors_aeaint <= self.max_neighbors_aint
assert self.max_neighbors_qint <= self.max_neighbors_aint
def init_basis_functions(
self,
num_radial,
num_spherical,
rbf,
rbf_spherical,
envelope,
cbf,
sbf,
scale_basis,
):
self.radial_basis = RadialBasis(
num_radial=num_radial,
cutoff=self.cutoff,
rbf=rbf,
envelope=envelope,
scale_basis=scale_basis,
)
radial_basis_spherical = RadialBasis(
num_radial=num_radial,
cutoff=self.cutoff,
rbf=rbf_spherical,
envelope=envelope,
scale_basis=scale_basis,
)
if self.quad_interaction:
radial_basis_spherical_qint = RadialBasis(
num_radial=num_radial,
cutoff=self.cutoff_qint,
rbf=rbf_spherical,
envelope=envelope,
scale_basis=scale_basis,
)
self.cbf_basis_qint = CircularBasisLayer(
num_spherical,
radial_basis=radial_basis_spherical_qint,
cbf=cbf,
scale_basis=scale_basis,
)
self.sbf_basis_qint = SphericalBasisLayer(
num_spherical,
radial_basis=radial_basis_spherical,
sbf=sbf,
scale_basis=scale_basis,
)
if self.atom_edge_interaction:
self.radial_basis_aeaint = RadialBasis(
num_radial=num_radial,
cutoff=self.cutoff_aeaint,
rbf=rbf,
envelope=envelope,
scale_basis=scale_basis,
)
self.cbf_basis_aeint = CircularBasisLayer(
num_spherical,
radial_basis=radial_basis_spherical,
cbf=cbf,
scale_basis=scale_basis,
)
if self.edge_atom_interaction:
self.radial_basis_aeaint = RadialBasis(
num_radial=num_radial,
cutoff=self.cutoff_aeaint,
rbf=rbf,
envelope=envelope,
scale_basis=scale_basis,
)
radial_basis_spherical_aeaint = RadialBasis(
num_radial=num_radial,
cutoff=self.cutoff_aeaint,
rbf=rbf_spherical,
envelope=envelope,
scale_basis=scale_basis,
)
self.cbf_basis_eaint = CircularBasisLayer(
num_spherical,
radial_basis=radial_basis_spherical_aeaint,
cbf=cbf,
scale_basis=scale_basis,
)
if self.atom_interaction:
self.radial_basis_aint = RadialBasis(
num_radial=num_radial,
cutoff=self.cutoff_aint,
rbf=rbf,
envelope=envelope,
scale_basis=scale_basis,
)
self.cbf_basis_tint = CircularBasisLayer(
num_spherical,
radial_basis=radial_basis_spherical,
cbf=cbf,
scale_basis=scale_basis,
)
def init_shared_basis_layers(
self,
num_radial,
num_spherical,
emb_size_rbf,
emb_size_cbf,
emb_size_sbf,
):
# Share basis down projections across all interaction blocks
if self.quad_interaction:
self.mlp_rbf_qint = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_cbf_qint = BasisEmbedding(
num_radial, emb_size_cbf, num_spherical
)
self.mlp_sbf_qint = BasisEmbedding(
num_radial, emb_size_sbf, num_spherical**2
)
if self.atom_edge_interaction:
self.mlp_rbf_aeint = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_cbf_aeint = BasisEmbedding(
num_radial, emb_size_cbf, num_spherical
)
if self.edge_atom_interaction:
self.mlp_rbf_eaint = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_cbf_eaint = BasisEmbedding(
num_radial, emb_size_cbf, num_spherical
)
if self.atom_interaction:
self.mlp_rbf_aint = BasisEmbedding(num_radial, emb_size_rbf)
self.mlp_rbf_tint = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_cbf_tint = BasisEmbedding(
num_radial, emb_size_cbf, num_spherical
)
        # Share the dense layer of the atom embedding block across the interaction blocks
self.mlp_rbf_h = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
self.mlp_rbf_out = Dense(
num_radial,
emb_size_rbf,
activation=None,
bias=False,
)
# Set shared parameters for better gradients
self.shared_parameters = [
(self.mlp_rbf_tint.linear.weight, self.num_blocks),
(self.mlp_cbf_tint.weight, self.num_blocks),
(self.mlp_rbf_h.linear.weight, self.num_blocks),
(self.mlp_rbf_out.linear.weight, self.num_blocks + 1),
]
if self.quad_interaction:
self.shared_parameters += [
(self.mlp_rbf_qint.linear.weight, self.num_blocks),
(self.mlp_cbf_qint.weight, self.num_blocks),
(self.mlp_sbf_qint.weight, self.num_blocks),
]
if self.atom_edge_interaction:
self.shared_parameters += [
(self.mlp_rbf_aeint.linear.weight, self.num_blocks),
(self.mlp_cbf_aeint.weight, self.num_blocks),
]
if self.edge_atom_interaction:
self.shared_parameters += [
(self.mlp_rbf_eaint.linear.weight, self.num_blocks),
(self.mlp_cbf_eaint.weight, self.num_blocks),
]
if self.atom_interaction:
self.shared_parameters += [
(self.mlp_rbf_aint.weight, self.num_blocks),
]
def calculate_quad_angles(
self,
V_st,
V_qint_st,
quad_idx,
):
"""Calculate angles for quadruplet-based message passing.
Arguments
---------
        V_st: Tensor, shape = (nEdges, 3)
Normalized directions from s to t
        V_qint_st: Tensor, shape = (nEdges, 3)
Normalized directions from s to t for the quadruplet
interaction graph
quad_idx: dict of torch.Tensor
Indices relevant for quadruplet interactions.
Returns
-------
        cosφ_cab: Tensor, shape = (num_triplets_qint,)
Cosine of angle between atoms c -> a <- b.
cosφ_abd: Tensor, shape = (num_triplets_qint,)
Cosine of angle between atoms a -> b -> d.
angle_cabd: Tensor, shape = (num_quadruplets,)
Dihedral angle between atoms c <- a-b -> d.
"""
# ---------------------------------- d -> b -> a ---------------------------------- #
V_ba = V_qint_st[quad_idx["triplet_in"]["out"]]
# (num_triplets_qint, 3)
V_db = V_st[quad_idx["triplet_in"]["in"]]
# (num_triplets_qint, 3)
cosφ_abd = inner_product_clamped(V_ba, V_db)
# (num_triplets_qint,)
# Project for calculating dihedral angle
# Cross product is the same as projection, just 90° rotated
V_db_cross = torch.cross(V_db, V_ba, dim=-1) # a - b -| d
V_db_cross = V_db_cross[quad_idx["trip_in_to_quad"]]
        # (num_quadruplets, 3)
# --------------------------------- c -> a <- b ---------------------------------- #
V_ca = V_st[quad_idx["triplet_out"]["out"]] # (num_triplets_in, 3)
V_ba = V_qint_st[quad_idx["triplet_out"]["in"]] # (num_triplets_in, 3)
cosφ_cab = inner_product_clamped(V_ca, V_ba) # (n4Triplets,)
# Project for calculating dihedral angle
# Cross product is the same as projection, just 90° rotated
V_ca_cross = torch.cross(V_ca, V_ba, dim=-1) # c |- a - b
V_ca_cross = V_ca_cross[quad_idx["trip_out_to_quad"]]
        # (num_quadruplets, 3)
# -------------------------------- c -> a - b <- d -------------------------------- #
half_angle_cabd = get_angle(V_ca_cross, V_db_cross)
# (num_quadruplets,)
angle_cabd = half_angle_cabd
# Ignore parity and just use the half angle.
return cosφ_cab, cosφ_abd, angle_cabd
def select_symmetric_edges(self, tensor, mask, reorder_idx, opposite_neg):
"""Use a mask to remove values of removed edges and then
duplicate the values for the correct edge direction.
Arguments
---------
tensor: torch.Tensor
Values to symmetrize for the new tensor.
mask: torch.Tensor
Mask defining which edges go in the correct direction.
reorder_idx: torch.Tensor
Indices defining how to reorder the tensor values after
concatenating the edge values of both directions.
opposite_neg: bool
Whether the edge in the opposite direction should use the
negative tensor value.
Returns
-------
tensor_ordered: torch.Tensor
A tensor with symmetrized values.
"""
# Mask out counter-edges
tensor_directed = tensor[mask]
# Concatenate counter-edges after normal edges
sign = 1 - 2 * opposite_neg
tensor_cat = torch.cat([tensor_directed, sign * tensor_directed])
# Reorder everything so the edges of every image are consecutive
tensor_ordered = tensor_cat[reorder_idx]
return tensor_ordered
def symmetrize_edges(
self,
graph,
batch_idx,
):
"""
Symmetrize edges to ensure existence of counter-directional edges.
Some edges are only present in one direction in the data,
since every atom has a maximum number of neighbors.
We only use i->j edges here. So we lose some j->i edges
and add others by making it symmetric.
"""
num_atoms = batch_idx.shape[0]
new_graph = {}
# Generate mask
mask_sep_atoms = graph["edge_index"][0] < graph["edge_index"][1]
# Distinguish edges between the same (periodic) atom by ordering the cells
cell_earlier = (
(graph["cell_offset"][:, 0] < 0)
| (
(graph["cell_offset"][:, 0] == 0)
& (graph["cell_offset"][:, 1] < 0)
)
| (
(graph["cell_offset"][:, 0] == 0)
& (graph["cell_offset"][:, 1] == 0)
& (graph["cell_offset"][:, 2] < 0)
)
)
mask_same_atoms = graph["edge_index"][0] == graph["edge_index"][1]
mask_same_atoms &= cell_earlier
mask = mask_sep_atoms | mask_same_atoms
# Mask out counter-edges
edge_index_directed = graph["edge_index"][
mask[None, :].expand(2, -1)
].view(2, -1)
# Concatenate counter-edges after normal edges
edge_index_cat = torch.cat(
[edge_index_directed, edge_index_directed.flip(0)],
dim=1,
)
# Count remaining edges per image
batch_edge = torch.repeat_interleave(
torch.arange(
graph["num_neighbors"].size(0),
device=graph["edge_index"].device,
),
graph["num_neighbors"],
)
batch_edge = batch_edge[mask]
# segment_coo assumes sorted batch_edge
# Factor 2 since this is only one half of the edges
ones = batch_edge.new_ones(1).expand_as(batch_edge)
new_graph["num_neighbors"] = 2 * segment_coo(
ones, batch_edge, dim_size=graph["num_neighbors"].size(0)
)
# Create indexing array
edge_reorder_idx = repeat_blocks(
torch.div(new_graph["num_neighbors"], 2, rounding_mode="floor"),
repeats=2,
continuous_indexing=True,
repeat_inc=edge_index_directed.size(1),
)
# Reorder everything so the edges of every image are consecutive
new_graph["edge_index"] = edge_index_cat[:, edge_reorder_idx]
new_graph["cell_offset"] = self.select_symmetric_edges(
graph["cell_offset"], mask, edge_reorder_idx, True
)
new_graph["distance"] = self.select_symmetric_edges(
graph["distance"], mask, edge_reorder_idx, False
)
new_graph["vector"] = self.select_symmetric_edges(
graph["vector"], mask, edge_reorder_idx, True
)
# Indices for swapping c->a and a->c (for symmetric MP)
# To obtain these efficiently and without any index assumptions,
        # we sort the counter-edge IDs and then
# map this order back to the edge IDs.
# Double argsort gives the desired mapping
# from the ordered tensor to the original tensor.
edge_ids = get_edge_id(
new_graph["edge_index"], new_graph["cell_offset"], num_atoms
)
order_edge_ids = torch.argsort(edge_ids)
inv_order_edge_ids = torch.argsort(order_edge_ids)
edge_ids_counter = get_edge_id(
new_graph["edge_index"].flip(0),
-new_graph["cell_offset"],
num_atoms,
)
order_edge_ids_counter = torch.argsort(edge_ids_counter)
id_swap = order_edge_ids_counter[inv_order_edge_ids]
return new_graph, id_swap
def subselect_edges(
self,
data,
graph,
cutoff=None,
max_neighbors=None,
):
"""Subselect edges using a stricter cutoff and max_neighbors."""
subgraph = graph.copy()
if cutoff is not None:
edge_mask = subgraph["distance"] <= cutoff
subgraph["edge_index"] = subgraph["edge_index"][:, edge_mask]
subgraph["cell_offset"] = subgraph["cell_offset"][edge_mask]
subgraph["num_neighbors"] = mask_neighbors(
subgraph["num_neighbors"], edge_mask
)
subgraph["distance"] = subgraph["distance"][edge_mask]
subgraph["vector"] = subgraph["vector"][edge_mask]
if max_neighbors is not None:
edge_mask, subgraph["num_neighbors"] = get_max_neighbors_mask(
natoms=data.natoms,
index=subgraph["edge_index"][1],
atom_distance=subgraph["distance"],
max_num_neighbors_threshold=max_neighbors,
enforce_max_strictly=self.enforce_max_neighbors_strictly,
)
if not torch.all(edge_mask):
subgraph["edge_index"] = subgraph["edge_index"][:, edge_mask]
subgraph["cell_offset"] = subgraph["cell_offset"][edge_mask]
subgraph["distance"] = subgraph["distance"][edge_mask]
subgraph["vector"] = subgraph["vector"][edge_mask]
empty_image = subgraph["num_neighbors"] == 0
if torch.any(empty_image):
raise ValueError(
f"An image has no neighbors: id={data.id[empty_image]}, "
f"sid={data.sid[empty_image]}, fid={data.fid[empty_image]}"
)
return subgraph
def generate_graph_dict(self, data, cutoff, max_neighbors):
"""Generate a radius/nearest neighbor graph."""
otf_graph = cutoff > 6 or max_neighbors > 50 or self.otf_graph
(
edge_index,
edge_dist,
distance_vec,
cell_offsets,
_, # cell offset distances
num_neighbors,
) = self.generate_graph(
data,
cutoff=cutoff,
max_neighbors=max_neighbors,
otf_graph=otf_graph,
)
# These vectors actually point in the opposite direction.
# But we want to use col as idx_t for efficient aggregation.
edge_vector = -distance_vec / edge_dist[:, None]
cell_offsets = -cell_offsets # a - c + offset
graph = {
"edge_index": edge_index,
"distance": edge_dist,
"vector": edge_vector,
"cell_offset": cell_offsets,
"num_neighbors": num_neighbors,
}
# Mask interaction edges if required
if otf_graph or np.isclose(cutoff, 6):
select_cutoff = None
else:
select_cutoff = cutoff
if otf_graph or max_neighbors == 50:
select_neighbors = None
else:
select_neighbors = max_neighbors
graph = self.subselect_edges(
data=data,
graph=graph,
cutoff=select_cutoff,
max_neighbors=select_neighbors,
)
return graph
def subselect_graph(
self,
data,
graph,
cutoff,
max_neighbors,
cutoff_orig,
max_neighbors_orig,
):
"""If the new cutoff and max_neighbors is different from the original,
subselect the edges of a given graph.
"""
# Check if embedding edges are different from interaction edges
if np.isclose(cutoff, cutoff_orig):
select_cutoff = None
else:
select_cutoff = cutoff
if max_neighbors == max_neighbors_orig:
select_neighbors = None
else:
select_neighbors = max_neighbors
return self.subselect_edges(
data=data,
graph=graph,
cutoff=select_cutoff,
max_neighbors=select_neighbors,
)
def get_graphs_and_indices(self, data):
""" "Generate embedding and interaction graphs and indices."""
num_atoms = data.atomic_numbers.size(0)
# Atom interaction graph is always the largest
if (
self.atom_edge_interaction
or self.edge_atom_interaction
or self.atom_interaction
):
a2a_graph = self.generate_graph_dict(
data, self.cutoff_aint, self.max_neighbors_aint
)
main_graph = self.subselect_graph(
data,
a2a_graph,
self.cutoff,
self.max_neighbors,
self.cutoff_aint,
self.max_neighbors_aint,
)
a2ee2a_graph = self.subselect_graph(
data,
a2a_graph,
self.cutoff_aeaint,
self.max_neighbors_aeaint,
self.cutoff_aint,
self.max_neighbors_aint,
)
else:
main_graph = self.generate_graph_dict(
data, self.cutoff, self.max_neighbors
)
a2a_graph = {}
a2ee2a_graph = {}
if self.quad_interaction:
if (
self.atom_edge_interaction
or self.edge_atom_interaction
or self.atom_interaction
):
qint_graph = self.subselect_graph(
data,
a2a_graph,
self.cutoff_qint,
self.max_neighbors_qint,
self.cutoff_aint,
self.max_neighbors_aint,
)
else:
assert self.cutoff_qint <= self.cutoff
assert self.max_neighbors_qint <= self.max_neighbors
qint_graph = self.subselect_graph(
data,
main_graph,
self.cutoff_qint,
self.max_neighbors_qint,
self.cutoff,
self.max_neighbors,
)
# Only use quadruplets for certain tags
self.qint_tags = self.qint_tags.to(qint_graph["edge_index"].device)
tags_s = data.tags[qint_graph["edge_index"][0]]
tags_t = data.tags[qint_graph["edge_index"][1]]
qint_tag_mask_s = (tags_s[..., None] == self.qint_tags).any(dim=-1)
qint_tag_mask_t = (tags_t[..., None] == self.qint_tags).any(dim=-1)
qint_tag_mask = qint_tag_mask_s | qint_tag_mask_t
qint_graph["edge_index"] = qint_graph["edge_index"][
:, qint_tag_mask
]
qint_graph["cell_offset"] = qint_graph["cell_offset"][
qint_tag_mask, :
]
qint_graph["distance"] = qint_graph["distance"][qint_tag_mask]
qint_graph["vector"] = qint_graph["vector"][qint_tag_mask, :]
del qint_graph["num_neighbors"]
else:
qint_graph = {}
# Symmetrize edges for swapping in symmetric message passing
main_graph, id_swap = self.symmetrize_edges(main_graph, data.batch)
trip_idx_e2e = get_triplets(main_graph, num_atoms=num_atoms)
# Additional indices for quadruplets
if self.quad_interaction:
quad_idx = get_quadruplets(
main_graph,
qint_graph,
num_atoms,
)
else:
quad_idx = {}
if self.atom_edge_interaction:
trip_idx_a2e = get_mixed_triplets(
a2ee2a_graph,
main_graph,
num_atoms=num_atoms,
return_agg_idx=True,
)
else:
trip_idx_a2e = {}
if self.edge_atom_interaction:
trip_idx_e2a = get_mixed_triplets(
main_graph,
a2ee2a_graph,
num_atoms=num_atoms,
return_agg_idx=True,
)
# a2ee2a_graph['edge_index'][1] has to be sorted for this
a2ee2a_graph["target_neighbor_idx"] = get_inner_idx(
a2ee2a_graph["edge_index"][1], dim_size=num_atoms
)
else:
trip_idx_e2a = {}
if self.atom_interaction:
# a2a_graph['edge_index'][1] has to be sorted for this
a2a_graph["target_neighbor_idx"] = get_inner_idx(
a2a_graph["edge_index"][1], dim_size=num_atoms
)
return (
main_graph,
a2a_graph,
a2ee2a_graph,
qint_graph,
id_swap,
trip_idx_e2e,
trip_idx_a2e,
trip_idx_e2a,
quad_idx,
)
def get_bases(
self,
main_graph,
a2a_graph,
a2ee2a_graph,
qint_graph,
trip_idx_e2e,
trip_idx_a2e,
trip_idx_e2a,
quad_idx,
num_atoms,
):
"""Calculate and transform basis functions."""
basis_rad_main_raw = self.radial_basis(main_graph["distance"])
# Calculate triplet angles
cosφ_cab = inner_product_clamped(
main_graph["vector"][trip_idx_e2e["out"]],
main_graph["vector"][trip_idx_e2e["in"]],
)
basis_rad_cir_e2e_raw, basis_cir_e2e_raw = self.cbf_basis_tint(
main_graph["distance"], cosφ_cab
)
if self.quad_interaction:
# Calculate quadruplet angles
cosφ_cab_q, cosφ_abd, angle_cabd = self.calculate_quad_angles(
main_graph["vector"],
qint_graph["vector"],
quad_idx,
)
basis_rad_cir_qint_raw, basis_cir_qint_raw = self.cbf_basis_qint(
qint_graph["distance"], cosφ_abd
)
basis_rad_sph_qint_raw, basis_sph_qint_raw = self.sbf_basis_qint(
main_graph["distance"],
cosφ_cab_q[quad_idx["trip_out_to_quad"]],
angle_cabd,
)
if self.atom_edge_interaction:
basis_rad_a2ee2a_raw = self.radial_basis_aeaint(
a2ee2a_graph["distance"]
)
cosφ_cab_a2e = inner_product_clamped(
main_graph["vector"][trip_idx_a2e["out"]],
a2ee2a_graph["vector"][trip_idx_a2e["in"]],
)
basis_rad_cir_a2e_raw, basis_cir_a2e_raw = self.cbf_basis_aeint(
main_graph["distance"], cosφ_cab_a2e
)
if self.edge_atom_interaction:
cosφ_cab_e2a = inner_product_clamped(
a2ee2a_graph["vector"][trip_idx_e2a["out"]],
main_graph["vector"][trip_idx_e2a["in"]],
)
basis_rad_cir_e2a_raw, basis_cir_e2a_raw = self.cbf_basis_eaint(
a2ee2a_graph["distance"], cosφ_cab_e2a
)
if self.atom_interaction:
basis_rad_a2a_raw = self.radial_basis_aint(a2a_graph["distance"])
# Shared Down Projections
bases_qint = {}
if self.quad_interaction:
bases_qint["rad"] = self.mlp_rbf_qint(basis_rad_main_raw)
bases_qint["cir"] = self.mlp_cbf_qint(
rad_basis=basis_rad_cir_qint_raw,
sph_basis=basis_cir_qint_raw,
idx_sph_outer=quad_idx["triplet_in"]["out"],
)
bases_qint["sph"] = self.mlp_sbf_qint(
rad_basis=basis_rad_sph_qint_raw,
sph_basis=basis_sph_qint_raw,
idx_sph_outer=quad_idx["out"],
idx_sph_inner=quad_idx["out_agg"],
)
bases_a2e = {}
if self.atom_edge_interaction:
bases_a2e["rad"] = self.mlp_rbf_aeint(basis_rad_a2ee2a_raw)
bases_a2e["cir"] = self.mlp_cbf_aeint(
rad_basis=basis_rad_cir_a2e_raw,
sph_basis=basis_cir_a2e_raw,
idx_sph_outer=trip_idx_a2e["out"],
idx_sph_inner=trip_idx_a2e["out_agg"],
)
bases_e2a = {}
if self.edge_atom_interaction:
bases_e2a["rad"] = self.mlp_rbf_eaint(basis_rad_main_raw)
bases_e2a["cir"] = self.mlp_cbf_eaint(
rad_basis=basis_rad_cir_e2a_raw,
sph_basis=basis_cir_e2a_raw,
idx_rad_outer=a2ee2a_graph["edge_index"][1],
idx_rad_inner=a2ee2a_graph["target_neighbor_idx"],
idx_sph_outer=trip_idx_e2a["out"],
idx_sph_inner=trip_idx_e2a["out_agg"],
num_atoms=num_atoms,
)
if self.atom_interaction:
basis_a2a_rad = self.mlp_rbf_aint(
rad_basis=basis_rad_a2a_raw,
idx_rad_outer=a2a_graph["edge_index"][1],
idx_rad_inner=a2a_graph["target_neighbor_idx"],
num_atoms=num_atoms,
)
else:
basis_a2a_rad = None
bases_e2e = {}
bases_e2e["rad"] = self.mlp_rbf_tint(basis_rad_main_raw)
bases_e2e["cir"] = self.mlp_cbf_tint(
rad_basis=basis_rad_cir_e2e_raw,
sph_basis=basis_cir_e2e_raw,
idx_sph_outer=trip_idx_e2e["out"],
idx_sph_inner=trip_idx_e2e["out_agg"],
)
basis_atom_update = self.mlp_rbf_h(basis_rad_main_raw)
basis_output = self.mlp_rbf_out(basis_rad_main_raw)
return (
basis_rad_main_raw,
basis_atom_update,
basis_output,
bases_qint,
bases_e2e,
bases_a2e,
bases_e2a,
basis_a2a_rad,
)
@conditional_grad(torch.enable_grad())
def forward(self, data):
pos = data.pos
batch = data.batch
atomic_numbers = data.atomic_numbers.long()
num_atoms = atomic_numbers.shape[0]
if self.regress_forces and not self.direct_forces:
pos.requires_grad_(True)
(
main_graph,
a2a_graph,
a2ee2a_graph,
qint_graph,
id_swap,
trip_idx_e2e,
trip_idx_a2e,
trip_idx_e2a,
quad_idx,
) = self.get_graphs_and_indices(data)
_, idx_t = main_graph["edge_index"]
(
basis_rad_raw,
basis_atom_update,
basis_output,
bases_qint,
bases_e2e,
bases_a2e,
bases_e2a,
basis_a2a_rad,
) = self.get_bases(
main_graph=main_graph,
a2a_graph=a2a_graph,
a2ee2a_graph=a2ee2a_graph,
qint_graph=qint_graph,
trip_idx_e2e=trip_idx_e2e,
trip_idx_a2e=trip_idx_a2e,
trip_idx_e2a=trip_idx_e2a,
quad_idx=quad_idx,
num_atoms=num_atoms,
)
# Embedding block
h = self.atom_emb(atomic_numbers)
# (nAtoms, emb_size_atom)
m = self.edge_emb(h, basis_rad_raw, main_graph["edge_index"])
# (nEdges, emb_size_edge)
x_E, x_F = self.out_blocks[0](h, m, basis_output, idx_t)
# (nAtoms, emb_size_atom), (nEdges, emb_size_edge)
xs_E, xs_F = [x_E], [x_F]
for i in range(self.num_blocks):
# Interaction block
h, m = self.int_blocks[i](
h=h,
m=m,
bases_qint=bases_qint,
bases_e2e=bases_e2e,
bases_a2e=bases_a2e,
bases_e2a=bases_e2a,
basis_a2a_rad=basis_a2a_rad,
basis_atom_update=basis_atom_update,
edge_index_main=main_graph["edge_index"],
a2ee2a_graph=a2ee2a_graph,
a2a_graph=a2a_graph,
id_swap=id_swap,
trip_idx_e2e=trip_idx_e2e,
trip_idx_a2e=trip_idx_a2e,
trip_idx_e2a=trip_idx_e2a,
quad_idx=quad_idx,
) # (nAtoms, emb_size_atom), (nEdges, emb_size_edge)
x_E, x_F = self.out_blocks[i + 1](h, m, basis_output, idx_t)
# (nAtoms, emb_size_atom), (nEdges, emb_size_edge)
xs_E.append(x_E)
xs_F.append(x_F)
# Global output block for final predictions
x_E = self.out_mlp_E(torch.cat(xs_E, dim=-1))
if self.direct_forces:
x_F = self.out_mlp_F(torch.cat(xs_F, dim=-1))
with torch.cuda.amp.autocast(False):
E_t = self.out_energy(x_E.float())
if self.direct_forces:
F_st = self.out_forces(x_F.float())
nMolecules = torch.max(batch) + 1
if self.extensive:
E_t = scatter_det(
E_t, batch, dim=0, dim_size=nMolecules, reduce="add"
) # (nMolecules, num_targets)
else:
E_t = scatter_det(
E_t, batch, dim=0, dim_size=nMolecules, reduce="mean"
) # (nMolecules, num_targets)
if self.regress_forces:
if self.direct_forces:
if self.forces_coupled: # enforce F_st = F_ts
nEdges = idx_t.shape[0]
id_undir = repeat_blocks(
main_graph["num_neighbors"] // 2,
repeats=2,
continuous_indexing=True,
)
F_st = scatter_det(
F_st,
id_undir,
dim=0,
dim_size=int(nEdges / 2),
reduce="mean",
) # (nEdges/2, num_targets)
F_st = F_st[id_undir] # (nEdges, num_targets)
# map forces in edge directions
F_st_vec = F_st[:, :, None] * main_graph["vector"][:, None, :]
# (nEdges, num_targets, 3)
F_t = scatter_det(
F_st_vec,
idx_t,
dim=0,
dim_size=num_atoms,
reduce="add",
) # (nAtoms, num_targets, 3)
else:
F_t = self.force_scaler.calc_forces_and_update(E_t, pos)
E_t = E_t.squeeze(1) # (num_molecules)
F_t = F_t.squeeze(1) # (num_atoms, 3)
return E_t, F_t
else:
E_t = E_t.squeeze(1) # (num_molecules)
return E_t
@property
def num_params(self) -> int:
return sum(p.numel() for p in self.parameters())
| 48,949 | 34.834553 | 93 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
from torch_scatter import segment_coo, segment_csr
from torch_sparse import SparseTensor
def ragged_range(sizes):
"""Multiple concatenated ranges.
Examples
--------
sizes = [1 4 2 3]
Return: [0 0 1 2 3 0 1 0 1 2]
"""
assert sizes.dim() == 1
if sizes.sum() == 0:
return sizes.new_empty(0)
# Remove 0 sizes
sizes_nonzero = sizes > 0
if not torch.all(sizes_nonzero):
sizes = torch.masked_select(sizes, sizes_nonzero)
    # Initialize indexing array with ones as we need to set up incremental indexing
# within each group when cumulatively summed at the final stage.
id_steps = torch.ones(sizes.sum(), dtype=torch.long, device=sizes.device)
id_steps[0] = 0
insert_index = sizes[:-1].cumsum(0)
insert_val = (1 - sizes)[:-1]
# Assign index-offsetting values
id_steps[insert_index] = insert_val
# Finally index into input array for the group repeated o/p
res = id_steps.cumsum(0)
return res
def repeat_blocks(
sizes,
repeats,
continuous_indexing: bool = True,
start_idx: int = 0,
block_inc: int = 0,
repeat_inc: int = 0,
) -> torch.Tensor:
"""Repeat blocks of indices.
Adapted from https://stackoverflow.com/questions/51154989/numpy-vectorized-function-to-repeat-blocks-of-consecutive-elements
continuous_indexing: Whether to keep increasing the index after each block
start_idx: Starting index
block_inc: Number to increment by after each block,
either global or per block. Shape: len(sizes) - 1
repeat_inc: Number to increment by after each repetition,
either global or per block
Examples
--------
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = False
Return: [0 0 0 0 1 2 0 1 2 0 1 0 1 0 1]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 0 0 1 2 3 1 2 3 4 5 4 5 4 5]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
repeat_inc = 4
Return: [0 4 8 1 2 3 5 6 7 4 5 8 9 12 13]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
start_idx = 5
Return: [5 5 5 6 7 8 6 7 8 9 10 9 10 9 10]
sizes = [1,3,2] ; repeats = [3,2,3] ; continuous_indexing = True ;
block_inc = 1
Return: [0 0 0 2 3 4 2 3 4 6 7 6 7 6 7]
sizes = [0,3,2] ; repeats = [3,2,3] ; continuous_indexing = True
Return: [0 1 2 0 1 2 3 4 3 4 3 4]
sizes = [2,3,2] ; repeats = [2,0,2] ; continuous_indexing = True
Return: [0 1 0 1 5 6 5 6]
"""
assert sizes.dim() == 1
assert all(sizes >= 0)
# Remove 0 sizes
sizes_nonzero = sizes > 0
if not torch.all(sizes_nonzero):
assert block_inc == 0 # Implementing this is not worth the effort
sizes = torch.masked_select(sizes, sizes_nonzero)
if isinstance(repeats, torch.Tensor):
repeats = torch.masked_select(repeats, sizes_nonzero)
if isinstance(repeat_inc, torch.Tensor):
repeat_inc = torch.masked_select(repeat_inc, sizes_nonzero)
if isinstance(repeats, torch.Tensor):
assert all(repeats >= 0)
insert_dummy = repeats[0] == 0
if insert_dummy:
one = sizes.new_ones(1)
zero = sizes.new_zeros(1)
sizes = torch.cat((one, sizes))
repeats = torch.cat((one, repeats))
if isinstance(block_inc, torch.Tensor):
block_inc = torch.cat((zero, block_inc))
if isinstance(repeat_inc, torch.Tensor):
repeat_inc = torch.cat((zero, repeat_inc))
else:
assert repeats >= 0
insert_dummy = False
# Get repeats for each group using group lengths/sizes
r1 = torch.repeat_interleave(
torch.arange(len(sizes), device=sizes.device), repeats
)
# Get total size of output array, as needed to initialize output indexing array
N = (sizes * repeats).sum()
    # Initialize indexing array with ones as we need to set up incremental indexing
# within each group when cumulatively summed at the final stage.
# Two steps here:
    # 1. Within each group, we have multiple sequences, so set up the offsetting
    #    at each sequence length using the sequence lengths preceding it.
id_ar = torch.ones(N, dtype=torch.long, device=sizes.device)
id_ar[0] = 0
insert_index = sizes[r1[:-1]].cumsum(0)
insert_val = (1 - sizes)[r1[:-1]]
if isinstance(repeats, torch.Tensor) and torch.any(repeats == 0):
diffs = r1[1:] - r1[:-1]
indptr = torch.cat((sizes.new_zeros(1), diffs.cumsum(0)))
if continuous_indexing:
# If a group was skipped (repeats=0) we need to add its size
insert_val += segment_csr(sizes[: r1[-1]], indptr, reduce="sum")
# Add block increments
if isinstance(block_inc, torch.Tensor):
insert_val += segment_csr(
block_inc[: r1[-1]], indptr, reduce="sum"
)
else:
insert_val += block_inc * (indptr[1:] - indptr[:-1])
if insert_dummy:
insert_val[0] -= block_inc
else:
idx = r1[1:] != r1[:-1]
if continuous_indexing:
# 2. For each group, make sure the indexing starts from the next group's
# first element. So, simply assign 1s there.
insert_val[idx] = 1
# Add block increments
insert_val[idx] += block_inc
# Add repeat_inc within each group
if isinstance(repeat_inc, torch.Tensor):
insert_val += repeat_inc[r1[:-1]]
if isinstance(repeats, torch.Tensor):
repeat_inc_inner = repeat_inc[repeats > 0][:-1]
else:
repeat_inc_inner = repeat_inc[:-1]
else:
insert_val += repeat_inc
repeat_inc_inner = repeat_inc
# Subtract the increments between groups
if isinstance(repeats, torch.Tensor):
repeats_inner = repeats[repeats > 0][:-1]
else:
repeats_inner = repeats
insert_val[r1[1:] != r1[:-1]] -= repeat_inc_inner * repeats_inner
# Assign index-offsetting values
id_ar[insert_index] = insert_val
if insert_dummy:
id_ar = id_ar[1:]
if continuous_indexing:
id_ar[0] -= 1
# Set start index now, in case of insertion due to leading repeats=0
id_ar[0] += start_idx
# Finally index into input array for the group repeated o/p
res = id_ar.cumsum(0)
return res
def masked_select_sparsetensor_flat(src, mask):
row, col, value = src.coo()
row = row[mask]
col = col[mask]
value = value[mask]
return SparseTensor(
row=row, col=col, value=value, sparse_sizes=src.sparse_sizes()
)
def calculate_interatomic_vectors(R, id_s, id_t, offsets_st):
"""
Calculate the vectors connecting the given atom pairs,
considering offsets from periodic boundary conditions (PBC).
Arguments
---------
R: Tensor, shape = (nAtoms, 3)
Atom positions.
id_s: Tensor, shape = (nEdges,)
Indices of the source atom of the edges.
id_t: Tensor, shape = (nEdges,)
Indices of the target atom of the edges.
    offsets_st: Tensor, shape = (nEdges, 3)
PBC offsets of the edges.
Subtract this from the correct direction.
Returns
-------
(D_st, V_st): tuple
D_st: Tensor, shape = (nEdges,)
Distance from atom t to s.
        V_st: Tensor, shape = (nEdges, 3)
Unit direction from atom t to s.
"""
Rs = R[id_s]
Rt = R[id_t]
    # Compute the connecting vector, including the PBC offset if given
if offsets_st is None:
V_st = Rt - Rs # s -> t
else:
V_st = Rt - Rs + offsets_st # s -> t
D_st = torch.sqrt(torch.sum(V_st**2, dim=1))
V_st = V_st / D_st[..., None]
return D_st, V_st
def inner_product_clamped(x, y) -> torch.Tensor:
"""
Calculate the inner product between the given normalized vectors,
giving a result between -1 and 1.
"""
return torch.sum(x * y, dim=-1).clamp(min=-1, max=1)
def get_angle(R_ac, R_ab) -> torch.Tensor:
"""Calculate angles between atoms c -> a <- b.
Arguments
---------
R_ac: Tensor, shape = (N, 3)
Vector from atom a to c.
R_ab: Tensor, shape = (N, 3)
Vector from atom a to b.
Returns
-------
angle_cab: Tensor, shape = (N,)
Angle between atoms c <- a -> b.
"""
# cos(alpha) = (u * v) / (|u|*|v|)
x = torch.sum(R_ac * R_ab, dim=-1) # shape = (N,)
# sin(alpha) = |u x v| / (|u|*|v|)
y = torch.cross(R_ac, R_ab, dim=-1).norm(dim=-1) # shape = (N,)
y = y.clamp(min=1e-9) # Avoid NaN gradient for y = (0,0,0)
angle = torch.atan2(y, x)
return angle
def vector_rejection(R_ab, P_n):
"""
Project the vector R_ab onto a plane with normal vector P_n.
Arguments
---------
R_ab: Tensor, shape = (N, 3)
Vector from atom a to b.
P_n: Tensor, shape = (N, 3)
Normal vector of a plane onto which to project R_ab.
Returns
-------
R_ab_proj: Tensor, shape = (N, 3)
Projected vector (orthogonal to P_n).
"""
a_x_b = torch.sum(R_ab * P_n, dim=-1)
b_x_b = torch.sum(P_n * P_n, dim=-1)
return R_ab - (a_x_b / b_x_b)[:, None] * P_n
def get_projected_angle(R_ab, P_n, eps: float = 1e-4) -> torch.Tensor:
"""
Project the vector R_ab onto a plane with normal vector P_n,
then calculate the angle w.r.t. the (x [cross] P_n),
or (y [cross] P_n) if the former would be ill-defined/numerically unstable.
Arguments
---------
R_ab: Tensor, shape = (N, 3)
Vector from atom a to b.
P_n: Tensor, shape = (N, 3)
Normal vector of a plane onto which to project R_ab.
eps: float
Norm of projection below which to use the y-axis instead of x.
Returns
-------
angle_ab: Tensor, shape = (N)
Angle on plane w.r.t. x- or y-axis.
"""
R_ab_proj = torch.cross(R_ab, P_n, dim=-1)
# Obtain axis defining the angle=0
x = P_n.new_tensor([[1, 0, 0]]).expand_as(P_n)
zero_angle = torch.cross(x, P_n, dim=-1)
use_y = torch.norm(zero_angle, dim=-1) < eps
P_n_y = P_n[use_y]
y = P_n_y.new_tensor([[0, 1, 0]]).expand_as(P_n_y)
y_cross = torch.cross(y, P_n_y, dim=-1)
zero_angle[use_y] = y_cross
angle = get_angle(zero_angle, R_ab_proj)
# Flip sign of angle if necessary to obtain clock-wise angles
cross = torch.cross(zero_angle, R_ab_proj, dim=-1)
flip_sign = torch.sum(cross * P_n, dim=-1) < 0
angle[flip_sign] = -angle[flip_sign]
return angle
def mask_neighbors(neighbors, edge_mask):
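    """Recompute the number of edges per image after applying an edge mask."""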
neighbors_old_indptr = torch.cat([neighbors.new_zeros(1), neighbors])
neighbors_old_indptr = torch.cumsum(neighbors_old_indptr, dim=0)
neighbors = segment_csr(edge_mask.long(), neighbors_old_indptr)
return neighbors
def get_neighbor_order(num_atoms: int, index, atom_distance) -> torch.Tensor:
"""
    Give the rank of each edge (by interatomic distance, ascending) among all
    edges that share the same center atom in `index`.
"""
device = index.device
# Get sorted index and inverse sorting
# Necessary for index_sort_map
index_sorted, index_order = torch.sort(index)
index_order_inverse = torch.argsort(index_order)
# Get number of neighbors
ones = index_sorted.new_ones(1).expand_as(index_sorted)
num_neighbors = segment_coo(ones, index_sorted, dim_size=num_atoms)
max_num_neighbors = num_neighbors.max()
# Create a tensor of size [num_atoms, max_num_neighbors] to sort the distances of the neighbors.
# Fill with infinity so we can easily remove unused distances later.
distance_sort = torch.full(
[num_atoms * max_num_neighbors], np.inf, device=device
)
# Create an index map to map distances from atom_distance to distance_sort
index_neighbor_offset = torch.cumsum(num_neighbors, dim=0) - num_neighbors
index_neighbor_offset_expand = torch.repeat_interleave(
index_neighbor_offset, num_neighbors
)
index_sort_map = (
index_sorted * max_num_neighbors
+ torch.arange(len(index_sorted), device=device)
- index_neighbor_offset_expand
)
distance_sort.index_copy_(0, index_sort_map, atom_distance)
distance_sort = distance_sort.view(num_atoms, max_num_neighbors)
# Sort neighboring atoms based on distance
distance_sort, index_sort = torch.sort(distance_sort, dim=1)
# Offset index_sort so that it indexes into index_sorted
index_sort = index_sort + index_neighbor_offset.view(-1, 1).expand(
-1, max_num_neighbors
)
# Remove "unused pairs" with infinite distances
mask_finite = torch.isfinite(distance_sort)
index_sort = torch.masked_select(index_sort, mask_finite)
# Create indices specifying the order in index_sort
order_peratom = torch.arange(max_num_neighbors, device=device)[
None, :
].expand_as(mask_finite)
order_peratom = torch.masked_select(order_peratom, mask_finite)
# Re-index to obtain order value of each neighbor in index_sorted
order = torch.zeros(len(index), device=device, dtype=torch.long)
order[index_sort] = order_peratom
return order[index_order_inverse]
def get_inner_idx(idx, dim_size):
"""
Assign an inner index to each element (neighbor) with the same index.
For example, with idx=[0 0 0 1 1 1 1 2 2] this returns [0 1 2 0 1 2 3 0 1].
    These indices allow reshaping the neighbor indices into a dense matrix.
idx has to be sorted for this to work.
"""
ones = idx.new_ones(1).expand_as(idx)
num_neighbors = segment_coo(ones, idx, dim_size=dim_size)
inner_idx = ragged_range(num_neighbors)
return inner_idx
def get_edge_id(edge_idx, cell_offsets, num_atoms: int):
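    """Encode each edge (atom pair plus periodic cell offset) as a single
    integer ID that is unique per directed edge and offset, e.g. for matching
    an edge with its reversed counterpart."""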
cell_basis = cell_offsets.max() - cell_offsets.min() + 1
cell_id = (
(
cell_offsets
* cell_offsets.new_tensor([[1, cell_basis, cell_basis**2]])
)
.sum(-1)
.long()
)
edge_id = edge_idx[0] + edge_idx[1] * num_atoms + cell_id * num_atoms**2
return edge_id
| 14,529 | 33.188235 | 128 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/base_layers.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch
from ..initializers import he_orthogonal_init
class Dense(torch.nn.Module):
"""
Combines dense layer with scaling for silu activation.
Arguments
---------
in_features: int
Input embedding size.
out_features: int
Output embedding size.
bias: bool
True if use bias.
activation: str
Name of the activation function to use.
"""
def __init__(
self, in_features, out_features, bias: bool = False, activation=None
) -> None:
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
if isinstance(activation, str):
activation = activation.lower()
if activation in ["silu", "swish"]:
self._activation = ScaledSiLU()
elif activation is None:
self._activation = torch.nn.Identity()
else:
raise NotImplementedError(
"Activation function not implemented for GemNet (yet)."
)
def reset_parameters(self, initializer=he_orthogonal_init) -> None:
initializer(self.linear.weight)
if self.linear.bias is not None:
self.linear.bias.data.fill_(0)
def forward(self, x):
x = self.linear(x)
x = self._activation(x)
return x
class ScaledSiLU(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.scale_factor = 1 / 0.6
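        # 1/0.6 roughly restores unit variance, since SiLU(x) has a standard
        # deviation of about 0.6 for standard-normal inputs.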
self._activation = torch.nn.SiLU()
def forward(self, x):
return self._activation(x) * self.scale_factor
class ResidualLayer(torch.nn.Module):
"""
Residual block with output scaled by 1/sqrt(2).
Arguments
---------
units: int
Input and output embedding size.
nLayers: int
Number of dense layers.
layer: torch.nn.Module
Class for the layers inside the residual block.
    layer_kwargs: dict
Keyword arguments for initializing the layers.
"""
def __init__(
self, units: int, nLayers: int = 2, layer=Dense, **layer_kwargs
) -> None:
super().__init__()
self.dense_mlp = torch.nn.Sequential(
*[
layer(
in_features=units,
out_features=units,
bias=False,
**layer_kwargs
)
for _ in range(nLayers)
]
)
self.inv_sqrt_2 = 1 / math.sqrt(2)
def forward(self, input):
x = self.dense_mlp(input)
x = input + x
x = x * self.inv_sqrt_2
return x
| 2,826 | 25.175926 | 76 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/atom_update_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch
from torch_scatter import scatter
from ocpmodels.common.utils import scatter_det
from ocpmodels.modules.scaling import ScaleFactor
from ..initializers import get_initializer
from .base_layers import Dense, ResidualLayer
class AtomUpdateBlock(torch.nn.Module):
"""
Aggregate the message embeddings of the atoms
Arguments
---------
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
emb_size_rbf: int
Embedding size of the radial basis.
nHidden: int
Number of residual blocks.
activation: callable/str
Name of the activation function to use in the dense layers.
"""
def __init__(
self,
emb_size_atom: int,
emb_size_edge: int,
emb_size_rbf: int,
nHidden: int,
activation=None,
) -> None:
super().__init__()
self.dense_rbf = Dense(
emb_size_rbf, emb_size_edge, activation=None, bias=False
)
self.scale_sum = ScaleFactor()
self.layers = self.get_mlp(
emb_size_edge, emb_size_atom, nHidden, activation
)
def get_mlp(self, units_in, units, nHidden, activation):
if units_in != units:
dense1 = Dense(units_in, units, activation=activation, bias=False)
mlp = [dense1]
else:
mlp = []
res = [
ResidualLayer(units, nLayers=2, activation=activation)
for i in range(nHidden)
]
mlp += res
return torch.nn.ModuleList(mlp)
def forward(self, h, m, basis_rad, idx_atom):
"""
Returns
-------
h: torch.Tensor, shape=(nAtoms, emb_size_atom)
Atom embedding.
"""
nAtoms = h.shape[0]
bases_emb = self.dense_rbf(basis_rad) # (nEdges, emb_size_edge)
x = m * bases_emb
x2 = scatter_det(
x, idx_atom, dim=0, dim_size=nAtoms, reduce="sum"
) # (nAtoms, emb_size_edge)
x = self.scale_sum(x2, ref=m)
for layer in self.layers:
x = layer(x) # (nAtoms, emb_size_atom)
return x
class OutputBlock(AtomUpdateBlock):
"""
Combines the atom update block and subsequent final dense layer.
Arguments
---------
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
emb_size_rbf: int
Embedding size of the radial basis.
nHidden: int
Number of residual blocks before adding the atom embedding.
nHidden_afteratom: int
Number of residual blocks after adding the atom embedding.
activation: str
Name of the activation function to use in the dense layers.
direct_forces: bool
If true directly predict forces, i.e. without taking the gradient
of the energy potential.
"""
def __init__(
self,
emb_size_atom: int,
emb_size_edge: int,
emb_size_rbf: int,
nHidden: int,
nHidden_afteratom: int,
activation=None,
direct_forces: bool = True,
) -> None:
super().__init__(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=nHidden,
activation=activation,
)
self.direct_forces = direct_forces
self.seq_energy_pre = self.layers # inherited from parent class
if nHidden_afteratom >= 1:
self.seq_energy2 = self.get_mlp(
emb_size_atom, emb_size_atom, nHidden_afteratom, activation
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
else:
self.seq_energy2 = None
if self.direct_forces:
self.scale_rbf_F = ScaleFactor()
self.seq_forces = self.get_mlp(
emb_size_edge, emb_size_edge, nHidden, activation
)
self.dense_rbf_F = Dense(
emb_size_rbf, emb_size_edge, activation=None, bias=False
)
def forward(self, h, m, basis_rad, idx_atom):
"""
Returns
-------
torch.Tensor, shape=(nAtoms, emb_size_atom)
Output atom embeddings.
torch.Tensor, shape=(nEdges, emb_size_edge)
Output edge embeddings.
"""
nAtoms = h.shape[0]
# ------------------------ Atom embeddings ------------------------ #
basis_emb_E = self.dense_rbf(basis_rad) # (nEdges, emb_size_edge)
x = m * basis_emb_E
x_E = scatter_det(
x, idx_atom, dim=0, dim_size=nAtoms, reduce="sum"
) # (nAtoms, emb_size_edge)
x_E = self.scale_sum(x_E, ref=m)
for layer in self.seq_energy_pre:
x_E = layer(x_E) # (nAtoms, emb_size_atom)
if self.seq_energy2 is not None:
x_E = x_E + h
x_E = x_E * self.inv_sqrt_2
for layer in self.seq_energy2:
x_E = layer(x_E) # (nAtoms, emb_size_atom)
# ------------------------- Edge embeddings ------------------------ #
if self.direct_forces:
x_F = m
            for layer in self.seq_forces:
x_F = layer(x_F) # (nEdges, emb_size_edge)
basis_emb_F = self.dense_rbf_F(basis_rad)
# (nEdges, emb_size_edge)
x_F_basis = x_F * basis_emb_F
x_F = self.scale_rbf_F(x_F_basis, ref=x_F)
else:
x_F = 0
# ------------------------------------------------------------------ #
return x_E, x_F
| 5,833 | 28.614213 | 78 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/embedding_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
from .base_layers import Dense
class AtomEmbedding(torch.nn.Module):
"""
Initial atom embeddings based on the atom type
Arguments
---------
emb_size: int
Atom embeddings size
"""
def __init__(self, emb_size: int, num_elements: int) -> None:
super().__init__()
self.emb_size = emb_size
self.embeddings = torch.nn.Embedding(num_elements, emb_size)
# init by uniform distribution
torch.nn.init.uniform_(
self.embeddings.weight, a=-np.sqrt(3), b=np.sqrt(3)
)
def forward(self, Z):
"""
Returns
-------
h: torch.Tensor, shape=(nAtoms, emb_size)
Atom embeddings.
"""
h = self.embeddings(Z - 1) # -1 because Z.min()=1 (==Hydrogen)
return h
class EdgeEmbedding(torch.nn.Module):
"""
Edge embedding based on the concatenation of atom embeddings
and a subsequent dense layer.
Arguments
---------
atom_features: int
Embedding size of the atom embedding.
edge_features: int
Embedding size of the input edge embedding.
out_features: int
Embedding size after the dense layer.
activation: str
Activation function used in the dense layer.
"""
def __init__(
self,
atom_features,
edge_features,
out_features,
activation=None,
) -> None:
super().__init__()
in_features = 2 * atom_features + edge_features
self.dense = Dense(
in_features, out_features, activation=activation, bias=False
)
def forward(
self,
h,
m,
edge_index,
):
"""
Arguments
---------
h: torch.Tensor, shape (num_atoms, atom_features)
Atom embeddings.
m: torch.Tensor, shape (num_edges, edge_features)
Radial basis in embedding block,
edge embedding in interaction block.
Returns
-------
m_st: torch.Tensor, shape=(nEdges, emb_size)
Edge embeddings.
"""
h_s = h[edge_index[0]] # shape=(nEdges, emb_size)
h_t = h[edge_index[1]] # shape=(nEdges, emb_size)
m_st = torch.cat(
[h_s, h_t, m], dim=-1
) # (nEdges, 2*emb_size+nFeatures)
m_st = self.dense(m_st) # (nEdges, emb_size)
return m_st
| 2,622 | 24.715686 | 72 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/radial_basis.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import Dict, Union
import numpy as np
import sympy as sym
import torch
from scipy.special import binom
from ocpmodels.common.typing import assert_is_instance
from ocpmodels.modules.scaling import ScaleFactor
from .basis_utils import bessel_basis
class PolynomialEnvelope(torch.nn.Module):
"""
Polynomial envelope function that ensures a smooth cutoff.
Arguments
---------
exponent: int
Exponent of the envelope function.
"""
def __init__(self, exponent: int) -> None:
super().__init__()
assert exponent > 0
self.p = exponent
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
env_val = (
1
+ self.a * d_scaled**self.p
+ self.b * d_scaled ** (self.p + 1)
+ self.c * d_scaled ** (self.p + 2)
)
return torch.where(d_scaled < 1, env_val, torch.zeros_like(d_scaled))
class ExponentialEnvelope(torch.nn.Module):
"""
Exponential envelope function that ensures a smooth cutoff,
as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021.
SpookyNet: Learning Force Fields with Electronic Degrees of Freedom
and Nonlocal Effects
"""
def __init__(self) -> None:
super().__init__()
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
env_val = torch.exp(
-(d_scaled**2) / ((1 - d_scaled) * (1 + d_scaled))
)
return torch.where(d_scaled < 1, env_val, torch.zeros_like(d_scaled))
class GaussianBasis(torch.nn.Module):
def __init__(
self,
start: float = 0.0,
stop: float = 5.0,
num_gaussians: int = 50,
trainable: bool = False,
) -> None:
super().__init__()
offset = torch.linspace(start, stop, num_gaussians)
if trainable:
self.offset = torch.nn.Parameter(offset, requires_grad=True)
else:
self.register_buffer("offset", offset)
self.coeff = -0.5 / ((stop - start) / (num_gaussians - 1)) ** 2
def forward(self, dist) -> torch.Tensor:
dist = dist[:, None] - self.offset[None, :]
return torch.exp(self.coeff * torch.pow(dist, 2))
class SphericalBesselBasis(torch.nn.Module):
"""
First-order spherical Bessel basis
Arguments
---------
num_radial: int
Number of basis functions. Controls the maximum frequency.
cutoff: float
Cutoff distance in Angstrom.
"""
def __init__(
self,
num_radial: int,
cutoff: float,
) -> None:
super().__init__()
self.norm_const = math.sqrt(2 / (cutoff**3))
# cutoff ** 3 to counteract dividing by d_scaled = d / cutoff
# Initialize frequencies at canonical positions
self.frequencies = torch.nn.Parameter(
data=torch.tensor(
np.pi * np.arange(1, num_radial + 1, dtype=np.float32)
),
requires_grad=True,
)
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
return (
self.norm_const
/ d_scaled[:, None]
* torch.sin(self.frequencies * d_scaled[:, None])
) # (num_edges, num_radial)
class BernsteinBasis(torch.nn.Module):
"""
Bernstein polynomial basis,
as proposed in Unke, Chmiela, Gastegger, Schütt, Sauceda, Müller 2021.
SpookyNet: Learning Force Fields with Electronic Degrees of Freedom
and Nonlocal Effects
Arguments
---------
num_radial: int
Number of basis functions. Controls the maximum frequency.
pregamma_initial: float
Initial value of exponential coefficient gamma.
Default: gamma = 0.5 * a_0**-1 = 0.94486,
inverse softplus -> pregamma = log e**gamma - 1 = 0.45264
"""
def __init__(
self,
num_radial: int,
pregamma_initial: float = 0.45264,
) -> None:
super().__init__()
prefactor = binom(num_radial - 1, np.arange(num_radial))
self.register_buffer(
"prefactor",
torch.tensor(prefactor, dtype=torch.float),
persistent=False,
)
self.pregamma = torch.nn.Parameter(
data=torch.tensor(pregamma_initial, dtype=torch.float),
requires_grad=True,
)
self.softplus = torch.nn.Softplus()
exp1 = torch.arange(num_radial)
self.register_buffer("exp1", exp1[None, :], persistent=False)
exp2 = num_radial - 1 - exp1
self.register_buffer("exp2", exp2[None, :], persistent=False)
def forward(self, d_scaled: torch.Tensor) -> torch.Tensor:
gamma = self.softplus(self.pregamma) # constrain to positive
exp_d = torch.exp(-gamma * d_scaled)[:, None]
return (
self.prefactor * (exp_d**self.exp1) * ((1 - exp_d) ** self.exp2)
)
class RadialBasis(torch.nn.Module):
"""
Arguments
---------
num_radial: int
Number of basis functions. Controls the maximum frequency.
cutoff: float
Cutoff distance in Angstrom.
rbf: dict = {"name": "gaussian"}
Basis function and its hyperparameters.
envelope: dict = {"name": "polynomial", "exponent": 5}
Envelope function and its hyperparameters.
scale_basis: bool
Whether to scale the basis values for better numerical stability.
"""
def __init__(
self,
num_radial: int,
cutoff: float,
rbf: Dict[str, str] = {"name": "gaussian"},
envelope: Dict[str, Union[str, int]] = {
"name": "polynomial",
"exponent": 5,
},
scale_basis: bool = False,
) -> None:
super().__init__()
self.inv_cutoff = 1 / cutoff
self.scale_basis = scale_basis
if self.scale_basis:
self.scale_rbf = ScaleFactor()
env_name = assert_is_instance(envelope["name"], str).lower()
env_hparams = envelope.copy()
del env_hparams["name"]
if env_name == "polynomial":
self.envelope = PolynomialEnvelope(**env_hparams)
elif env_name == "exponential":
self.envelope = ExponentialEnvelope(**env_hparams)
else:
raise ValueError(f"Unknown envelope function '{env_name}'.")
rbf_name = rbf["name"].lower()
rbf_hparams = rbf.copy()
del rbf_hparams["name"]
# RBFs get distances scaled to be in [0, 1]
if rbf_name == "gaussian":
self.rbf = GaussianBasis(
start=0, stop=1, num_gaussians=num_radial, **rbf_hparams
)
elif rbf_name == "spherical_bessel":
self.rbf = SphericalBesselBasis(
num_radial=num_radial, cutoff=cutoff, **rbf_hparams
)
elif rbf_name == "bernstein":
self.rbf = BernsteinBasis(num_radial=num_radial, **rbf_hparams)
else:
raise ValueError(f"Unknown radial basis function '{rbf_name}'.")
def forward(self, d: torch.Tensor) -> torch.Tensor:
d_scaled = d * self.inv_cutoff
env = self.envelope(d_scaled)
res = env[:, None] * self.rbf(d_scaled)
if self.scale_basis:
res = self.scale_rbf(res)
return res
# (num_edges, num_radial) or (num_edges, num_orders * num_radial)
| 7,675 | 29.827309 | 77 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/basis_utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import numpy as np
import sympy as sym
import torch
from scipy import special as sp
from scipy.optimize import brentq
def Jn(r: float, n: int):
"""
numerical spherical bessel functions of order n
"""
return sp.spherical_jn(n, r)
def Jn_zeros(n: int, k: int):
"""
Compute the first k zeros of the spherical bessel functions
up to order n (excluded)
"""
zerosj = np.zeros((n, k), dtype="float32")
zerosj[0] = np.arange(1, k + 1) * np.pi
points = np.arange(1, k + n) * np.pi
racines = np.zeros(k + n - 1, dtype="float32")
for i in range(1, n):
for j in range(k + n - 1 - i):
foo = brentq(Jn, points[j], points[j + 1], (i,))
racines[j] = foo
points = racines
zerosj[i][:k] = racines[:k]
return zerosj
def spherical_bessel_formulas(n: int):
"""
Computes the sympy formulas for the spherical bessel functions
up to order n (excluded)
"""
x = sym.symbols("x", real=True)
# j_i = (-x)^i * (1/x * d/dx)^î * sin(x)/x
j = [sym.sin(x) / x] # j_0
a = sym.sin(x) / x
for i in range(1, n):
b = sym.diff(a, x) / x
j += [sym.simplify(b * (-x) ** i)]
a = sym.simplify(b)
return j
def bessel_basis(n: int, k: int):
"""
Compute the sympy formulas for the normalized and rescaled spherical bessel
functions up to order n (excluded) and maximum frequency k (excluded).
Returns
-------
bess_basis: list
Bessel basis formulas taking in a single argument x.
Has length n where each element has length k. -> In total n*k many.
"""
zeros = Jn_zeros(n, k)
normalizer = []
for order in range(n):
normalizer_tmp = []
for i in range(k):
normalizer_tmp += [0.5 * Jn(zeros[order, i], order + 1) ** 2]
normalizer_tmp = (
1 / np.array(normalizer_tmp) ** 0.5
) # sqrt(2/(j_l+1)**2) , sqrt(1/c**3) not taken into account yet
normalizer += [normalizer_tmp]
f = spherical_bessel_formulas(n)
x = sym.symbols("x", real=True)
bess_basis = []
for order in range(n):
bess_basis_tmp = []
for i in range(k):
bess_basis_tmp += [
sym.simplify(
normalizer[order][i]
* f[order].subs(x, zeros[order, i] * x)
)
]
bess_basis += [bess_basis_tmp]
return bess_basis
def sph_harm_prefactor(l_degree: int, m_order: int):
"""
Computes the constant pre-factor for the spherical harmonic
of degree l and order m.
Arguments
---------
l_degree: int
Degree of the spherical harmonic. l >= 0
m_order: int
Order of the spherical harmonic. -l <= m <= l
Returns
-------
factor: float
"""
# sqrt((2*l+1)/4*pi * (l-m)!/(l+m)! )
return (
(2 * l_degree + 1)
/ (4 * np.pi)
* math.factorial(l_degree - abs(m_order))
/ math.factorial(l_degree + abs(m_order))
) ** 0.5
def associated_legendre_polynomials(
L_maxdegree: int, zero_m_only: bool = True, pos_m_only: bool = True
):
"""
Computes string formulas of the associated legendre polynomials
up to degree L (excluded).
Arguments
---------
L_maxdegree: int
Degree up to which to calculate the associated legendre polynomials
(degree L is excluded).
zero_m_only: bool
If True only calculate the polynomials for the polynomials where m=0.
pos_m_only: bool
If True only calculate the polynomials for the polynomials where m>=0.
Overwritten by zero_m_only.
Returns
-------
polynomials: list
Contains the sympy functions of the polynomials
(in total L many if zero_m_only is True else L^2 many).
"""
# calculations from http://web.cmb.usc.edu/people/alber/Software/tomominer/docs/cpp/group__legendre__polynomials.html
z = sym.symbols("z", real=True)
P_l_m = [
[0] * (2 * l_degree + 1) for l_degree in range(L_maxdegree)
] # for order l: -l <= m <= l
P_l_m[0][0] = 1
if L_maxdegree > 1:
if zero_m_only:
# m = 0
P_l_m[1][0] = z
for l_degree in range(2, L_maxdegree):
P_l_m[l_degree][0] = sym.simplify(
(
(2 * l_degree - 1) * z * P_l_m[l_degree - 1][0]
- (l_degree - 1) * P_l_m[l_degree - 2][0]
)
/ l_degree
)
return P_l_m
else:
# for m >= 0
for l_degree in range(1, L_maxdegree):
P_l_m[l_degree][l_degree] = sym.simplify(
(1 - 2 * l_degree)
* (1 - z**2) ** 0.5
* P_l_m[l_degree - 1][l_degree - 1]
) # P_00, P_11, P_22, P_33
for m_order in range(0, L_maxdegree - 1):
P_l_m[m_order + 1][m_order] = sym.simplify(
(2 * m_order + 1) * z * P_l_m[m_order][m_order]
) # P_10, P_21, P_32, P_43
for l_degree in range(2, L_maxdegree):
for m_order in range(l_degree - 1): # P_20, P_30, P_31
P_l_m[l_degree][m_order] = sym.simplify(
(
(2 * l_degree - 1)
* z
* P_l_m[l_degree - 1][m_order]
- (l_degree + m_order - 1)
* P_l_m[l_degree - 2][m_order]
)
/ (l_degree - m_order)
)
if not pos_m_only:
# for m < 0: P_l(-m) = (-1)^m * (l-m)!/(l+m)! * P_lm
for l_degree in range(1, L_maxdegree):
for m_order in range(
1, l_degree + 1
): # P_1(-1), P_2(-1) P_2(-2)
P_l_m[l_degree][-m_order] = sym.simplify(
(-1) ** m_order
* math.factorial(l_degree - m_order)
/ math.factorial(l_degree + m_order)
* P_l_m[l_degree][m_order]
)
return P_l_m
def real_sph_harm(
L_maxdegree: int,
use_theta: bool,
use_phi: bool = True,
zero_m_only: bool = True,
) -> None:
"""
    Computes formula strings of the real part of the spherical harmonics
up to degree L (excluded). Variables are either spherical coordinates phi
and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE.
Arguments
---------
L_maxdegree: int
Degree up to which to calculate the spherical harmonics
(degree L is excluded).
use_theta: bool
- True: Expects the input of the formula strings to contain theta.
- False: Expects the input of the formula strings to contain z.
use_phi: bool
- True: Expects the input of the formula strings to contain phi.
- False: Expects the input of the formula strings to contain x and y.
Does nothing if zero_m_only is True
zero_m_only: bool
If True only calculate the harmonics where m=0.
Returns
-------
Y_lm_real: list
        Computes formula strings of the real part of the spherical
        harmonics up to degree L (degree L excluded).
        In total L^2 many sph harm exist up to degree L (excluded).
        However, if zero_m_only is True then the total count
is reduced to L.
"""
z = sym.symbols("z", real=True)
P_l_m = associated_legendre_polynomials(L_maxdegree, zero_m_only)
if zero_m_only:
# for all m != 0: Y_lm = 0
Y_l_m = [[0] for l_degree in range(L_maxdegree)]
else:
Y_l_m = [
[0] * (2 * l_degree + 1) for l_degree in range(L_maxdegree)
] # for order l: -l <= m <= l
    # convert expressions to spherical coordinates
if use_theta:
# replace z by cos(theta)
theta = sym.symbols("theta", real=True)
for l_degree in range(L_maxdegree):
for m_order in range(len(P_l_m[l_degree])):
if not isinstance(P_l_m[l_degree][m_order], int):
P_l_m[l_degree][m_order] = P_l_m[l_degree][m_order].subs(
z, sym.cos(theta)
)
## calculate Y_lm
# Y_lm = N * P_lm(cos(theta)) * exp(i*m*phi)
# { sqrt(2) * (-1)^m * N * P_l|m| * sin(|m|*phi) if m < 0
# Y_lm_real = { Y_lm if m = 0
# { sqrt(2) * (-1)^m * N * P_lm * cos(m*phi) if m > 0
for l_degree in range(L_maxdegree):
Y_l_m[l_degree][0] = sym.simplify(
sph_harm_prefactor(l_degree, 0) * P_l_m[l_degree][0]
) # Y_l0
if not zero_m_only:
phi = sym.symbols("phi", real=True)
for l_degree in range(1, L_maxdegree):
# m > 0
for m_order in range(1, l_degree + 1):
Y_l_m[l_degree][m_order] = sym.simplify(
2**0.5
* (-1) ** m_order
* sph_harm_prefactor(l_degree, m_order)
* P_l_m[l_degree][m_order]
* sym.cos(m_order * phi)
)
# m < 0
for m_order in range(1, l_degree + 1):
Y_l_m[l_degree][-m_order] = sym.simplify(
2**0.5
* (-1) ** m_order
* sph_harm_prefactor(l_degree, -m_order)
* P_l_m[l_degree][m_order]
* sym.sin(m_order * phi)
)
# convert expressions to cartesian coordinates
if not use_phi:
# replace phi by atan2(y,x)
x, y = sym.symbols("x y", real=True)
for l_degree in range(L_maxdegree):
for m_order in range(len(Y_l_m[l_degree])):
Y_l_m[l_degree][m_order] = sym.simplify(
Y_l_m[l_degree][m_order].subs(phi, sym.atan2(y, x))
)
return Y_l_m
def get_sph_harm_basis(L_maxdegree: int, zero_m_only: bool = True):
"""Get a function calculating the spherical harmonics basis from z and phi."""
# retrieve equations
Y_lm = real_sph_harm(
L_maxdegree, use_theta=False, use_phi=True, zero_m_only=zero_m_only
)
Y_lm_flat = [Y for Y_l in Y_lm for Y in Y_l]
# convert to pytorch functions
z = sym.symbols("z", real=True)
variables = [z]
if not zero_m_only:
variables.append(sym.symbols("phi", real=True))
modules = {"sin": torch.sin, "cos": torch.cos, "sqrt": torch.sqrt}
sph_funcs = sym.lambdify(variables, Y_lm_flat, modules)
# Return as a single function
# args are either [cosφ] or [cosφ, ϑ]
def basis_fn(*args) -> torch.Tensor:
basis = sph_funcs(*args)
basis[0] = args[0].new_tensor(basis[0]).expand_as(args[0])
return torch.stack(basis, dim=1)
return basis_fn
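# Example: get_sph_harm_basis(3)(torch.linspace(-1, 1, 5)) returns a (5, 3)
# tensor with one column per degree l = 0, 1, 2 (zero_m_only=True).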
| 11,335 | 32.838806 | 121 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/force_scaler.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import torch
class ForceScaler:
"""
Scales up the energy and then scales down the forces
to prevent NaNs and infs in calculations using AMP.
Inspired by torch.cuda.amp.GradScaler.
"""
def __init__(
self,
init_scale: float = 2.0**8,
growth_factor: float = 2.0,
backoff_factor: float = 0.5,
growth_interval: int = 2000,
max_force_iters: int = 50,
enabled: bool = True,
) -> None:
self.scale_factor = init_scale
self.growth_factor = growth_factor
self.backoff_factor = backoff_factor
self.growth_interval = growth_interval
self.max_force_iters = max_force_iters
self.enabled = enabled
self.finite_force_results = 0
def scale(self, energy):
return energy * self.scale_factor if self.enabled else energy
def unscale(self, forces):
return forces / self.scale_factor if self.enabled else forces
def calc_forces(self, energy, pos):
energy_scaled = self.scale(energy)
forces_scaled = -torch.autograd.grad(
energy_scaled,
pos,
grad_outputs=torch.ones_like(energy_scaled),
create_graph=True,
)[0]
# (nAtoms, 3)
forces = self.unscale(forces_scaled)
return forces
def calc_forces_and_update(self, energy, pos):
if self.enabled:
found_nans_or_infs = True
force_iters = 0
# Re-calculate forces until everything is nice and finite.
while found_nans_or_infs:
forces = self.calc_forces(energy, pos)
found_nans_or_infs = not torch.all(forces.isfinite())
if found_nans_or_infs:
self.finite_force_results = 0
# Prevent infinite loop
force_iters += 1
if force_iters == self.max_force_iters:
logging.warning(
"Too many non-finite force results in a batch. "
"Breaking scaling loop."
)
break
else:
# Delete graph to save memory
del forces
else:
self.finite_force_results += 1
self.update()
else:
forces = self.calc_forces(energy, pos)
return forces
def update(self) -> None:
if self.finite_force_results == 0:
self.scale_factor *= self.backoff_factor
if self.finite_force_results == self.growth_interval:
self.scale_factor *= self.growth_factor
self.finite_force_results = 0
logging.info(f"finite force step count: {self.finite_force_results}")
logging.info(f"scaling factor: {self.scale_factor}")
| 3,081 | 31.442105 | 77 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/spherical_basis.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from ocpmodels.modules.scaling import ScaleFactor
from .basis_utils import get_sph_harm_basis
from .radial_basis import GaussianBasis, RadialBasis
class CircularBasisLayer(torch.nn.Module):
"""
2D Fourier Bessel Basis
Arguments
---------
num_spherical: int
Number of basis functions. Controls the maximum frequency.
radial_basis: RadialBasis
Radial basis function.
cbf: dict
Name and hyperparameters of the circular basis function.
scale_basis: bool
Whether to scale the basis values for better numerical stability.
"""
def __init__(
self,
num_spherical: int,
radial_basis: RadialBasis,
cbf: dict,
scale_basis: bool = False,
) -> None:
super().__init__()
self.radial_basis = radial_basis
self.scale_basis = scale_basis
if self.scale_basis:
self.scale_cbf = ScaleFactor()
cbf_name = cbf["name"].lower()
cbf_hparams = cbf.copy()
del cbf_hparams["name"]
if cbf_name == "gaussian":
self.cosφ_basis = GaussianBasis(
start=-1, stop=1, num_gaussians=num_spherical, **cbf_hparams
)
elif cbf_name == "spherical_harmonics":
self.cosφ_basis = get_sph_harm_basis(
num_spherical, zero_m_only=True
)
else:
raise ValueError(f"Unknown cosine basis function '{cbf_name}'.")
def forward(self, D_ca, cosφ_cab):
rad_basis = self.radial_basis(D_ca) # (num_edges, num_radial)
cir_basis = self.cosφ_basis(cosφ_cab) # (num_triplets, num_spherical)
if self.scale_basis:
cir_basis = self.scale_cbf(cir_basis)
return rad_basis, cir_basis
# (num_edges, num_radial), (num_triplets, num_spherical)
class SphericalBasisLayer(torch.nn.Module):
"""
3D Fourier Bessel Basis
Arguments
---------
num_spherical: int
Number of basis functions. Controls the maximum frequency.
radial_basis: RadialBasis
Radial basis functions.
sbf: dict
Name and hyperparameters of the spherical basis function.
scale_basis: bool
Whether to scale the basis values for better numerical stability.
"""
def __init__(
self,
num_spherical: int,
radial_basis: RadialBasis,
sbf: dict,
scale_basis: bool = False,
) -> None:
super().__init__()
self.num_spherical = num_spherical
self.radial_basis = radial_basis
self.scale_basis = scale_basis
if self.scale_basis:
self.scale_sbf = ScaleFactor()
sbf_name = sbf["name"].lower()
sbf_hparams = sbf.copy()
del sbf_hparams["name"]
if sbf_name == "spherical_harmonics":
self.spherical_basis = get_sph_harm_basis(
num_spherical, zero_m_only=False
)
elif sbf_name == "legendre_outer":
circular_basis = get_sph_harm_basis(
num_spherical, zero_m_only=True
)
self.spherical_basis = lambda cosφ, ϑ: (
circular_basis(cosφ)[:, :, None]
* circular_basis(torch.cos(ϑ))[:, None, :]
).reshape(cosφ.shape[0], -1)
elif sbf_name == "gaussian_outer":
self.circular_basis = GaussianBasis(
start=-1, stop=1, num_gaussians=num_spherical, **sbf_hparams
)
self.spherical_basis = lambda cosφ, ϑ: (
self.circular_basis(cosφ)[:, :, None]
* self.circular_basis(torch.cos(ϑ))[:, None, :]
).reshape(cosφ.shape[0], -1)
else:
raise ValueError(f"Unknown spherical basis function '{sbf_name}'.")
def forward(self, D_ca, cosφ_cab, θ_cabd):
rad_basis = self.radial_basis(D_ca)
sph_basis = self.spherical_basis(cosφ_cab, θ_cabd)
# (num_quadruplets, num_spherical**2)
if self.scale_basis:
sph_basis = self.scale_sbf(sph_basis)
return rad_basis, sph_basis
# (num_edges, num_radial), (num_quadruplets, num_spherical**2)
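# ---------------------------------------------------------------------------
# Editor's illustrative wiring sketch (not part of the original module). The
# RadialBasis keyword arguments below are assumptions chosen for the sketch;
# see radial_basis.py for the exact signature, and gemnet_oc.py for how these
# layers are wired in practice.
#
#     rad_basis = RadialBasis(num_radial=8, cutoff=6.0, rbf={"name": "gaussian"})
#     cbf_layer = CircularBasisLayer(
#         num_spherical=7,
#         radial_basis=rad_basis,
#         cbf={"name": "spherical_harmonics"},
#     )
#     # D_ca: (num_edges,) edge lengths; cosphi_cab: (num_triplets,) cosines
#     rad, cir = cbf_layer(D_ca, cosphi_cab)
#     # rad: (num_edges, num_radial), cir: (num_triplets, num_spherical)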
| 4,369 | 29.347222 | 79 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/interaction_block.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch
from ocpmodels.modules.scaling import ScaleFactor
from .atom_update_block import AtomUpdateBlock
from .base_layers import Dense, ResidualLayer
from .efficient import EfficientInteractionBilinear
from .embedding_block import EdgeEmbedding
class InteractionBlock(torch.nn.Module):
"""
Interaction block for GemNet-Q/dQ.
Arguments
---------
emb_size_atom: int
Embedding size of the atoms.
emb_size_edge: int
Embedding size of the edges.
emb_size_trip_in: int
(Down-projected) embedding size of the quadruplet edge embeddings
before the bilinear layer.
emb_size_trip_out: int
(Down-projected) embedding size of the quadruplet edge embeddings
after the bilinear layer.
emb_size_quad_in: int
(Down-projected) embedding size of the quadruplet edge embeddings
before the bilinear layer.
emb_size_quad_out: int
(Down-projected) embedding size of the quadruplet edge embeddings
after the bilinear layer.
emb_size_a2a_in: int
Embedding size in the atom interaction before the bilinear layer.
emb_size_a2a_out: int
Embedding size in the atom interaction after the bilinear layer.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
emb_size_sbf: int
Embedding size of the spherical basis transformation (two angles).
num_before_skip: int
Number of residual blocks before the first skip connection.
num_after_skip: int
Number of residual blocks after the first skip connection.
num_concat: int
Number of residual blocks after the concatenation.
num_atom: int
Number of residual blocks in the atom embedding blocks.
num_atom_emb_layers: int
Number of residual blocks for transforming atom embeddings.
quad_interaction: bool
Whether to use quadruplet interactions.
atom_edge_interaction: bool
Whether to use atom-to-edge interactions.
edge_atom_interaction: bool
Whether to use edge-to-atom interactions.
atom_interaction: bool
Whether to use atom-to-atom interactions.
activation: str
Name of the activation function to use in the dense layers.
"""
def __init__(
self,
emb_size_atom,
emb_size_edge,
emb_size_trip_in,
emb_size_trip_out,
emb_size_quad_in,
emb_size_quad_out,
emb_size_a2a_in,
emb_size_a2a_out,
emb_size_rbf,
emb_size_cbf,
emb_size_sbf,
num_before_skip: int,
num_after_skip: int,
num_concat: int,
num_atom: int,
num_atom_emb_layers: int = 0,
quad_interaction: bool = False,
atom_edge_interaction: bool = False,
edge_atom_interaction: bool = False,
atom_interaction: bool = False,
activation=None,
) -> None:
super().__init__()
## ------------------------ Message Passing ----------------------- ##
# Dense transformation of skip connection
self.dense_ca = Dense(
emb_size_edge,
emb_size_edge,
activation=activation,
bias=False,
)
# Triplet Interaction
self.trip_interaction = TripletInteraction(
emb_size_in=emb_size_edge,
emb_size_out=emb_size_edge,
emb_size_trip_in=emb_size_trip_in,
emb_size_trip_out=emb_size_trip_out,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
symmetric_mp=True,
swap_output=True,
activation=activation,
)
# Quadruplet Interaction
if quad_interaction:
self.quad_interaction = QuadrupletInteraction(
emb_size_edge=emb_size_edge,
emb_size_quad_in=emb_size_quad_in,
emb_size_quad_out=emb_size_quad_out,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
emb_size_sbf=emb_size_sbf,
symmetric_mp=True,
activation=activation,
)
else:
self.quad_interaction = None
if atom_edge_interaction:
self.atom_edge_interaction = TripletInteraction(
emb_size_in=emb_size_atom,
emb_size_out=emb_size_edge,
emb_size_trip_in=emb_size_trip_in,
emb_size_trip_out=emb_size_trip_out,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
symmetric_mp=True,
swap_output=True,
activation=activation,
)
else:
self.atom_edge_interaction = None
if edge_atom_interaction:
self.edge_atom_interaction = TripletInteraction(
emb_size_in=emb_size_edge,
emb_size_out=emb_size_atom,
emb_size_trip_in=emb_size_trip_in,
emb_size_trip_out=emb_size_trip_out,
emb_size_rbf=emb_size_rbf,
emb_size_cbf=emb_size_cbf,
symmetric_mp=False,
swap_output=False,
activation=activation,
)
else:
self.edge_atom_interaction = None
if atom_interaction:
self.atom_interaction = PairInteraction(
emb_size_atom=emb_size_atom,
emb_size_pair_in=emb_size_a2a_in,
emb_size_pair_out=emb_size_a2a_out,
emb_size_rbf=emb_size_rbf,
activation=activation,
)
else:
self.atom_interaction = None
## -------------------- Update Edge Embeddings -------------------- ##
# Residual layers before skip connection
self.layers_before_skip = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_edge,
activation=activation,
)
for i in range(num_before_skip)
]
)
# Residual layers after skip connection
self.layers_after_skip = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_edge,
activation=activation,
)
for i in range(num_after_skip)
]
)
## -------------------- Update Atom Embeddings -------------------- ##
self.atom_emb_layers = torch.nn.ModuleList(
[
ResidualLayer(
emb_size_atom,
activation=activation,
)
for _ in range(num_atom_emb_layers)
]
)
self.atom_update = AtomUpdateBlock(
emb_size_atom=emb_size_atom,
emb_size_edge=emb_size_edge,
emb_size_rbf=emb_size_rbf,
nHidden=num_atom,
activation=activation,
)
## ---------- Update Edge Embeddings with Atom Embeddings --------- ##
self.concat_layer = EdgeEmbedding(
emb_size_atom,
emb_size_edge,
emb_size_edge,
activation=activation,
)
self.residual_m = torch.nn.ModuleList(
[
ResidualLayer(emb_size_edge, activation=activation)
for _ in range(num_concat)
]
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
num_eint = 2.0 + quad_interaction + atom_edge_interaction
self.inv_sqrt_num_eint = 1 / math.sqrt(num_eint)
num_aint = 1.0 + edge_atom_interaction + atom_interaction
self.inv_sqrt_num_aint = 1 / math.sqrt(num_aint)
def forward(
self,
h,
m,
bases_qint,
bases_e2e,
bases_a2e,
bases_e2a,
basis_a2a_rad,
basis_atom_update,
edge_index_main,
a2ee2a_graph,
a2a_graph,
id_swap,
trip_idx_e2e,
trip_idx_a2e,
trip_idx_e2a,
quad_idx,
):
"""
Returns
-------
        h: torch.Tensor, shape=(nAtoms, emb_size_atom)
Atom embeddings.
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings (c->a).
"""
num_atoms = h.shape[0]
# Initial transformation
x_ca_skip = self.dense_ca(m) # (nEdges, emb_size_edge)
x_e2e = self.trip_interaction(
m,
bases_e2e,
trip_idx_e2e,
id_swap,
)
if self.quad_interaction is not None:
x_qint = self.quad_interaction(
m,
bases_qint,
quad_idx,
id_swap,
)
if self.atom_edge_interaction is not None:
x_a2e = self.atom_edge_interaction(
h,
bases_a2e,
trip_idx_a2e,
id_swap,
expand_idx=a2ee2a_graph["edge_index"][0],
)
if self.edge_atom_interaction is not None:
h_e2a = self.edge_atom_interaction(
m,
bases_e2a,
trip_idx_e2a,
id_swap,
idx_agg2=a2ee2a_graph["edge_index"][1],
idx_agg2_inner=a2ee2a_graph["target_neighbor_idx"],
agg2_out_size=num_atoms,
)
if self.atom_interaction is not None:
h_a2a = self.atom_interaction(
h,
basis_a2a_rad,
a2a_graph["edge_index"],
a2a_graph["target_neighbor_idx"],
)
## -------------- Merge Embeddings after interactions ------------- ##
x = x_ca_skip + x_e2e # (nEdges, emb_size_edge)
if self.quad_interaction is not None:
x += x_qint # (nEdges, emb_size_edge)
if self.atom_edge_interaction is not None:
x += x_a2e # (nEdges, emb_size_edge)
x = x * self.inv_sqrt_num_eint
# Merge atom embeddings after interactions
if self.edge_atom_interaction is not None:
            h = h + h_e2a  # (nAtoms, emb_size_atom)
        if self.atom_interaction is not None:
            h = h + h_a2a  # (nAtoms, emb_size_atom)
h = h * self.inv_sqrt_num_aint
## -------------------- Update Edge Embeddings -------------------- ##
# Transformations before skip connection
for _, layer in enumerate(self.layers_before_skip):
x = layer(x) # (nEdges, emb_size_edge)
# Skip connection
m = m + x # (nEdges, emb_size_edge)
m = m * self.inv_sqrt_2
# Transformations after skip connection
for _, layer in enumerate(self.layers_after_skip):
m = layer(m) # (nEdges, emb_size_edge)
## -------------------- Update Atom Embeddings -------------------- ##
for layer in self.atom_emb_layers:
h = layer(h) # (nAtoms, emb_size_atom)
h2 = self.atom_update(h, m, basis_atom_update, edge_index_main[1])
# Skip connection
h = h + h2 # (nAtoms, emb_size_atom)
h = h * self.inv_sqrt_2
## ---------- Update Edge Embeddings with Atom Embeddings --------- ##
m2 = self.concat_layer(h, m, edge_index_main)
# (nEdges, emb_size_edge)
for _, layer in enumerate(self.residual_m):
m2 = layer(m2) # (nEdges, emb_size_edge)
# Skip connection
m = m + m2 # (nEdges, emb_size_edge)
m = m * self.inv_sqrt_2
return h, m
class QuadrupletInteraction(torch.nn.Module):
"""
Quadruplet-based message passing block.
Arguments
---------
emb_size_edge: int
Embedding size of the edges.
emb_size_quad_in: int
(Down-projected) embedding size of the quadruplet edge embeddings
before the bilinear layer.
emb_size_quad_out: int
(Down-projected) embedding size of the quadruplet edge embeddings
after the bilinear layer.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
emb_size_sbf: int
Embedding size of the spherical basis transformation (two angles).
symmetric_mp: bool
Whether to use symmetric message passing and
update the edges in both directions.
activation: str
Name of the activation function to use in the dense layers.
"""
def __init__(
self,
emb_size_edge,
emb_size_quad_in,
emb_size_quad_out,
emb_size_rbf,
emb_size_cbf,
emb_size_sbf,
symmetric_mp=True,
activation=None,
) -> None:
super().__init__()
self.symmetric_mp = symmetric_mp
# Dense transformation
self.dense_db = Dense(
emb_size_edge,
emb_size_edge,
activation=activation,
bias=False,
)
# Up projections of basis representations,
# bilinear layer and scaling factors
self.mlp_rbf = Dense(
emb_size_rbf,
emb_size_edge,
activation=None,
bias=False,
)
self.scale_rbf = ScaleFactor()
self.mlp_cbf = Dense(
emb_size_cbf,
emb_size_quad_in,
activation=None,
bias=False,
)
self.scale_cbf = ScaleFactor()
self.mlp_sbf = EfficientInteractionBilinear(
emb_size_quad_in, emb_size_sbf, emb_size_quad_out
)
self.scale_sbf_sum = ScaleFactor()
# combines scaling for bilinear layer and summation
# Down and up projections
self.down_projection = Dense(
emb_size_edge,
emb_size_quad_in,
activation=activation,
bias=False,
)
self.up_projection_ca = Dense(
emb_size_quad_out,
emb_size_edge,
activation=activation,
bias=False,
)
if self.symmetric_mp:
self.up_projection_ac = Dense(
emb_size_quad_out,
emb_size_edge,
activation=activation,
bias=False,
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
m,
bases,
idx,
id_swap,
):
"""
Returns
-------
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings (c->a).
"""
x_db = self.dense_db(m) # (nEdges, emb_size_edge)
# Transform via radial basis
x_db2 = x_db * self.mlp_rbf(bases["rad"]) # (nEdges, emb_size_edge)
x_db = self.scale_rbf(x_db2, ref=x_db)
# Down project embeddings
x_db = self.down_projection(x_db) # (nEdges, emb_size_quad_in)
# Transform via circular basis
x_db = x_db[idx["triplet_in"]["in"]]
# (num_triplets_int, emb_size_quad_in)
x_db2 = x_db * self.mlp_cbf(bases["cir"])
# (num_triplets_int, emb_size_quad_in)
x_db = self.scale_cbf(x_db2, ref=x_db)
# Transform via spherical basis
x_db = x_db[idx["trip_in_to_quad"]]
# (num_quadruplets, emb_size_quad_in)
x = self.mlp_sbf(bases["sph"], x_db, idx["out"], idx["out_agg"])
# (nEdges, emb_size_quad_out)
x = self.scale_sbf_sum(x, ref=x_db)
# =>
# rbf(d_db)
# cbf(d_ba, angle_abd)
# sbf(d_ca, angle_cab, angle_cabd)
if self.symmetric_mp:
# Upproject embeddings
x_ca = self.up_projection_ca(x) # (nEdges, emb_size_edge)
x_ac = self.up_projection_ac(x) # (nEdges, emb_size_edge)
# Merge interaction of c->a and a->c
x_ac = x_ac[id_swap] # swap to add to edge a->c and not c->a
x_res = x_ca + x_ac
x_res = x_res * self.inv_sqrt_2
return x_res
else:
x_res = self.up_projection_ca(x)
return x_res
class TripletInteraction(torch.nn.Module):
"""
Triplet-based message passing block.
Arguments
---------
emb_size_in: int
Embedding size of the input embeddings.
emb_size_out: int
Embedding size of the output embeddings.
emb_size_trip_in: int
(Down-projected) embedding size of the quadruplet edge embeddings
before the bilinear layer.
emb_size_trip_out: int
(Down-projected) embedding size of the quadruplet edge embeddings
after the bilinear layer.
emb_size_rbf: int
Embedding size of the radial basis transformation.
emb_size_cbf: int
Embedding size of the circular basis transformation (one angle).
symmetric_mp: bool
Whether to use symmetric message passing and
update the edges in both directions.
swap_output: bool
Whether to swap the output embedding directions.
Only relevant if symmetric_mp is False.
activation: str
Name of the activation function to use in the dense layers.
"""
def __init__(
self,
emb_size_in,
emb_size_out,
emb_size_trip_in,
emb_size_trip_out,
emb_size_rbf,
emb_size_cbf,
symmetric_mp=True,
swap_output=True,
activation=None,
) -> None:
super().__init__()
self.symmetric_mp = symmetric_mp
self.swap_output = swap_output
# Dense transformation
self.dense_ba = Dense(
emb_size_in,
emb_size_in,
activation=activation,
bias=False,
)
# Up projections of basis representations, bilinear layer and scaling factors
self.mlp_rbf = Dense(
emb_size_rbf,
emb_size_in,
activation=None,
bias=False,
)
self.scale_rbf = ScaleFactor()
self.mlp_cbf = EfficientInteractionBilinear(
emb_size_trip_in, emb_size_cbf, emb_size_trip_out
)
self.scale_cbf_sum = ScaleFactor()
# combines scaling for bilinear layer and summation
# Down and up projections
self.down_projection = Dense(
emb_size_in,
emb_size_trip_in,
activation=activation,
bias=False,
)
self.up_projection_ca = Dense(
emb_size_trip_out,
emb_size_out,
activation=activation,
bias=False,
)
if self.symmetric_mp:
self.up_projection_ac = Dense(
emb_size_trip_out,
emb_size_out,
activation=activation,
bias=False,
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
m,
bases,
idx,
id_swap,
expand_idx=None,
idx_agg2=None,
idx_agg2_inner=None,
agg2_out_size=None,
):
"""
Returns
-------
m: torch.Tensor, shape=(nEdges, emb_size_edge)
Edge embeddings.
"""
# Dense transformation
x_ba = self.dense_ba(m) # (nEdges, emb_size_edge)
if expand_idx is not None:
x_ba = x_ba[expand_idx]
# Transform via radial basis
rad_emb = self.mlp_rbf(bases["rad"]) # (nEdges, emb_size_edge)
x_ba2 = x_ba * rad_emb
x_ba = self.scale_rbf(x_ba2, ref=x_ba)
x_ba = self.down_projection(x_ba) # (nEdges, emb_size_trip_in)
# Transform via circular spherical basis
x_ba = x_ba[idx["in"]]
# Efficient bilinear layer
x = self.mlp_cbf(
basis=bases["cir"],
m=x_ba,
idx_agg_outer=idx["out"],
idx_agg_inner=idx["out_agg"],
idx_agg2_outer=idx_agg2,
idx_agg2_inner=idx_agg2_inner,
agg2_out_size=agg2_out_size,
)
        # (num_edges or num_atoms, emb_size_trip_out)
x = self.scale_cbf_sum(x, ref=x_ba)
# =>
# rbf(d_ba)
# cbf(d_ca, angle_cab)
if self.symmetric_mp:
# Up project embeddings
x_ca = self.up_projection_ca(x) # (nEdges, emb_size_edge)
x_ac = self.up_projection_ac(x) # (nEdges, emb_size_edge)
# Merge interaction of c->a and a->c
x_ac = x_ac[id_swap] # swap to add to edge a->c and not c->a
x_res = x_ca + x_ac
x_res = x_res * self.inv_sqrt_2
return x_res
else:
if self.swap_output:
x = x[id_swap]
x_res = self.up_projection_ca(x) # (nEdges, emb_size_edge)
return x_res
class PairInteraction(torch.nn.Module):
"""
Pair-based message passing block.
Arguments
---------
emb_size_atom: int
Embedding size of the atoms.
emb_size_pair_in: int
Embedding size of the atom pairs before the bilinear layer.
emb_size_pair_out: int
Embedding size of the atom pairs after the bilinear layer.
emb_size_rbf: int
Embedding size of the radial basis transformation.
activation: str
Name of the activation function to use in the dense layers.
"""
def __init__(
self,
emb_size_atom,
emb_size_pair_in,
emb_size_pair_out,
emb_size_rbf,
activation=None,
) -> None:
super().__init__()
# Bilinear layer and scaling factor
self.bilinear = Dense(
emb_size_rbf * emb_size_pair_in,
emb_size_pair_out,
activation=None,
bias=False,
)
self.scale_rbf_sum = ScaleFactor()
# Down and up projections
self.down_projection = Dense(
emb_size_atom,
emb_size_pair_in,
activation=activation,
bias=False,
)
self.up_projection = Dense(
emb_size_pair_out,
emb_size_atom,
activation=activation,
bias=False,
)
self.inv_sqrt_2 = 1 / math.sqrt(2.0)
def forward(
self,
h,
rad_basis,
edge_index,
target_neighbor_idx,
):
"""
Returns
-------
h: torch.Tensor, shape=(num_atoms, emb_size_atom)
Atom embeddings.
"""
num_atoms = h.shape[0]
x_b = self.down_projection(h) # (num_atoms, emb_size_edge)
x_ba = x_b[edge_index[0]] # (num_edges, emb_size_edge)
Kmax = torch.max(target_neighbor_idx) + 1
x2 = x_ba.new_zeros(num_atoms, Kmax, x_ba.shape[-1])
x2[edge_index[1], target_neighbor_idx] = x_ba
# (num_atoms, Kmax, emb_size_edge)
x_ba2 = rad_basis @ x2
# (num_atoms, emb_size_interm, emb_size_edge)
h_out = self.bilinear(x_ba2.reshape(num_atoms, -1))
h_out = self.scale_rbf_sum(h_out, ref=x_ba)
# (num_atoms, emb_size_edge)
h_out = self.up_projection(h_out) # (num_atoms, emb_size_atom)
return h_out
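# ---------------------------------------------------------------------------
# Editor's illustrative construction sketch (not part of the original
# module). The embedding sizes are arbitrary small values; the keyword names
# mirror the InteractionBlock.__init__ signature above, and the forward()
# inputs (bases_qint, bases_e2e, ..., quad_idx) are assumed to be assembled
# by the parent GemNet-OC model as usual.
#
#     block = InteractionBlock(
#         emb_size_atom=16, emb_size_edge=16,
#         emb_size_trip_in=8, emb_size_trip_out=8,
#         emb_size_quad_in=4, emb_size_quad_out=4,
#         emb_size_a2a_in=8, emb_size_a2a_out=8,
#         emb_size_rbf=8, emb_size_cbf=8, emb_size_sbf=8,
#         num_before_skip=1, num_after_skip=1, num_concat=1, num_atom=1,
#         quad_interaction=True, atom_edge_interaction=True,
#         edge_atom_interaction=True, atom_interaction=True,
#         activation="silu",
#     )
#     h_new, m_new = block(h, m, ...)  # updated atom and edge embeddings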
| 23,476 | 29.931489 | 85 | py |
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/models/gemnet_oc/layers/efficient.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Optional
import torch
from torch_scatter import scatter
from ..initializers import he_orthogonal_init
from .base_layers import Dense
class BasisEmbedding(torch.nn.Module):
"""
Embed a basis (CBF, SBF), optionally using the efficient reformulation.
Arguments
---------
num_radial: int
Number of radial basis functions.
emb_size_interm: int
Intermediate embedding size of triplets/quadruplets.
num_spherical: int
Number of circular/spherical basis functions.
Only required if there is a circular/spherical basis.
"""
def __init__(
self,
num_radial: int,
emb_size_interm: int,
num_spherical: Optional[int] = None,
) -> None:
super().__init__()
self.num_radial = num_radial
self.num_spherical = num_spherical
if num_spherical is None:
self.weight = torch.nn.Parameter(
torch.empty(emb_size_interm, num_radial),
requires_grad=True,
)
else:
self.weight = torch.nn.Parameter(
torch.empty(num_radial, num_spherical, emb_size_interm),
requires_grad=True,
)
self.reset_parameters()
def reset_parameters(self) -> None:
he_orthogonal_init(self.weight)
def forward(
self,
rad_basis,
sph_basis=None,
idx_rad_outer=None,
idx_rad_inner=None,
idx_sph_outer=None,
idx_sph_inner=None,
num_atoms=None,
):
"""
Arguments
---------
rad_basis: torch.Tensor, shape=(num_edges, num_radial or num_orders * num_radial)
Raw radial basis.
sph_basis: torch.Tensor, shape=(num_triplets or num_quadruplets, num_spherical)
Raw spherical or circular basis.
idx_rad_outer: torch.Tensor, shape=(num_edges)
Atom associated with each radial basis value.
Optional, used for efficient edge aggregation.
idx_rad_inner: torch.Tensor, shape=(num_edges)
Enumerates radial basis values per atom.
Optional, used for efficient edge aggregation.
idx_sph_outer: torch.Tensor, shape=(num_triplets or num_quadruplets)
Edge associated with each circular/spherical basis value.
Optional, used for efficient triplet/quadruplet aggregation.
idx_sph_inner: torch.Tensor, shape=(num_triplets or num_quadruplets)
Enumerates circular/spherical basis values per edge.
Optional, used for efficient triplet/quadruplet aggregation.
num_atoms: int
Total number of atoms.
Optional, used for efficient edge aggregation.
Returns
-------
rad_W1: torch.Tensor, shape=(num_edges, emb_size_interm, num_spherical)
sph: torch.Tensor, shape=(num_edges, Kmax, num_spherical)
Kmax = maximum number of neighbors of the edges
"""
num_edges = rad_basis.shape[0]
if self.num_spherical is not None:
# MatMul: mul + sum over num_radial
rad_W1 = rad_basis @ self.weight.reshape(self.weight.shape[0], -1)
# (num_edges, emb_size_interm * num_spherical)
rad_W1 = rad_W1.reshape(num_edges, -1, sph_basis.shape[-1])
# (num_edges, emb_size_interm, num_spherical)
else:
# MatMul: mul + sum over num_radial
rad_W1 = rad_basis @ self.weight.T
# (num_edges, emb_size_interm)
if idx_rad_inner is not None:
# Zero padded dense matrix
# maximum number of neighbors
if idx_rad_outer.shape[0] == 0:
# catch empty idx_rad_outer
Kmax = 0
else:
Kmax = torch.max(idx_rad_inner) + 1
rad_W1_padded = rad_W1.new_zeros(
[num_atoms, Kmax] + list(rad_W1.shape[1:])
)
rad_W1_padded[idx_rad_outer, idx_rad_inner] = rad_W1
# (num_atoms, Kmax, emb_size_interm, ...)
rad_W1_padded = torch.transpose(rad_W1_padded, 1, 2)
# (num_atoms, emb_size_interm, Kmax, ...)
rad_W1_padded = rad_W1_padded.reshape(
num_atoms, rad_W1.shape[1], -1
)
# (num_atoms, emb_size_interm, Kmax2 * ...)
rad_W1 = rad_W1_padded
if idx_sph_inner is not None:
# Zero padded dense matrix
# maximum number of neighbors
if idx_sph_outer.shape[0] == 0:
# catch empty idx_sph_outer
Kmax = 0
else:
Kmax = torch.max(idx_sph_inner) + 1
sph2 = sph_basis.new_zeros(num_edges, Kmax, sph_basis.shape[-1])
sph2[idx_sph_outer, idx_sph_inner] = sph_basis
# (num_edges, Kmax, num_spherical)
sph2 = torch.transpose(sph2, 1, 2)
# (num_edges, num_spherical, Kmax)
if sph_basis is None:
return rad_W1
else:
if idx_sph_inner is None:
rad_W1 = rad_W1[idx_sph_outer]
# (num_triplets, emb_size_interm, num_spherical)
sph_W1 = rad_W1 @ sph_basis[:, :, None]
                # (num_triplets, emb_size_interm, 1)
return sph_W1.squeeze(-1)
else:
return rad_W1, sph2
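# ---------------------------------------------------------------------------
# Editor's shape sketch for BasisEmbedding (not part of the original module).
# All sizes are arbitrary illustration values; the triplet-path call mirrors
# the forward() docstring above.
#
#     emb = BasisEmbedding(num_radial=8, emb_size_interm=16, num_spherical=7)
#     # rad_basis: (num_edges, 8); sph_basis: (num_triplets, 7)
#     rad_W1, sph = emb(
#         rad_basis, sph_basis,
#         idx_sph_outer=idx_sph_outer,  # edge of each triplet
#         idx_sph_inner=idx_sph_inner,  # triplet enumeration per edge
#     )
#     # rad_W1: (num_edges, 16, 7); sph: (num_edges, 7, Kmax)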
class EfficientInteractionBilinear(torch.nn.Module):
"""
Efficient reformulation of the bilinear layer and subsequent summation.
Arguments
---------
emb_size_in: int
Embedding size of input triplets/quadruplets.
emb_size_interm: int
Intermediate embedding size of the basis transformation.
emb_size_out: int
Embedding size of output triplets/quadruplets.
"""
def __init__(
self,
emb_size_in: int,
emb_size_interm: int,
emb_size_out: int,
) -> None:
super().__init__()
self.emb_size_in = emb_size_in
self.emb_size_interm = emb_size_interm
self.emb_size_out = emb_size_out
self.bilinear = Dense(
self.emb_size_in * self.emb_size_interm,
self.emb_size_out,
bias=False,
activation=None,
)
def forward(
self,
basis,
m,
idx_agg_outer,
idx_agg_inner,
idx_agg2_outer=None,
idx_agg2_inner=None,
agg2_out_size=None,
):
"""
Arguments
---------
basis: Tuple (torch.Tensor, torch.Tensor),
shapes=((num_edges, emb_size_interm, num_spherical),
(num_edges, num_spherical, Kmax))
First element: Radial basis multiplied with weight matrix
Second element: Circular/spherical basis
m: torch.Tensor, shape=(num_edges, emb_size_in)
Input edge embeddings
idx_agg_outer: torch.Tensor, shape=(num_triplets or num_quadruplets)
Output edge aggregating this intermediate triplet/quadruplet edge.
idx_agg_inner: torch.Tensor, shape=(num_triplets or num_quadruplets)
Enumerates intermediate edges per output edge.
idx_agg2_outer: torch.Tensor, shape=(num_edges)
Output atom aggregating this edge.
idx_agg2_inner: torch.Tensor, shape=(num_edges)
Enumerates edges per output atom.
agg2_out_size: int
Number of output embeddings when aggregating twice. Typically
the number of atoms.
Returns
-------
m_ca: torch.Tensor, shape=(num_edges, emb_size)
Aggregated edge/atom embeddings.
"""
# num_spherical is actually num_spherical**2 for quadruplets
(rad_W1, sph) = basis
# (num_edges, emb_size_interm, num_spherical),
# (num_edges, num_spherical, Kmax)
num_edges = sph.shape[0]
# Create (zero-padded) dense matrix of the neighboring edge embeddings.
Kmax = torch.max(idx_agg_inner) + 1
m_padded = m.new_zeros(num_edges, Kmax, self.emb_size_in)
m_padded[idx_agg_outer, idx_agg_inner] = m
# (num_quadruplets/num_triplets, emb_size_in) -> (num_edges, Kmax, emb_size_in)
sph_m = torch.matmul(sph, m_padded)
# (num_edges, num_spherical, emb_size_in)
if idx_agg2_outer is not None:
Kmax2 = torch.max(idx_agg2_inner) + 1
sph_m_padded = sph_m.new_zeros(
agg2_out_size, Kmax2, sph_m.shape[1], sph_m.shape[2]
)
sph_m_padded[idx_agg2_outer, idx_agg2_inner] = sph_m
# (num_atoms, Kmax2, num_spherical, emb_size_in)
sph_m_padded = sph_m_padded.reshape(
agg2_out_size, -1, sph_m.shape[-1]
)
# (num_atoms, Kmax2 * num_spherical, emb_size_in)
rad_W1_sph_m = rad_W1 @ sph_m_padded
# (num_atoms, emb_size_interm, emb_size_in)
else:
# MatMul: mul + sum over num_spherical
rad_W1_sph_m = torch.matmul(rad_W1, sph_m)
# (num_edges, emb_size_interm, emb_size_in)
# Bilinear: Sum over emb_size_interm and emb_size_in
m_ca = self.bilinear(
rad_W1_sph_m.reshape(-1, rad_W1_sph_m.shape[1:].numel())
)
# (num_edges/num_atoms, emb_size_out)
return m_ca
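# ---------------------------------------------------------------------------
# Editor's shape sketch for EfficientInteractionBilinear (not part of the
# original module). The tensors are hypothetical and sized as in the
# forward() docstring above; (rad_W1, sph) is typically produced by
# BasisEmbedding.
#
#     bilinear = EfficientInteractionBilinear(
#         emb_size_in=8, emb_size_interm=16, emb_size_out=32
#     )
#     # rad_W1: (num_edges, 16, num_spherical); sph: (num_edges, num_spherical, Kmax)
#     # m: (num_triplets, 8); idx_agg_outer/idx_agg_inner: (num_triplets,)
#     m_ca = bilinear((rad_W1, sph), m, idx_agg_outer, idx_agg_inner)
#     # m_ca: (num_edges, 32)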
| 9,599 | 34.555556 | 89 | py |
ocp | ocp-main/ocpmodels/models/utils/activations.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn.functional as F
class Act(torch.nn.Module):
def __init__(self, act: str, slope: float = 0.05) -> None:
super(Act, self).__init__()
self.act = act
self.slope = slope
self.shift = torch.log(torch.tensor(2.0)).item()
def forward(self, input: torch.Tensor) -> torch.Tensor:
if self.act == "relu":
return F.relu(input)
elif self.act == "leaky_relu":
return F.leaky_relu(input)
elif self.act == "sp":
return F.softplus(input, beta=1)
elif self.act == "leaky_sp":
return F.softplus(input, beta=1) - self.slope * F.relu(-input)
elif self.act == "elu":
return F.elu(input, alpha=1)
elif self.act == "leaky_elu":
return F.elu(input, alpha=1) - self.slope * F.relu(-input)
elif self.act == "ssp":
return F.softplus(input, beta=1) - self.shift
elif self.act == "leaky_ssp":
return (
F.softplus(input, beta=1)
- self.slope * F.relu(-input)
- self.shift
)
elif self.act == "tanh":
return torch.tanh(input)
elif self.act == "leaky_tanh":
return torch.tanh(input) + self.slope * input
elif self.act == "swish":
return torch.sigmoid(input) * input
else:
raise RuntimeError(f"Undefined activation called {self.act}")
| 1,650 | 33.395833 | 74 | py |
ocp | ocp-main/ocpmodels/models/utils/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 177 | 34.6 | 65 | py |
ocp | ocp-main/ocpmodels/models/utils/basis.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import List, Optional
import numpy as np
import torch
import torch.nn as nn
from scipy.special import sph_harm
from torch.nn.init import _calculate_correct_fan
from .activations import Act
class Sine(nn.Module):
def __init__(self, w0: float = 30.0) -> None:
super(Sine, self).__init__()
self.w0 = w0
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.sin(self.w0 * x)
class SIREN(nn.Module):
def __init__(
self,
layers: List[int],
num_in_features: int,
out_features: int,
w0: float = 30.0,
initializer="siren",
c: float = 6,
) -> None:
super(SIREN, self).__init__()
self.layers = [nn.Linear(num_in_features, layers[0]), Sine(w0=w0)]
for index in range(len(layers) - 1):
self.layers.extend(
[nn.Linear(layers[index], layers[index + 1]), Sine(w0=1)]
)
self.layers.append(nn.Linear(layers[-1], out_features))
self.network = nn.Sequential(*self.layers)
if initializer is not None and initializer == "siren":
for m in self.network:
if isinstance(m, nn.Linear):
num_input = float(m.weight.size(-1))
with torch.no_grad():
m.weight.uniform_(
-math.sqrt(6.0 / num_input),
math.sqrt(6.0 / num_input),
)
def forward(self, X):
return self.network(X)
class SINESmearing(nn.Module):
def __init__(
self,
num_in_features: int,
num_freqs: int = 40,
use_cosine: bool = False,
) -> None:
super(SINESmearing, self).__init__()
self.num_freqs = num_freqs
self.out_dim: int = num_in_features * self.num_freqs
self.use_cosine = use_cosine
freq = torch.arange(num_freqs).float()
freq = torch.pow(torch.ones_like(freq) * 1.1, freq)
self.freq_filter = nn.Parameter(
freq.view(-1, 1).repeat(1, num_in_features).view(1, -1),
requires_grad=False,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x.repeat(1, self.num_freqs)
x = x * self.freq_filter
if self.use_cosine:
return torch.cos(x)
else:
return torch.sin(x)
class GaussianSmearing(nn.Module):
def __init__(
self,
num_in_features: int,
start: int = 0,
end: int = 1,
num_freqs: int = 50,
) -> None:
super(GaussianSmearing, self).__init__()
self.num_freqs = num_freqs
offset = torch.linspace(start, end, num_freqs)
self.coeff: float = -0.5 / (offset[1] - offset[0]).item() ** 2
self.offset = nn.Parameter(
offset.view(-1, 1).repeat(1, num_in_features).view(1, -1),
requires_grad=False,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x.repeat(1, self.num_freqs)
x = x - self.offset
return torch.exp(self.coeff * torch.pow(x, 2))
class FourierSmearing(nn.Module):
def __init__(
self,
num_in_features: int,
num_freqs: int = 40,
use_cosine: bool = False,
) -> None:
super(FourierSmearing, self).__init__()
self.num_freqs = num_freqs
self.out_dim: int = num_in_features * self.num_freqs
self.use_cosine = use_cosine
freq = torch.arange(num_freqs).to(torch.float32)
self.freq_filter = nn.Parameter(
freq.view(-1, 1).repeat(1, num_in_features).view(1, -1),
requires_grad=False,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x.repeat(1, self.num_freqs)
x = x * self.freq_filter
if self.use_cosine:
return torch.cos(x)
else:
return torch.sin(x)
class Basis(nn.Module):
def __init__(
self,
num_in_features: int,
num_freqs: int = 50,
basis_type: str = "powersine",
act: str = "ssp",
sph: Optional["SphericalSmearing"] = None,
) -> None:
super(Basis, self).__init__()
self.num_freqs = num_freqs
self.basis_type = basis_type
if basis_type == "powersine":
self.smearing = SINESmearing(num_in_features, num_freqs)
self.out_dim = num_in_features * num_freqs
elif basis_type == "powercosine":
self.smearing = SINESmearing(
num_in_features, num_freqs, use_cosine=True
)
self.out_dim = num_in_features * num_freqs
elif basis_type == "fouriersine":
self.smearing = FourierSmearing(num_in_features, num_freqs)
self.out_dim = num_in_features * num_freqs
elif basis_type == "gauss":
self.smearing = GaussianSmearing(
num_in_features, start=0, end=1, num_freqs=num_freqs
)
self.out_dim = num_in_features * num_freqs
elif basis_type == "linact":
self.smearing = torch.nn.Sequential(
torch.nn.Linear(num_in_features, num_freqs * num_in_features),
Act(act),
)
self.out_dim = num_in_features * num_freqs
elif basis_type == "raw" or basis_type == "rawcat":
self.out_dim = num_in_features
elif "sph" in basis_type:
# by default, we use sine function to encode distance
# sph must be given here
assert sph is not None
            # assumes the first three columns are normalized xyz
# the rest of the columns are distances
if "cat" in basis_type:
# concatenate
self.smearing_sine = SINESmearing(
num_in_features - 3, num_freqs
)
self.out_dim = sph.out_dim + (num_in_features - 3) * num_freqs
elif "mul" in basis_type:
self.smearing_sine = SINESmearing(
num_in_features - 3, num_freqs
)
self.lin = torch.nn.Linear(
self.smearing_sine.out_dim, num_in_features - 3
)
self.out_dim = (num_in_features - 3) * sph.out_dim
elif "m40" in basis_type:
dim = 40
self.smearing_sine = SINESmearing(
num_in_features - 3, num_freqs
)
self.lin = torch.nn.Linear(
self.smearing_sine.out_dim, dim
) # make the output dimensionality comparable.
self.out_dim = dim * sph.out_dim
elif "nosine" in basis_type:
# does not use sine smearing for encoding distance
self.out_dim = (num_in_features - 3) * sph.out_dim
else:
raise ValueError(
"cat or mul not specified for spherical harnomics."
)
else:
raise RuntimeError("Undefined basis type.")
def forward(self, x: torch.Tensor, edge_attr_sph=None):
if "sph" in self.basis_type:
if "nosine" not in self.basis_type:
x_sine = self.smearing_sine(
x[:, 3:]
) # the first three features correspond to edge_vec_normalized, so we ignore
if "cat" in self.basis_type:
# just concatenate spherical edge feature and sined node features
return torch.cat([edge_attr_sph, x_sine], dim=1)
elif "mul" in self.basis_type or "m40" in self.basis_type:
# multiply sined node features into spherical edge feature (inspired by theory in spherical harmonics)
r = self.lin(x_sine)
outer = torch.einsum("ik,ij->ikj", edge_attr_sph, r)
return torch.flatten(outer, start_dim=1)
else:
raise RuntimeError(
f"Unknown basis type called {self.basis_type}"
)
else:
outer = torch.einsum("ik,ij->ikj", edge_attr_sph, x[:, 3:])
return torch.flatten(outer, start_dim=1)
elif "raw" in self.basis_type:
# do nothing, just return node features
pass
else:
x = self.smearing(x)
return x
class SphericalSmearing(nn.Module):
def __init__(self, max_n: int = 10, option: str = "all") -> None:
super(SphericalSmearing, self).__init__()
self.max_n = max_n
m: List[int] = []
n: List[int] = []
for i in range(max_n):
for j in range(0, i + 1):
n.append(i)
m.append(j)
m = np.array(m)
n = np.array(n)
if option == "all":
self.m = m
self.n = n
elif option == "sine":
self.m = m[n % 2 == 1]
self.n = n[n % 2 == 1]
elif option == "cosine":
self.m = m[n % 2 == 0]
self.n = n[n % 2 == 0]
self.out_dim = int(np.sum(self.m == 0) + 2 * np.sum(self.m != 0))
def forward(self, xyz) -> torch.Tensor:
# assuming input is already normalized
assert xyz.size(1) == 3
xyz = xyz / xyz.norm(dim=-1).view(-1, 1)
phi = torch.acos(xyz[:, 2])
theta = torch.atan2(-xyz[:, 1], -xyz[:, 0]) + math.pi
phi = phi.cpu().numpy()
theta = theta.cpu().numpy()
m_tile = np.tile(self.m, (len(xyz), 1))
n_tile = np.tile(self.n, (len(xyz), 1))
theta_tile = np.tile(theta.reshape(len(xyz), 1), (1, len(self.m)))
phi_tile = np.tile(phi.reshape(len(xyz), 1), (1, len(self.m)))
harm = sph_harm(m_tile, n_tile, theta_tile, phi_tile)
harm_mzero = harm[:, self.m == 0]
harm_mnonzero = harm[:, self.m != 0]
harm_real = np.concatenate(
[harm_mzero.real, harm_mnonzero.real, harm_mnonzero.imag], axis=1
)
return torch.from_numpy(harm_real).to(torch.float32).to(xyz.device)
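# ---------------------------------------------------------------------------
# Editor's minimal usage sketch (not part of the original module). The random
# tensors below are stand-ins for per-edge features.
if __name__ == "__main__":
    feats = torch.rand(10, 4)  # e.g. four distance-like features per edge
    smear = SINESmearing(num_in_features=4, num_freqs=8)
    print(smear(feats).shape)  # expected: torch.Size([10, 32])

    sph = SphericalSmearing(max_n=4, option="all")
    xyz = torch.randn(10, 3)  # forward() normalizes internally
    print(sph(xyz).shape)  # expected: torch.Size([10, sph.out_dim])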
| 10,366 | 32.659091 | 122 | py |
ocp | ocp-main/ocpmodels/datasets/oc22_lmdb_dataset.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import bisect
import logging
import math
import pickle
import random
import warnings
from pathlib import Path
import lmdb
import numpy as np
import torch
from torch.utils.data import Dataset
from torch_geometric.data import Batch
from ocpmodels.common import distutils
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import pyg2_data_transform
@registry.register_dataset("oc22_lmdb")
class OC22LmdbDataset(Dataset):
r"""Dataset class to load from LMDB files containing relaxation
trajectories or single point computations.
Useful for Structure to Energy & Force (S2EF), Initial State to
Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks.
The keys in the LMDB must be integers (stored as ascii objects) starting
from 0 through the length of the LMDB. For historical reasons any key named
"length" is ignored since that was used to infer length of many lmdbs in the same
folder, but lmdb lengths are now calculated directly from the number of keys.
Args:
config (dict): Dataset configuration
transform (callable, optional): Data transform function.
(default: :obj:`None`)
"""
def __init__(self, config, transform=None) -> None:
super(OC22LmdbDataset, self).__init__()
self.config = config
self.path = Path(self.config["src"])
self.data2train = self.config.get("data2train", "all")
if not self.path.is_file():
db_paths = sorted(self.path.glob("*.lmdb"))
assert len(db_paths) > 0, f"No LMDBs found in '{self.path}'"
self.metadata_path = self.path / "metadata.npz"
self._keys, self.envs = [], []
for db_path in db_paths:
cur_env = self.connect_db(db_path)
self.envs.append(cur_env)
                # Get the number of stored data points from the number of
                # entries in the LMDB
num_entries = cur_env.stat()["entries"]
# If "length" encoded as ascii is present, we have one fewer
# data than the stats suggest
if cur_env.begin().get("length".encode("ascii")) is not None:
num_entries -= 1
# Append the keys (0->num_entries) as a list
self._keys.append(list(range(num_entries)))
keylens = [len(k) for k in self._keys]
self._keylen_cumulative = np.cumsum(keylens).tolist()
self.num_samples = sum(keylens)
if self.data2train != "all":
txt_paths = sorted(self.path.glob("*.txt"))
index = 0
self.indices = []
for txt_path in txt_paths:
lines = open(txt_path).read().splitlines()
for line in lines:
if self.data2train == "adslabs":
if "clean" not in line:
self.indices.append(index)
if self.data2train == "slabs":
if "clean" in line:
self.indices.append(index)
index += 1
self.num_samples = len(self.indices)
else:
self.metadata_path = self.path.parent / "metadata.npz"
self.env = self.connect_db(self.path)
num_entries = self.env.stat()["entries"]
# If "length" encoded as ascii is present, we have one fewer
# data than the stats suggest
if self.env.begin().get("length".encode("ascii")) is not None:
num_entries -= 1
self._keys = list(range(num_entries))
self.num_samples = num_entries
self.transform = transform
self.lin_ref = self.oc20_ref = False
# only needed for oc20 datasets, oc22 is total by default
self.train_on_oc20_total_energies = self.config.get(
"train_on_oc20_total_energies", False
)
if self.train_on_oc20_total_energies:
self.oc20_ref = pickle.load(open(config["oc20_ref"], "rb"))
if self.config.get("lin_ref", False):
coeff = np.load(self.config["lin_ref"], allow_pickle=True)["coeff"]
self.lin_ref = torch.nn.Parameter(
torch.tensor(coeff), requires_grad=False
)
self.subsample = self.config.get("subsample", False)
def __len__(self):
if self.subsample:
return min(self.subsample, self.num_samples)
return self.num_samples
def __getitem__(self, idx):
if self.data2train != "all":
idx = self.indices[idx]
if not self.path.is_file():
# Figure out which db this should be indexed from.
db_idx = bisect.bisect(self._keylen_cumulative, idx)
# Extract index of element within that db.
el_idx = idx
if db_idx != 0:
el_idx = idx - self._keylen_cumulative[db_idx - 1]
assert el_idx >= 0
# Return features.
datapoint_pickled = (
self.envs[db_idx]
.begin()
.get(f"{self._keys[db_idx][el_idx]}".encode("ascii"))
)
data_object = pyg2_data_transform(pickle.loads(datapoint_pickled))
data_object.id = f"{db_idx}_{el_idx}"
else:
datapoint_pickled = self.env.begin().get(
f"{self._keys[idx]}".encode("ascii")
)
data_object = pyg2_data_transform(pickle.loads(datapoint_pickled))
if self.transform is not None:
data_object = self.transform(data_object)
# make types consistent
sid = data_object.sid
if isinstance(sid, torch.Tensor):
sid = sid.item()
data_object.sid = sid
if "fid" in data_object:
fid = data_object.fid
if isinstance(fid, torch.Tensor):
fid = fid.item()
data_object.fid = fid
if hasattr(data_object, "y_relaxed"):
attr = "y_relaxed"
elif hasattr(data_object, "y"):
attr = "y"
# if targets are not available, test data is being used
else:
return data_object
# convert s2ef energies to raw energies
if attr == "y":
# OC20 data
if "oc22" not in data_object:
assert self.config.get(
"train_on_oc20_total_energies", False
), "To train OC20 or OC22+OC20 on total energies set train_on_oc20_total_energies=True"
randomid = f"random{sid}"
data_object[attr] += self.oc20_ref[randomid]
data_object.nads = 1
data_object.oc22 = 0
# convert is2re energies to raw energies
else:
if "oc22" not in data_object:
assert self.config.get(
"train_on_oc20_total_energies", False
), "To train OC20 or OC22+OC20 on total energies set train_on_oc20_total_energies=True"
randomid = f"random{sid}"
data_object[attr] += self.oc20_ref[randomid]
del data_object.force
del data_object.y_init
data_object.nads = 1
data_object.oc22 = 0
if self.lin_ref is not False:
lin_energy = sum(self.lin_ref[data_object.atomic_numbers.long()])
data_object[attr] -= lin_energy
# to jointly train on oc22+oc20, need to delete these oc20-only attributes
# ensure otf_graph=1 in your model configuration
if "edge_index" in data_object:
del data_object.edge_index
if "cell_offsets" in data_object:
del data_object.cell_offsets
if "distances" in data_object:
del data_object.distances
return data_object
def connect_db(self, lmdb_path=None):
env = lmdb.open(
str(lmdb_path),
subdir=False,
readonly=True,
lock=False,
readahead=True,
meminit=False,
max_readers=1,
)
return env
def close_db(self) -> None:
if not self.path.is_file():
for env in self.envs:
env.close()
else:
self.env.close()
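# ---------------------------------------------------------------------------
# Editor's illustrative configuration sketch (not part of the original
# module). The path below is a placeholder for a directory of *.lmdb files.
#
#     dataset = OC22LmdbDataset(
#         {
#             "src": "/path/to/oc22/train/",
#             # For OC20 or mixed OC20+OC22 total-energy training, also set
#             # "train_on_oc20_total_energies": True and "oc20_ref": <pickle>.
#         }
#     )
#     sample = dataset[0]  # a torch_geometric Data object
#     print(len(dataset))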
| 8,611 | 35.961373 | 103 | py |
ocp | ocp-main/ocpmodels/datasets/lmdb_database.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is modified from the ASE db json backend
and is thus licensed under the corresponding LGPL2.1 license
The ASE notice for the LGPL2.1 license is available here:
https://gitlab.com/ase/ase/-/blob/master/LICENSE
"""
import base64
import json
import os
import sys
import zlib
from contextlib import ExitStack
from typing import Optional
import lmdb
import numpy as np
import orjson
from ase.db.core import Database, lock, now, ops
from ase.db.row import AtomsRow
from ase.io.jsonio import decode, encode
# These are special keys in the ASE LMDB that hold
# metadata and other info
RESERVED_KEYS = ["nextid", "metadata", "deleted_ids"]
class LMDBDatabase(Database):
def __enter__(self) -> "LMDBDatabase":
return self
def __init__(
self,
filename: Optional[str] = None,
create_indices: bool = True,
use_lock_file: bool = False,
serial: bool = False,
readonly: bool = False,
*args,
**kwargs,
) -> None:
"""
        For the most part, this is identical to the standard ASE db initialization
arguments, except that we add a readonly flag.
"""
super().__init__(
filename, create_indices, use_lock_file, serial, *args, **kwargs
)
# Add a readonly mode for when we're only training
# to make sure there's no parallel locks
self.readonly = readonly
if self.readonly:
# Open a new env
self.env = lmdb.open(
self.filename,
subdir=False,
meminit=False,
map_async=True,
readonly=True,
lock=False,
)
# Open a transaction and keep it open for fast read/writes!
self.txn = self.env.begin(write=False)
else:
# Open a new env with write access
self.env = lmdb.open(
self.filename,
map_size=1099511627776 * 2,
subdir=False,
meminit=False,
map_async=True,
)
self.txn = self.env.begin(write=True)
# Load all ids based on keys in the DB.
self._load_ids()
return
def __exit__(self, exc_type, exc_value, tb) -> None:
self.close()
def close(self) -> None:
# Close the lmdb environment and transaction
self.txn.commit()
self.env.close()
def _write(self, atoms, key_value_pairs, data, id):
Database._write(self, atoms, key_value_pairs, data)
mtime = now()
if isinstance(atoms, AtomsRow):
row = atoms
else:
row = AtomsRow(atoms)
row.ctime = mtime
row.user = os.getenv("USER")
dct = {}
for key in row.__dict__:
if key[0] == "_" or key in row._keys or key == "id":
continue
dct[key] = row[key]
dct["mtime"] = mtime
if key_value_pairs:
dct["key_value_pairs"] = key_value_pairs
if data:
dct["data"] = data
constraints = row.get("constraints")
if constraints:
dct["constraints"] = [
constraint.todict() for constraint in constraints
]
# json doesn't like Cell objects, so make it a cell
dct["cell"] = np.asarray(dct["cell"])
if id is None:
nextid = self._get_nextid()
id = nextid
nextid += 1
        else:
            data = self.txn.get(f"{id}".encode("ascii"))
            assert data is not None
            # Keep the id counter unchanged when overwriting an existing row.
            nextid = self._get_nextid()
# Add the new entry, then add the id and write the nextid
self.txn.put(
f"{id}".encode("ascii"),
zlib.compress(
orjson.dumps(dct, option=orjson.OPT_SERIALIZE_NUMPY)
),
)
self.ids.append(id)
self.txn.put(
"nextid".encode("ascii"),
zlib.compress(
orjson.dumps(nextid, option=orjson.OPT_SERIALIZE_NUMPY)
),
)
return id
def delete(self, ids) -> None:
for id in ids:
self.txn.delete(f"{id}".encode("ascii"))
self.ids.remove(id)
self.deleted_ids += ids
self.txn.put(
"deleted_ids".encode("ascii"),
zlib.compress(
orjson.dumps(
self.deleted_ids, option=orjson.OPT_SERIALIZE_NUMPY
)
),
)
def _get_row(self, id, include_data: bool = True):
if id is None:
assert len(self.ids) == 1
id = self.ids[0]
data = self.txn.get(f"{id}".encode("ascii"))
if data is not None:
dct = orjson.loads(zlib.decompress(data))
else:
raise KeyError(f"Id {id} missing from the database!")
if not include_data:
dct.pop("data", None)
dct["id"] = id
return AtomsRow(dct)
def _get_row_by_index(self, index: int, include_data: bool = True):
"""Auxiliary function to get the ith entry, rather than
a specific id
"""
id = self.ids[index]
data = self.txn.get(f"{id}".encode("ascii"))
if data is not None:
dct = orjson.loads(zlib.decompress(data))
else:
raise KeyError(f"Id {id} missing from the database!")
if not include_data:
dct.pop("data", None)
dct["id"] = id
return AtomsRow(dct)
def _select(
self,
keys,
cmps,
explain: bool = False,
verbosity: int = 0,
limit=None,
offset: int = 0,
sort=None,
include_data: bool = True,
columns: str = "all",
):
if explain:
yield {"explain": (0, 0, 0, "scan table")}
return
if sort:
if sort[0] == "-":
reverse = True
sort = sort[1:]
else:
reverse = False
def f(row):
return row.get(sort, missing)
rows = []
missing = []
for row in self._select(keys, cmps):
key = row.get(sort)
if key is None:
missing.append((0, row))
else:
rows.append((key, row))
rows.sort(reverse=reverse, key=lambda x: x[0])
rows += missing
if limit:
rows = rows[offset : offset + limit]
for key, row in rows:
yield row
return
if not limit:
limit = -offset - 1
cmps = [(key, ops[op], val) for key, op, val in cmps]
n = 0
for id in self.ids:
if n - offset == limit:
return
row = self._get_row(id, include_data=False)
for key in keys:
if key not in row:
break
else:
for key, op, val in cmps:
if isinstance(key, int):
value = np.equal(row.numbers, key).sum()
else:
value = row.get(key)
if key == "pbc":
assert op in [ops["="], ops["!="]]
value = "".join("FT"[x] for x in value)
if value is None or not op(value, val):
break
else:
if n >= offset:
yield row
n += 1
@property
def metadata(self):
"""Load the metadata from the DB if present"""
if self._metadata is None:
metadata = self.txn.get("metadata".encode("ascii"))
if metadata is None:
self._metadata = {}
else:
self._metadata = orjson.loads(zlib.decompress(metadata))
return self._metadata.copy()
@metadata.setter
def metadata(self, dct):
self._metadata = dct
# Put the updated metadata dictionary
self.txn.put(
"metadata".encode("ascii"),
zlib.compress(
orjson.dumps(dct, option=orjson.OPT_SERIALIZE_NUMPY)
),
)
def _get_nextid(self):
"""Get the id of the next row to be written"""
# Get the nextid
nextid_data = self.txn.get("nextid".encode("ascii"))
if nextid_data is not None:
nextid = orjson.loads(zlib.decompress(nextid_data))
else:
# This db is empty; start at 1!
nextid = 1
return nextid
def count(self, selection=None, **kwargs) -> int:
"""Count rows.
See the select() method for the selection syntax. Use db.count() or
len(db) to count all rows.
"""
if selection is not None:
n = 0
for row in self.select(selection, **kwargs):
n += 1
return n
else:
# Fast count if there's no queries! Just get number of ids
return len(self.ids)
def _load_ids(self) -> None:
"""Load ids from the DB
        ASE db ids are mostly the integers 1..N, but entries can be missing if
        ids have been deleted. To save space, and operating under the assumption
        that there will probably not be many deletions in most OCP datasets,
        we just store the deleted ids.
"""
# Load the deleted ids
deleted_ids_data = self.txn.get("deleted_ids".encode("ascii"))
if deleted_ids_data is None:
self.deleted_ids = []
else:
self.deleted_ids = orjson.loads(zlib.decompress(deleted_ids_data))
# Reconstruct the full id list
self.ids = [
i
for i in range(1, self._get_nextid())
if i not in set(self.deleted_ids)
]
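# ---------------------------------------------------------------------------
# Editor's minimal usage sketch (not part of the original module): write one
# molecule to a throwaway database in a temporary directory and read it back.
if __name__ == "__main__":
    import tempfile

    from ase.build import molecule

    with tempfile.TemporaryDirectory() as tmp:
        with LMDBDatabase(os.path.join(tmp, "example.aselmdb")) as db:
            db.write(molecule("H2"))
            print(db.count(), db.get(1).toatoms())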
| 10,029 | 27.413598 | 82 | py |
ocp | ocp-main/ocpmodels/datasets/lmdb_dataset.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import bisect
import logging
import math
import pickle
import random
import warnings
from pathlib import Path
from typing import Optional, TypeVar
import lmdb
import numpy as np
import torch
from torch.utils.data import Dataset
from torch_geometric.data import Batch
from torch_geometric.data.data import BaseData
from ocpmodels.common import distutils
from ocpmodels.common.registry import registry
from ocpmodels.common.typing import assert_is_instance
from ocpmodels.common.utils import pyg2_data_transform
from ocpmodels.datasets.target_metadata_guesser import guess_property_metadata
T_co = TypeVar("T_co", covariant=True)
@registry.register_dataset("lmdb")
@registry.register_dataset("single_point_lmdb")
@registry.register_dataset("trajectory_lmdb")
class LmdbDataset(Dataset[T_co]):
r"""Dataset class to load from LMDB files containing relaxation
trajectories or single point computations.
Useful for Structure to Energy & Force (S2EF), Initial State to
Relaxed State (IS2RS), and Initial State to Relaxed Energy (IS2RE) tasks.
The keys in the LMDB must be integers (stored as ascii objects) starting
from 0 through the length of the LMDB. For historical reasons any key named
"length" is ignored since that was used to infer length of many lmdbs in the same
folder, but lmdb lengths are now calculated directly from the number of keys.
Args:
config (dict): Dataset configuration
transform (callable, optional): Data transform function.
(default: :obj:`None`)
"""
def __init__(self, config, transform=None) -> None:
super(LmdbDataset, self).__init__()
self.config = config
assert not self.config.get(
"train_on_oc20_total_energies", False
), "For training on total energies set dataset=oc22_lmdb"
self.path = Path(self.config["src"])
if not self.path.is_file():
db_paths = sorted(self.path.glob("*.lmdb"))
assert len(db_paths) > 0, f"No LMDBs found in '{self.path}'"
self.metadata_path = self.path / "metadata.npz"
self._keys = []
self.envs = []
for db_path in db_paths:
cur_env = self.connect_db(db_path)
self.envs.append(cur_env)
# If "length" encoded as ascii is present, use that
length_entry = cur_env.begin().get("length".encode("ascii"))
if length_entry is not None:
num_entries = pickle.loads(length_entry)
else:
                    # Get the number of stored data points from the number of
                    # entries in the LMDB
num_entries = cur_env.stat()["entries"]
# Append the keys (0->num_entries) as a list
self._keys.append(list(range(num_entries)))
keylens = [len(k) for k in self._keys]
self._keylen_cumulative = np.cumsum(keylens).tolist()
self.num_samples = sum(keylens)
else:
self.metadata_path = self.path.parent / "metadata.npz"
self.env = self.connect_db(self.path)
# If "length" encoded as ascii is present, use that
length_entry = self.env.begin().get("length".encode("ascii"))
if length_entry is not None:
num_entries = pickle.loads(length_entry)
else:
                # Get the number of stored data points from the number of
                # entries in the LMDB
num_entries = assert_is_instance(
self.env.stat()["entries"], int
)
self._keys = list(range(num_entries))
self.num_samples = num_entries
# If specified, limit dataset to only a portion of the entire dataset
# total_shards: defines total chunks to partition dataset
# shard: defines dataset shard to make visible
self.sharded = False
if "shard" in self.config and "total_shards" in self.config:
self.sharded = True
self.indices = range(self.num_samples)
# split all available indices into 'total_shards' bins
self.shards = np.array_split(
self.indices, self.config.get("total_shards", 1)
)
# limit each process to see a subset of data based off defined shard
self.available_indices = self.shards[self.config.get("shard", 0)]
self.num_samples = len(self.available_indices)
self.transform = transform
def __len__(self) -> int:
return self.num_samples
def __getitem__(self, idx: int):
# if sharding, remap idx to appropriate idx of the sharded set
if self.sharded:
idx = self.available_indices[idx]
if not self.path.is_file():
# Figure out which db this should be indexed from.
db_idx = bisect.bisect(self._keylen_cumulative, idx)
# Extract index of element within that db.
el_idx = idx
if db_idx != 0:
el_idx = idx - self._keylen_cumulative[db_idx - 1]
assert el_idx >= 0
# Return features.
datapoint_pickled = (
self.envs[db_idx]
.begin()
.get(f"{self._keys[db_idx][el_idx]}".encode("ascii"))
)
data_object = pyg2_data_transform(pickle.loads(datapoint_pickled))
data_object.id = f"{db_idx}_{el_idx}"
else:
datapoint_pickled = self.env.begin().get(
f"{self._keys[idx]}".encode("ascii")
)
data_object = pyg2_data_transform(pickle.loads(datapoint_pickled))
if self.transform is not None:
data_object = self.transform(data_object)
return data_object
def connect_db(self, lmdb_path: Optional[Path] = None):
env = lmdb.open(
str(lmdb_path),
subdir=False,
readonly=True,
lock=False,
readahead=True,
meminit=False,
max_readers=1,
)
return env
def close_db(self) -> None:
if not self.path.is_file():
for env in self.envs:
env.close()
else:
self.env.close()
def get_metadata(self, num_samples: int = 100):
        # This will interrogate the classic OCP LMDB format to determine
# which properties are present and attempt to guess their shapes
# and whether they are intensive or extensive.
# Grab an example data point
example_pyg_data = self.__getitem__(0)
# Check for all properties we've used for OCP datasets in the past
props = []
for potential_prop in [
"y",
"y_relaxed",
"stress",
"stresses",
"force",
"forces",
]:
if hasattr(example_pyg_data, potential_prop):
props.append(potential_prop)
# Get a bunch of random data samples and the number of atoms
sample_pyg = [
self[i]
for i in np.random.choice(
self.__len__(), size=(num_samples,), replace=False
)
]
atoms_lens = [data.natoms for data in sample_pyg]
# Guess the metadata for targets for each found property
metadata = {
"targets": {
prop: guess_property_metadata(
atoms_lens, [getattr(data, prop) for data in sample_pyg]
)
for prop in props
}
}
return metadata
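# A minimal usage sketch (not part of the original module): how LmdbDataset is
# typically configured and read. The "src" directory below is hypothetical and
# the sharding keys are optional; only "src" is required.
def _example_lmdb_dataset_usage():
    config = {
        "src": "/path/to/lmdb/folder/",  # hypothetical folder of *.lmdb files
        # "shard": 0, "total_shards": 4,  # optional: expose 1/4 of the samples
    }
    dataset = LmdbDataset(config)
    n_samples = len(dataset)      # summed over every LMDB found in the folder
    sample = dataset[0]           # a torch_geometric Data object; .id is "<db_idx>_<el_idx>"
    dataset.close_db()
    return n_samples, sample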
class SinglePointLmdbDataset(LmdbDataset):
def __init__(self, config, transform=None) -> None:
super(SinglePointLmdbDataset, self).__init__(config, transform)
warnings.warn(
"SinglePointLmdbDataset is deprecated and will be removed in the future."
"Please use 'LmdbDataset' instead.",
stacklevel=3,
)
class TrajectoryLmdbDataset(LmdbDataset):
def __init__(self, config, transform=None) -> None:
super(TrajectoryLmdbDataset, self).__init__(config, transform)
warnings.warn(
"TrajectoryLmdbDataset is deprecated and will be removed in the future."
"Please use 'LmdbDataset' instead.",
stacklevel=3,
)
def data_list_collater(data_list, otf_graph: bool = False) -> BaseData:
batch = Batch.from_data_list(data_list)
if not otf_graph:
try:
n_neighbors = []
for _, data in enumerate(data_list):
n_index = data.edge_index[1, :]
n_neighbors.append(n_index.shape[0])
batch.neighbors = torch.tensor(n_neighbors)
except (NotImplementedError, TypeError):
logging.warning(
"LMDB does not contain edge index information, set otf_graph=True"
)
return batch
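# A hedged sketch (assuming the samples already carry edge_index, as OCP LMDBs
# do when graphs were precomputed): data_list_collater is what you hand to a
# PyTorch DataLoader as its collate_fn.
def _example_collate_usage(dataset):
    from functools import partial
    from torch.utils.data import DataLoader
    loader = DataLoader(
        dataset,
        batch_size=8,
        collate_fn=partial(data_list_collater, otf_graph=False),
    )
    batch = next(iter(loader))    # Batch with .neighbors filled from edge_index
    return batch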
| 9,163 | 35.07874 | 85 | py |
ocp | ocp-main/ocpmodels/datasets/ase_datasets.py | import bisect
import copy
import functools
import glob
import logging
import os
import warnings
from pathlib import Path
from abc import ABC, abstractmethod
from typing import List
import ase
import ase.db
import ase.io
import numpy as np
from torch import tensor
from torch.utils.data import Dataset
from tqdm import tqdm
from ocpmodels.common.registry import registry
from ocpmodels.datasets.lmdb_database import LMDBDatabase
from ocpmodels.datasets.target_metadata_guesser import guess_property_metadata
from ocpmodels.preprocessing import AtomsToGraphs
def apply_one_tags(
atoms, skip_if_nonzero: bool = True, skip_always: bool = False
):
"""
This function will apply tags of 1 to an ASE atoms object.
It is used as an atoms_transform in the datasets contained in this file.
Certain models will treat atoms differently depending on their tags.
For example, GemNet-OC by default will only compute triplet and quadruplet interactions
for atoms with non-zero tags. This model throws an error if there are no tagged atoms.
For this reason, the default behavior is to tag atoms in structures with no tags.
args:
skip_if_nonzero (bool): If at least one atom has a nonzero tag, do not tag any atoms
skip_always (bool): Do not apply any tags. This arg exists so that this function can be disabled
without needing to pass a callable (which is currently difficult to do with main.py)
"""
if skip_always:
return atoms
if np.all(atoms.get_tags() == 0) or not skip_if_nonzero:
atoms.set_tags(np.ones(len(atoms)))
return atoms
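# A small illustrative sketch (ase.build.molecule is only used to get an
# arbitrary untagged Atoms object): untagged structures get all-ones tags,
# while structures that already carry a nonzero tag are left untouched.
def _example_apply_one_tags():
    from ase.build import molecule
    atoms = apply_one_tags(molecule("H2O"))      # tags were all 0 -> now all 1
    assert (atoms.get_tags() == 1).all()
    atoms.set_tags([2, 0, 0])
    atoms = apply_one_tags(atoms)                # a nonzero tag exists -> unchanged
    assert list(atoms.get_tags()) == [2, 0, 0]
    return atoms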
class AseAtomsDataset(Dataset, ABC):
"""
This is an abstract Dataset that includes helpful utilities for turning
ASE atoms objects into OCP-usable data objects. This should not be instantiated directly
as get_atoms_object and load_dataset_get_ids are not implemented in this base class.
Derived classes must add at least two things:
self.get_atoms_object(id): a function that takes an identifier and returns a corresponding atoms object
self.load_dataset_get_ids(config: dict): This function is responsible for any initialization/loads
of the dataset and importantly must return a list of all possible identifiers that can be passed into
self.get_atoms_object(id)
Identifiers need not be any particular type.
"""
def __init__(
self, config, transform=None, atoms_transform=apply_one_tags
) -> None:
self.config = config
a2g_args = config.get("a2g_args", {})
# Make sure we always include PBC info in the resulting atoms objects
a2g_args["r_pbc"] = True
self.a2g = AtomsToGraphs(**a2g_args)
self.transform = transform
self.atoms_transform = atoms_transform
if self.config.get("keep_in_memory", False):
self.__getitem__ = functools.cache(self.__getitem__)
# Derived classes should extend this functionality to also create self.ids,
# a list of identifiers that can be passed to get_atoms_object()
self.ids = self.load_dataset_get_ids(config)
def __len__(self) -> int:
return len(self.ids)
def __getitem__(self, idx):
# Handle slicing
if isinstance(idx, slice):
return [self[i] for i in range(*idx.indices(len(self.ids)))]
# Get atoms object via derived class method
atoms = self.get_atoms_object(self.ids[idx])
# Transform atoms object
if self.atoms_transform is not None:
atoms = self.atoms_transform(
atoms, **self.config.get("atoms_transform_args", {})
)
if "sid" in atoms.info:
sid = atoms.info["sid"]
else:
sid = tensor([idx])
# Convert to data object
data_object = self.a2g.convert(atoms, sid)
data_object.pbc = tensor(atoms.pbc)
# Transform data object
if self.transform is not None:
data_object = self.transform(
data_object, **self.config.get("transform_args", {})
)
return data_object
@abstractmethod
def get_atoms_object(self, identifier):
# This function should return an ASE atoms object.
raise NotImplementedError(
"Returns an ASE atoms object. Derived classes should implement this function."
)
@abstractmethod
def load_dataset_get_ids(self, config):
# This function should return a list of ids that can be used to index into the database
raise NotImplementedError(
"Every ASE dataset needs to declare a function to load the dataset and return a list of ids."
)
def close_db(self) -> None:
# This method is sometimes called by a trainer
pass
def guess_target_metadata(self, num_samples: int = 100):
metadata = {}
if num_samples < len(self):
metadata["targets"] = guess_property_metadata(
[
self.get_atoms_object(self.ids[idx])
for idx in np.random.choice(
len(self), size=(num_samples,), replace=False
)
]
)
else:
metadata["targets"] = guess_property_metadata(
[
self.get_atoms_object(self.ids[idx])
for idx in range(len(self))
]
)
return metadata
def get_metadata(self):
return self.guess_target_metadata()
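# A hedged sketch of the contract described above: a hypothetical in-memory
# subclass only has to provide the two abstract methods; everything else
# (transforms, AtomsToGraphs conversion, metadata guessing) comes from the base.
class _ExampleInMemoryAtomsDataset(AseAtomsDataset):
    def load_dataset_get_ids(self, config):
        self._atoms_list = config["atoms_list"]   # assumed: a list of ase.Atoms
        return list(range(len(self._atoms_list)))
    def get_atoms_object(self, identifier):
        return self._atoms_list[identifier]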
@registry.register_dataset("ase_read")
class AseReadDataset(AseAtomsDataset):
"""
This Dataset uses ase.io.read to load data from a directory on disk.
This is intended for small-scale testing and demonstrations of OCP.
Larger datasets are better served by the efficiency of other dataset types
such as LMDB.
For a full list of ASE-readable filetypes, see
https://wiki.fysik.dtu.dk/ase/ase/io/io.html
args:
config (dict):
src (str): The source folder that contains your ASE-readable files
pattern (str): Filepath matching each file you want to read
ex. "*/POSCAR", "*.cif", "*.xyz"
search recursively with two wildcards: "**/POSCAR" or "**/*.cif"
a2g_args (dict): Keyword arguments for ocpmodels.preprocessing.AtomsToGraphs()
default options will work for most users
If you are using this for a training dataset, set
"r_energy":True and/or "r_forces":True as appropriate
In that case, energy/forces must be in the files you read (ex. OUTCAR)
ase_read_args (dict): Keyword arguments for ase.io.read()
keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need
to iterate over a dataset many times (e.g. training for many epochs).
Not recommended for large datasets.
atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable
transform_args (dict): Additional keyword arguments for the transform callable
atoms_transform (callable, optional): Additional preprocessing function applied to the Atoms
object. Useful for applying tags, for example.
transform (callable, optional): Additional preprocessing function for the Data object
"""
def load_dataset_get_ids(self, config) -> List[Path]:
self.ase_read_args = config.get("ase_read_args", {})
if ":" in self.ase_read_args.get("index", ""):
raise NotImplementedError(
"To read multiple structures from a single file, please use AseReadMultiStructureDataset."
)
self.path = Path(config["src"])
if self.path.is_file():
raise Exception("The specified src is not a directory")
return list(self.path.glob(f'{config["pattern"]}'))
def get_atoms_object(self, identifier):
try:
atoms = ase.io.read(identifier, **self.ase_read_args)
except Exception as err:
warnings.warn(f"{err} occured for: {identifier}")
raise err
return atoms
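# A hedged configuration sketch for AseReadDataset; the folder and pattern are
# hypothetical, and r_energy/r_forces should only be requested when the files
# actually contain calculator results (e.g. OUTCARs).
def _example_ase_read_config():
    config = {
        "src": "/path/to/ase_readable_files/",   # hypothetical directory
        "pattern": "**/*.cif",                   # recursive glob under src
        "a2g_args": {"r_energy": False, "r_forces": False},
    }
    dataset = AseReadDataset(config)
    return dataset[0]                            # Data object built by AtomsToGraphs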
@registry.register_dataset("ase_read_multi")
class AseReadMultiStructureDataset(AseAtomsDataset):
"""
This Dataset can read multiple structures from each file using ase.io.read.
The disadvantage is that all files must be read at startup.
This is a significant cost for large datasets.
This is intended for small-scale testing and demonstrations of OCP.
Larger datasets are better served by the efficiency of other dataset types
such as LMDB.
For a full list of ASE-readable filetypes, see
https://wiki.fysik.dtu.dk/ase/ase/io/io.html
args:
config (dict):
src (str): The source folder that contains your ASE-readable files
pattern (str): Filepath matching each file you want to read
ex. "*.traj", "*.xyz"
search recursively with two wildcards: "**/POSCAR" or "**/*.cif"
index_file (str): Filepath to an indexing file, which contains each filename
and the number of structures contained in each file. For instance:
/path/to/relaxation1.traj 200
/path/to/relaxation2.traj 150
This will overrule the src and pattern that you specify!
a2g_args (dict): Keyword arguments for ocpmodels.preprocessing.AtomsToGraphs()
default options will work for most users
If you are using this for a training dataset, set
"r_energy":True and/or "r_forces":True as appropriate
In that case, energy/forces must be in the files you read (ex. OUTCAR)
ase_read_args (dict): Keyword arguments for ase.io.read()
keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need
to iterate over a dataset many times (e.g. training for many epochs).
Not recommended for large datasets.
use_tqdm (bool): Use TQDM progress bar when initializing dataset
atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable
transform_args (dict): Additional keyword arguments for the transform callable
atoms_transform (callable, optional): Additional preprocessing function applied to the Atoms
object. Useful for applying tags, for example.
transform (callable, optional): Additional preprocessing function for the Data object
"""
def load_dataset_get_ids(self, config):
self.ase_read_args = config.get("ase_read_args", {})
        if "index" not in self.ase_read_args:
self.ase_read_args["index"] = ":"
if config.get("index_file", None) is not None:
            with open(config["index_file"], "r") as f:
                index = f.readlines()
ids = []
for line in index:
filename = line.split(" ")[0]
for i in range(int(line.split(" ")[1])):
ids.append(f"{filename} {i}")
return ids
self.path = Path(config["src"])
if self.path.is_file():
raise Exception("The specified src is not a directory")
filenames = list(self.path.glob(f'{config["pattern"]}'))
ids = []
if config.get("use_tqdm", True):
filenames = tqdm(filenames)
for filename in filenames:
try:
structures = ase.io.read(filename, **self.ase_read_args)
except Exception as err:
warnings.warn(f"{err} occured for: {filename}")
else:
for i, structure in enumerate(structures):
ids.append(f"{filename} {i}")
return ids
def get_atoms_object(self, identifier):
try:
atoms = ase.io.read(
"".join(identifier.split(" ")[:-1]), **self.ase_read_args
)[int(identifier.split(" ")[-1])]
except Exception as err:
warnings.warn(f"{err} occured for: {identifier}")
raise err
return atoms
def get_metadata(self):
return {}
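# A hedged sketch for AseReadMultiStructureDataset; the paths are hypothetical.
# Passing an index_file ("<filename> <num_frames>" per line) skips the
# potentially slow ase.io.read scan of every trajectory at startup.
def _example_ase_read_multi_config():
    config = {
        "src": "/path/to/trajectories/",         # hypothetical directory
        "pattern": "*.traj",
        # "index_file": "/path/to/index.txt",    # optional, overrides src/pattern
        "ase_read_args": {"index": ":"},         # read every frame (the default)
    }
    dataset = AseReadMultiStructureDataset(config)
    return len(dataset)                          # total number of frames found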
class dummy_list(list):
def __init__(self, max) -> None:
self.max = max
return
def __len__(self):
return self.max
def __getitem__(self, idx):
# Handle slicing
if isinstance(idx, slice):
return [self[i] for i in range(*idx.indices(self.max))]
# Cast idx as int since it could be a tensor index
idx = int(idx)
# Handle negative indices (referenced from end)
if idx < 0:
idx += self.max
if 0 <= idx < self.max:
return idx
else:
raise IndexError
@registry.register_dataset("ase_db")
class AseDBDataset(AseAtomsDataset):
"""
This Dataset connects to an ASE Database, allowing the storage of atoms objects
with a variety of backends including JSON, SQLite, and database server options.
For more information, see:
https://databases.fysik.dtu.dk/ase/ase/db/db.html
args:
config (dict):
src (str): Either
- the path an ASE DB,
- the connection address of an ASE DB,
- a folder with multiple ASE DBs,
- a glob string to use to find ASE DBs, or
- a list of ASE db paths/addresses.
If a folder, every file will be attempted as an ASE DB, and warnings
are raised for any files that can't connect cleanly
Note that for large datasets, ID loading can be slow and there can be many
ids, so it's advised to make loading the id list as easy as possible. There is not
an obvious way to get a full list of ids from most ASE dbs besides simply looping
                through the entire dataset. See the AseLMDBDataset, which was written with this use case
in mind.
connect_args (dict): Keyword arguments for ase.db.connect()
select_args (dict): Keyword arguments for ase.db.select()
You can use this to query/filter your database
a2g_args (dict): Keyword arguments for ocpmodels.preprocessing.AtomsToGraphs()
default options will work for most users
If you are using this for a training dataset, set
"r_energy":True and/or "r_forces":True as appropriate
In that case, energy/forces must be in the database
keep_in_memory (bool): Store data in memory. This helps avoid random reads if you need
to iterate over a dataset many times (e.g. training for many epochs).
Not recommended for large datasets.
atoms_transform_args (dict): Additional keyword arguments for the atoms_transform callable
transform_args (dict): Additional keyword arguments for the transform callable
atoms_transform (callable, optional): Additional preprocessing function applied to the Atoms
object. Useful for applying tags, for example.
transform (callable, optional): Additional preprocessing function for the Data object
"""
def load_dataset_get_ids(self, config) -> dummy_list:
if isinstance(config["src"], list):
filepaths = config["src"]
elif os.path.isfile(config["src"]):
filepaths = [config["src"]]
elif os.path.isdir(config["src"]):
filepaths = glob.glob(f'{config["src"]}/*')
else:
filepaths = glob.glob(config["src"])
self.dbs = []
for path in filepaths:
try:
self.dbs.append(
self.connect_db(path, config.get("connect_args", {}))
)
except ValueError:
logging.warning(
f"Tried to connect to {path} but it's not an ASE database!"
)
self.select_args = config.get("select_args", {})
# In order to get all of the unique IDs using the default ASE db interface
# we have to load all the data and check ids using a select. This is extremely
        # inefficient for large datasets. If the db we're using already presents a list of
# ids and there is no query, we can just use that list instead and save ourselves
# a lot of time!
self.db_ids = []
for db in self.dbs:
if hasattr(db, "ids") and self.select_args == {}:
self.db_ids.append(db.ids)
else:
self.db_ids.append(
[row.id for row in db.select(**self.select_args)]
)
idlens = [len(ids) for ids in self.db_ids]
self._idlen_cumulative = np.cumsum(idlens).tolist()
return dummy_list(sum(idlens))
def get_atoms_object(self, idx):
# Figure out which db this should be indexed from.
db_idx = bisect.bisect(self._idlen_cumulative, idx)
# Extract index of element within that db
el_idx = idx
if db_idx != 0:
el_idx = idx - self._idlen_cumulative[db_idx - 1]
assert el_idx >= 0
atoms_row = self.dbs[db_idx]._get_row(self.db_ids[db_idx][el_idx])
atoms = atoms_row.toatoms()
if isinstance(atoms_row.data, dict):
atoms.info.update(atoms_row.data)
return atoms
def connect_db(self, address, connect_args={}):
db_type = connect_args.get("type", "extract_from_name")
if db_type == "lmdb" or (
db_type == "extract_from_name" and address.split(".")[-1] == "lmdb"
):
return LMDBDatabase(address, readonly=True, **connect_args)
else:
return ase.db.connect(address, **connect_args)
def close_db(self) -> None:
for db in self.dbs:
if hasattr(db, "close"):
db.close()
def get_metadata(self):
logging.warning(
"You specific a folder of ASE dbs, so it's impossible to know which metadata to use. Using the first!"
)
if self.dbs[0].metadata == {}:
return self.guess_target_metadata()
else:
return copy.deepcopy(self.dbs[0].metadata)
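# A hedged configuration sketch for AseDBDataset; the database path is
# hypothetical and select_args are forwarded unchanged to ase.db.select().
def _example_ase_db_config():
    config = {
        "src": "/path/to/structures.db",         # hypothetical SQLite/JSON/LMDB db
        "select_args": {"selection": "natoms>2"},
        "a2g_args": {"r_energy": True},
    }
    dataset = AseDBDataset(config)
    atoms = dataset.get_atoms_object(0)          # raw ASE Atoms for the first row
    dataset.close_db()
    return atoms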
| 18,646 | 36.145418 | 114 | py |
ocp | ocp-main/ocpmodels/datasets/target_metadata_guesser.py | import logging
import numpy as np
def uniform_atoms_lengths(atoms_lens) -> bool:
# If all of the structures have the same number of atoms, it's really hard to know
# whether the entries are intensive or extensive, and whether
# some of the entries are per-atom or not
return len(set(atoms_lens)) == 1
def target_constant_shape(atoms_lens, target_samples) -> bool:
# Given a bunch of atoms lengths, and the corresponding samples for the target,
# determine whether the shape is always the same regardless of atom size
return len(set([sample.shape for sample in target_samples])) == 1
def target_per_atom(atoms_lens, target_samples) -> bool:
# Given a bunch of atoms lengths, and the corresponding samples for the target,
# determine whether the target is per-atom (first dimension == # atoms, others constant)
# If a sample target is just a number/float/etc, it can't be per-atom
if len(np.array(target_samples[0]).shape) == 0:
return False
first_dim_proportional = all(
[
np.array(sample).shape[0] == alen
for alen, sample in zip(atoms_lens, target_samples)
]
)
if len(np.array(target_samples[0]).shape) == 1:
other_dim_constant = True
else:
other_dim_constant = (
len(set([np.array(sample).shape[1:] for sample in target_samples]))
== 1
)
if first_dim_proportional and other_dim_constant:
return True
else:
return False
def target_extensive(atoms_lens, target_samples, threshold: float = 0.2):
# Guess whether a property is intensive or extensive.
    # We guess by checking whether the standard deviation of the per-atom
    # normalized property stays below ~20% of its mean, i.e. whether dividing
    # by the atom count removes most of the variation.
# Of course, with a small amount of data!
# If the targets are all the same shapes, we shouldn't be asking if the property
# is intensive or extensive!
assert target_constant_shape(
atoms_lens, target_samples
), "The shapes of this target are not constant!"
# Get the per-atom normalized properties
try:
compiled_target_array = np.array(
[
sample / atom_len
                for sample, atom_len in zip(target_samples, atoms_lens)
]
)
except TypeError:
return False
# Calculate the normalized standard deviation of each element in the property output
target_samples_mean = np.mean(compiled_target_array, axis=0)
target_samples_normalized = compiled_target_array / target_samples_mean
# If there's not much variation in the per-atom normalized properties,
# guess extensive!
extensive_guess = target_samples_normalized.std(axis=0) < (
threshold * target_samples_normalized.mean(axis=0)
)
if extensive_guess.shape == ():
return extensive_guess
elif (
target_samples_normalized.std(axis=0)
< (threshold * target_samples_normalized.mean(axis=0))
).all():
return True
else:
return False
def guess_target_metadata(atoms_len, target_samples):
example_array = np.array(target_samples[0])
if example_array.dtype == object or example_array.dtype == str:
return {
"shape": None,
"type": "unknown",
"extensive": None,
"units": "unknown",
"comment": "Guessed property metadata. The property didn't seem to be a numpy array with any numeric type, so we dob't know what to do.",
}
elif target_constant_shape(atoms_len, target_samples):
target_shape = np.array(target_samples[0]).shape
if uniform_atoms_lengths(atoms_len):
if atoms_len[0] > 3 and target_per_atom(atoms_len, target_samples):
target_shape = list(target_samples[0].shape)
target_shape[0] = "N"
return {
"shape": tuple(target_shape),
"type": "per-atom",
"extensive": True,
"units": "unknown",
"comment": "Guessed property metadata. Because all the sampled atoms are the same length, we can't really know if it is per-atom or per-frame, but the first dimension happens to match the number of atoms.",
}
else:
return {
"shape": tuple(target_shape),
"type": "per-image",
"extensive": True,
"units": "unknown",
"comment": "Guessed property metadata. Because all the sampled atoms are the same length, we can't know if this is intensive of extensive, or per-image or per-frame",
}
elif target_extensive(atoms_len, target_samples):
return {
"shape": tuple(target_shape),
"type": "per-image",
"extensive": True,
"comment": "Guessed property metadata. It appears to be extensive based on a quick correlation with atom sizes",
}
else:
return {
"shape": tuple(target_shape),
"type": "per-image",
"extensive": False,
"units": "unknown",
"comment": "Guess property metadata. It appears to be intensive based on a quick correlation with atom sizes.",
}
elif target_per_atom(atoms_len, target_samples):
target_shape = list(target_samples[0].shape)[1:]
return {
"shape": tuple(target_shape),
"type": "per-atom",
"extensive": True,
"units": "unknown",
"comment": "Guessed property metadata. It appears to be a per-atom property.",
}
else:
return {
"shape": None,
"type": "unknown",
"extensive": None,
"units": "unknown",
"comment": "Guessed property metadata. The property was variable across different samples and didn't seem to be a per-atom property",
}
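# A hedged sketch of the heuristics above on synthetic samples: a scalar that
# scales with the number of atoms comes out "per-image"/extensive, while an
# (N, 3) array whose first dimension tracks the atom count comes out "per-atom".
def _example_guess_target_metadata():
    atoms_lens = [4, 7, 12, 20]
    energies = [np.array(2.0 * n) for n in atoms_lens]   # roughly extensive scalar
    forces = [np.zeros((n, 3)) for n in atoms_lens]      # per-atom vectors
    energy_meta = guess_target_metadata(atoms_lens, energies)
    force_meta = guess_target_metadata(atoms_lens, forces)
    return energy_meta["type"], force_meta["type"]       # "per-image", "per-atom"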
def guess_property_metadata(atoms_list):
atoms = atoms_list[0]
atoms_len = [len(atoms) for atoms in atoms_list]
targets = {}
if hasattr(atoms, "info"):
for key in atoms.info:
# Grab the property samples from the list of atoms
target_samples = [
np.array(atoms.info[key]) for atoms in atoms_list
]
# Guess the metadata
targets[f"info.{key}"] = guess_target_metadata(
atoms_len, target_samples
)
# Log a warning so the user knows what's happening
logging.warning(
f'Guessed metadata for atoms.info["{key}"]: {str(targets[f"info.{key}"])}'
)
if hasattr(atoms, "calc") and atoms.calc is not None:
for key in atoms.calc.results:
# Grab the property samples from the list of atoms
target_samples = [
np.array(atoms.calc.results[key]) for atoms in atoms_list
]
# Guess the metadata
targets[f"{key}"] = guess_target_metadata(
atoms_len, target_samples
)
# Log a warning so the user knows what's happening
logging.warning(
f'Guessed metadata for ASE calculator property ["{key}"]: {str(targets[key])}'
)
return targets
| 7,441 | 36.585859 | 226 | py |
ocp | ocp-main/ocpmodels/datasets/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .lmdb_dataset import (
LmdbDataset,
SinglePointLmdbDataset,
TrajectoryLmdbDataset,
data_list_collater,
)
from .oc22_lmdb_dataset import OC22LmdbDataset
from .ase_datasets import (
AseReadDataset,
AseReadMultiStructureDataset,
AseDBDataset,
)
| 454 | 22.947368 | 65 | py |
ocp | ocp-main/ocpmodels/datasets/embeddings/continuous_embeddings.py | """
CGCNN-like embeddings using continuous values instead of original k-hot.
Properties:
Group number
Period number
Electronegativity
Covalent radius
Valence electrons
First ionization energy
Electron affinity
Block
Atomic Volume
NaN stored for unavailable parameters.
"""
CONTINUOUS_EMBEDDINGS = {
0: [
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
],
1: [
1.0,
1.0,
2.1877708435058594,
31.0,
1.0,
13.598434448242188,
0.754194974899292,
1.0,
14.100000381469727,
],
2: [
18.0,
1.0,
1.0,
28.0,
2.0,
24.587387084960938,
-19.700000762939453,
1.0,
31.799999237060547,
],
3: [
1.0,
2.0,
0.04886792600154877,
128.0,
1.0,
5.391714572906494,
0.6180490255355835,
1.0,
13.100000381469727,
],
4: [
2.0,
2.0,
0.1268472671508789,
96.0,
2.0,
9.322698593139648,
-2.4000000953674316,
1.0,
5.0,
],
5: [
13.0,
2.0,
0.25462737679481506,
84.0,
3.0,
8.298019409179688,
0.27972298860549927,
2.0,
4.599999904632568,
],
6: [
14.0,
2.0,
0.42752504348754883,
73.0,
4.0,
11.260295867919922,
1.2621190547943115,
2.0,
5.300000190734863,
],
7: [
15.0,
2.0,
0.5774819254875183,
71.0,
5.0,
14.534130096435547,
-1.399999976158142,
2.0,
17.299999237060547,
],
8: [
16.0,
2.0,
0.9416494369506836,
66.0,
6.0,
13.618054389953613,
1.461113452911377,
2.0,
14.0,
],
9: [
17.0,
2.0,
1.017681360244751,
57.0,
7.0,
17.422819137573242,
3.4011898040771484,
2.0,
17.100000381469727,
],
10: [
18.0,
2.0,
1.0,
58.0,
8.0,
21.56454086303711,
-3.0,
2.0,
16.799999237060547,
],
11: [
1.0,
3.0,
0.09459763765335083,
166.0,
1.0,
5.1390767097473145,
0.5479260087013245,
1.0,
23.700000762939453,
],
12: [
2.0,
3.0,
0.15242105722427368,
141.0,
2.0,
7.64623498916626,
-3.0,
1.0,
14.0,
],
13: [
13.0,
3.0,
0.2360926866531372,
121.0,
3.0,
5.9857683181762695,
0.43283000588417053,
2.0,
10.0,
],
14: [
14.0,
3.0,
0.3468157947063446,
111.0,
4.0,
8.15168285369873,
1.3895211219787598,
2.0,
12.100000381469727,
],
15: [
15.0,
3.0,
0.45102688670158386,
107.0,
5.0,
10.486685752868652,
0.7466070055961609,
2.0,
17.0,
],
16: [
16.0,
3.0,
0.6397251486778259,
105.0,
6.0,
10.360010147094727,
2.077104091644287,
2.0,
15.5,
],
17: [
17.0,
3.0,
0.8123772740364075,
102.0,
7.0,
12.967630386352539,
3.612725019454956,
2.0,
18.700000762939453,
],
18: [
18.0,
3.0,
1.0,
106.0,
8.0,
15.759611129760742,
-11.5,
2.0,
24.200000762939453,
],
19: [
1.0,
4.0,
0.12183826416730881,
203.0,
1.0,
4.340663433074951,
0.5014700293540955,
1.0,
45.29999923706055,
],
20: [
2.0,
4.0,
0.1901577115058899,
176.0,
2.0,
6.113155364990234,
0.024550000205636024,
1.0,
29.899999618530273,
],
21: [
3.0,
4.0,
0.3038673996925354,
170.0,
3.0,
6.561490058898926,
0.18799999356269836,
3.0,
15.0,
],
22: [
4.0,
4.0,
0.4055461883544922,
160.0,
4.0,
6.828120231628418,
0.07900000363588333,
3.0,
10.600000381469727,
],
23: [
5.0,
4.0,
0.4388898015022278,
153.0,
5.0,
6.746187210083008,
0.5249999761581421,
3.0,
8.350000381469727,
],
24: [
6.0,
4.0,
0.6017723083496094,
139.0,
6.0,
6.766510009765625,
0.6660000085830688,
3.0,
7.230000019073486,
],
25: [
7.0,
4.0,
0.6707264184951782,
150.0,
7.0,
7.434018135070801,
-3.0,
3.0,
7.389999866485596,
],
26: [
8.0,
4.0,
0.748727023601532,
142.0,
8.0,
7.902467727661133,
0.1509999930858612,
3.0,
7.099999904632568,
],
27: [
9.0,
4.0,
0.8832423686981201,
138.0,
9.0,
7.881010055541992,
0.6622564792633057,
3.0,
6.699999809265137,
],
28: [
10.0,
4.0,
0.9377039670944214,
124.0,
10.0,
7.639876842498779,
1.156000018119812,
3.0,
6.599999904632568,
],
29: [
11.0,
4.0,
0.9175541996955872,
132.0,
11.0,
7.726379871368408,
1.2350000143051147,
3.0,
7.099999904632568,
],
30: [
12.0,
4.0,
0.8100876808166504,
122.0,
12.0,
9.39419937133789,
-3.0,
3.0,
9.199999809265137,
],
31: [
13.0,
4.0,
0.7205410003662109,
122.0,
3.0,
5.999301910400391,
0.4300000071525574,
2.0,
11.800000190734863,
],
32: [
14.0,
4.0,
0.8001470565795898,
120.0,
4.0,
7.899435043334961,
1.2327120304107666,
2.0,
13.600000381469727,
],
33: [
15.0,
4.0,
0.825337290763855,
119.0,
5.0,
9.788999557495117,
0.8040000200271606,
2.0,
13.100000381469727,
],
34: [
16.0,
4.0,
0.9659121036529541,
120.0,
6.0,
9.752391815185547,
2.020669937133789,
2.0,
16.5,
],
35: [
17.0,
4.0,
1.0490256547927856,
120.0,
7.0,
11.813810348510742,
3.3635880947113037,
2.0,
23.5,
],
36: [
18.0,
4.0,
1.0,
116.0,
8.0,
13.999605178833008,
-3.0,
2.0,
32.20000076293945,
],
37: [
1.0,
5.0,
0.1764136552810669,
220.0,
1.0,
4.177127838134766,
0.4859200119972229,
1.0,
55.900001525878906,
],
38: [
2.0,
5.0,
0.26317858695983887,
195.0,
2.0,
5.694867134094238,
0.04800000041723251,
1.0,
33.70000076293945,
],
39: [
3.0,
5.0,
0.39239412546157837,
190.0,
3.0,
6.217259883880615,
0.3070000112056732,
3.0,
19.799999237060547,
],
40: [
4.0,
5.0,
0.4744466543197632,
175.0,
4.0,
6.633900165557861,
0.4259999990463257,
3.0,
14.100000381469727,
],
41: [
5.0,
5.0,
0.5561695098876953,
164.0,
5.0,
6.75885009765625,
0.9174060225486755,
3.0,
10.800000190734863,
],
42: [
6.0,
5.0,
0.6852949857711792,
154.0,
6.0,
7.092430114746094,
0.7480000257492065,
3.0,
9.399999618530273,
],
43: [
7.0,
5.0,
0.8753613233566284,
147.0,
7.0,
7.119380950927734,
0.550000011920929,
3.0,
8.5,
],
44: [
8.0,
5.0,
0.9579373002052307,
146.0,
8.0,
7.360499858856201,
1.0499999523162842,
3.0,
8.300000190734863,
],
45: [
9.0,
5.0,
0.9761914610862732,
142.0,
9.0,
7.458899974822998,
1.1369999647140503,
3.0,
8.300000190734863,
],
46: [
10.0,
5.0,
1.1242631673812866,
139.0,
12.0,
8.336859703063965,
0.5619999766349792,
3.0,
8.899999618530273,
],
47: [
11.0,
5.0,
0.9437955021858215,
145.0,
11.0,
7.576233863830566,
1.3020000457763672,
3.0,
10.300000190734863,
],
48: [
12.0,
5.0,
0.8015620112419128,
144.0,
12.0,
8.99382209777832,
-3.0,
3.0,
13.100000381469727,
],
49: [
13.0,
5.0,
0.7172747254371643,
142.0,
3.0,
5.786355018615723,
0.30000001192092896,
2.0,
15.699999809265137,
],
50: [
14.0,
5.0,
0.7622796893119812,
139.0,
4.0,
7.343916893005371,
1.1120669841766357,
2.0,
16.299999237060547,
],
51: [
15.0,
5.0,
0.7762722373008728,
139.0,
5.0,
8.608388900756836,
1.0460000038146973,
2.0,
18.399999618530273,
],
52: [
16.0,
5.0,
0.8622506260871887,
138.0,
6.0,
9.009659767150879,
1.9708759784698486,
2.0,
20.5,
],
53: [
17.0,
5.0,
0.9386428594589233,
139.0,
7.0,
10.45125961303711,
3.0590367317199707,
2.0,
25.700000762939453,
],
54: [
18.0,
5.0,
1.0,
140.0,
8.0,
12.129842758178711,
-0.0560000017285347,
2.0,
42.900001525878906,
],
55: [
1.0,
6.0,
0.18145304918289185,
244.0,
1.0,
3.8939056396484375,
0.47162601351737976,
1.0,
70.0,
],
56: [
2.0,
6.0,
0.3032951354980469,
215.0,
2.0,
5.211664199829102,
0.14462000131607056,
1.0,
39.0,
],
57: [
3.0,
6.0,
0.39465051889419556,
207.0,
3.0,
5.576900005340576,
0.4699999988079071,
3.0,
22.5,
],
58: [
4.0,
6.0,
0.5356179475784302,
204.0,
2.0,
5.538599967956543,
0.6499999761581421,
4.0,
21.0,
],
59: [
5.0,
6.0,
0.4288040101528168,
203.0,
2.0,
5.4730000495910645,
0.9620000123977661,
4.0,
20.799999237060547,
],
60: [
6.0,
6.0,
0.44721803069114685,
201.0,
2.0,
5.525000095367432,
1.9160000085830688,
4.0,
20.600000381469727,
],
61: [
7.0,
6.0,
0.4585537314414978,
199.0,
2.0,
5.581999778747559,
-3.0,
4.0,
20.229999542236328,
],
62: [
8.0,
6.0,
0.47021451592445374,
198.0,
2.0,
5.643710136413574,
-3.0,
4.0,
19.899999618530273,
],
63: [
9.0,
6.0,
0.5085079669952393,
198.0,
2.0,
5.670384883880615,
0.8640000224113464,
4.0,
28.899999618530273,
],
64: [
10.0,
6.0,
0.5033860206604004,
196.0,
2.0,
6.149796009063721,
-3.0,
4.0,
19.899999618530273,
],
65: [
11.0,
6.0,
0.5163695216178894,
194.0,
2.0,
5.863800048828125,
1.1649999618530273,
4.0,
19.200000762939453,
],
66: [
12.0,
6.0,
0.5297338366508484,
192.0,
2.0,
5.939050197601318,
0.35199999809265137,
4.0,
19.0,
],
67: [
13.0,
6.0,
0.5434919595718384,
192.0,
2.0,
6.021500110626221,
-3.0,
4.0,
18.700000762939453,
],
68: [
14.0,
6.0,
0.5576573014259338,
189.0,
2.0,
6.107699871063232,
-3.0,
4.0,
18.399999618530273,
],
69: [
15.0,
6.0,
0.5722439289093018,
190.0,
2.0,
6.184309959411621,
1.0290000438690186,
4.0,
18.100000381469727,
],
70: [
16.0,
6.0,
0.517667829990387,
187.0,
2.0,
6.254159927368164,
-0.019999999552965164,
4.0,
24.799999237060547,
],
71: [
17.0,
6.0,
0.6027398109436035,
187.0,
2.0,
5.425870895385742,
0.3400000035762787,
4.0,
17.799999237060547,
],
72: [
4.0,
6.0,
0.7352124452590942,
175.0,
4.0,
6.825069904327393,
0.014000000432133675,
3.0,
13.600000381469727,
],
73: [
5.0,
6.0,
0.8358832001686096,
170.0,
5.0,
7.549570083618164,
0.32199999690055847,
3.0,
10.899999618530273,
],
74: [
6.0,
6.0,
1.0192831754684448,
162.0,
6.0,
7.864029884338379,
0.8162599802017212,
3.0,
9.529999732971191,
],
75: [
7.0,
6.0,
1.1745918989181519,
151.0,
7.0,
7.83351993560791,
0.15000000596046448,
3.0,
8.850000381469727,
],
76: [
8.0,
6.0,
1.2392759323120117,
144.0,
8.0,
8.43822956085205,
1.100000023841858,
3.0,
8.430000305175781,
],
77: [
9.0,
6.0,
1.4759982824325562,
141.0,
9.0,
8.967020034790039,
1.5637999773025513,
3.0,
8.539999961853027,
],
78: [
10.0,
6.0,
1.4510095119476318,
136.0,
10.0,
8.958829879760742,
2.128000020980835,
3.0,
9.100000381469727,
],
79: [
11.0,
6.0,
1.4267007112503052,
136.0,
11.0,
9.225552558898926,
2.3086299896240234,
3.0,
10.199999809265137,
],
80: [
12.0,
6.0,
1.1647894382476807,
132.0,
12.0,
10.437503814697266,
-3.0,
3.0,
14.800000190734863,
],
81: [
13.0,
6.0,
0.924509584903717,
145.0,
3.0,
6.1082868576049805,
0.37700000405311584,
2.0,
17.200000762939453,
],
82: [
14.0,
6.0,
0.9313225746154785,
146.0,
4.0,
7.416679382324219,
0.3567431569099426,
2.0,
18.299999237060547,
],
83: [
15.0,
6.0,
0.8136501312255859,
148.0,
5.0,
7.285515785217285,
0.9423620104789734,
2.0,
21.299999237060547,
],
84: [
16.0,
6.0,
0.9256306886672974,
140.0,
6.0,
8.413999557495117,
1.899999976158142,
2.0,
22.700000762939453,
],
85: [
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
],
86: [18.0, 6.0, 1.0, 150.0, 8.0, 10.748499870300293, -3.0, 2.0, 50.5],
87: [
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
],
88: [
2.0,
7.0,
0.3596253991127014,
221.0,
2.0,
5.27842378616333,
0.10000000149011612,
1.0,
45.0,
],
89: [
3.0,
7.0,
0.4583164155483246,
215.0,
3.0,
5.380226135253906,
0.3499999940395355,
3.0,
22.540000915527344,
],
90: [
4.0,
7.0,
0.5557018518447876,
206.0,
2.0,
6.306700229644775,
-3.0,
4.0,
19.799999237060547,
],
91: [
5.0,
7.0,
0.623065710067749,
200.0,
2.0,
5.889999866485596,
-3.0,
4.0,
15.0,
],
92: [
6.0,
7.0,
0.6181179881095886,
196.0,
2.0,
6.194049835205078,
-3.0,
4.0,
12.5,
],
93: [
7.0,
7.0,
0.6132539510726929,
190.0,
2.0,
6.265500068664551,
-3.0,
4.0,
21.100000381469727,
],
94: [
8.0,
7.0,
0.6084716320037842,
187.0,
2.0,
6.0258002281188965,
-3.0,
4.0,
12.289999961853027,
],
95: [
9.0,
7.0,
0.6834156513214111,
180.0,
2.0,
5.973800182342529,
-3.0,
4.0,
20.799999237060547,
],
96: [
10.0,
7.0,
0.6900094747543335,
169.0,
2.0,
5.991399765014648,
-3.0,
4.0,
18.280000686645508,
],
97: [
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
],
98: [
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
],
99: [
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
],
100: [
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
float("NaN"),
],
}
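# A hedged usage sketch (assumes torch is installed): one way these rows might
# be consumed is to stack them into an embedding table indexed by atomic
# number, replacing the NaN placeholders first.
def _example_embedding_table(max_z: int = 100):
    import torch
    rows = [CONTINUOUS_EMBEDDINGS[z] for z in range(max_z + 1)]
    table = torch.tensor(rows, dtype=torch.float)    # shape: (max_z + 1, 9)
    table = torch.nan_to_num(table, nan=0.0)         # unavailable entries -> 0.0
    return table[6]                                  # 9-dim embedding for carbon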
| 19,481 | 16.394643 | 74 | py |
ocp | ocp-main/ocpmodels/datasets/embeddings/khot_embeddings.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
Original CGCNN k-hot elemental embeddings.
"""
KHOT_EMBEDDINGS = {
1: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
2: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
3: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
4: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
5: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
6: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
7: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
8: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
9: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
10: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
11: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
12: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
13: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
14: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
15: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
16: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
17: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
18: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
19: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
20: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
21: [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
22: [
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
23: [
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
24: [
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
25: [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
26: [
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
27: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
28: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
29: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
],
30: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
31: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
32: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
33: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
34: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
35: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
36: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
37: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
38: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
39: [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
40: [
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
41: [
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
42: [
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
43: [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
44: [
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
45: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
46: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
47: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
48: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
49: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
50: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
51: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
52: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
53: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
54: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
55: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
56: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
57: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
58: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
59: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
60: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
61: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
62: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
63: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
64: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
65: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
66: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
67: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
68: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
69: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
70: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
71: [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
72: [
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
73: [
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
74: [
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
75: [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
76: [
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
77: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
78: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
79: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
],
80: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
81: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
82: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
83: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
84: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
85: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
86: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
87: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
88: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
89: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
90: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
91: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
],
92: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
],
93: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
94: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
95: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
96: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
],
97: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
98: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
99: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
100: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
}
| 103,138 | 9.957081 | 63 | py |
ocp | ocp-main/ocpmodels/datasets/embeddings/qmof_khot_embeddings.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
k-hot elemental embeddings from QMOF, motivated by the following GitHub issue threads:
https://github.com/txie-93/cgcnn/issues/2
https://github.com/arosen93/QMOF/issues/18
"""
QMOF_KHOT_EMBEDDINGS = {
1: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
],
2: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
],
3: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
4: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
5: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
6: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
],
7: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
],
8: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
],
9: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
],
10: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
],
11: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
12: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
13: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
14: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
15: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
],
16: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
],
17: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
],
18: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
],
19: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
20: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
21: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
22: [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
23: [
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
24: [
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
25: [
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
26: [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
27: [
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
28: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
29: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
30: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
31: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
32: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
33: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
34: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
35: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
],
36: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
],
37: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
38: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
39: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
40: [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
41: [
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
42: [
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
43: [
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
44: [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
45: [
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
46: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
47: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
48: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
49: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
50: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
51: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
52: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
53: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
],
54: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
],
55: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
56: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
57: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
58: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
59: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
60: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
61: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
62: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
63: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
64: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
65: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
66: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
67: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
68: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
69: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
70: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
71: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
72: [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
73: [
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
74: [
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
75: [
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
76: [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
77: [
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
78: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
79: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
80: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
],
81: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
82: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
83: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
84: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
85: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
],
86: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
],
87: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
88: [
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
],
89: [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
],
90: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
91: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
92: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
93: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
94: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
95: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
96: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
97: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
98: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
99: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
100: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
101: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
102: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
103: [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
],
}
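# A minimal sketch (not part of the original file) of how these k-hot vectors
# might be assembled into a per-atom feature matrix; the atomic numbers below
# are placeholders chosen only for illustration, and torch is assumed to be
# available.
if __name__ == "__main__":
    import torch

    example_atomic_numbers = [1, 6, 8, 29]  # H, C, O, Cu
    # each row is the k-hot embedding of one atom, keyed by atomic number
    features = torch.tensor(
        [QMOF_KHOT_EMBEDDINGS[z] for z in example_atomic_numbers],
        dtype=torch.float,
    )
    print(features.shape)  # (num_atoms, embedding_dim)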
| 83,702 | 9.960194 | 86 | py |
ocp | ocp-main/ocpmodels/datasets/embeddings/__init__.py | __all__ = [
"ATOMIC_RADII",
"KHOT_EMBEDDINGS",
"CONTINUOUS_EMBEDDINGS",
"QMOF_KHOT_EMBEDDINGS",
]
from .atomic_radii import ATOMIC_RADII
from .continuous_embeddings import CONTINUOUS_EMBEDDINGS
from .khot_embeddings import KHOT_EMBEDDINGS
from .qmof_khot_embeddings import QMOF_KHOT_EMBEDDINGS
| 311 | 25 | 56 | py |
ocp | ocp-main/ocpmodels/datasets/embeddings/atomic_radii.py | """
Atomic radii in picometers
NaN stored for unavailable parameters.
"""
ATOMIC_RADII = {
0: float("NaN"),
1: 25.0,
2: 120.0,
3: 145.0,
4: 105.0,
5: 85.0,
6: 70.0,
7: 65.0,
8: 60.0,
9: 50.0,
10: 160.0,
11: 180.0,
12: 150.0,
13: 125.0,
14: 110.0,
15: 100.0,
16: 100.0,
17: 100.0,
18: 71.0,
19: 220.0,
20: 180.0,
21: 160.0,
22: 140.0,
23: 135.0,
24: 140.0,
25: 140.0,
26: 140.0,
27: 135.0,
28: 135.0,
29: 135.0,
30: 135.0,
31: 130.0,
32: 125.0,
33: 115.0,
34: 115.0,
35: 115.0,
36: float("NaN"),
37: 235.0,
38: 200.0,
39: 180.0,
40: 155.0,
41: 145.0,
42: 145.0,
43: 135.0,
44: 130.0,
45: 135.0,
46: 140.0,
47: 160.0,
48: 155.0,
49: 155.0,
50: 145.0,
51: 145.0,
52: 140.0,
53: 140.0,
54: float("NaN"),
55: 260.0,
56: 215.0,
57: 195.0,
58: 185.0,
59: 185.0,
60: 185.0,
61: 185.0,
62: 185.0,
63: 185.0,
64: 180.0,
65: 175.0,
66: 175.0,
67: 175.0,
68: 175.0,
69: 175.0,
70: 175.0,
71: 175.0,
72: 155.0,
73: 145.0,
74: 135.0,
75: 135.0,
76: 130.0,
77: 135.0,
78: 135.0,
79: 135.0,
80: 150.0,
81: 190.0,
82: 180.0,
83: 160.0,
84: 190.0,
85: float("NaN"),
86: float("NaN"),
87: float("NaN"),
88: 215.0,
89: 195.0,
90: 180.0,
91: 180.0,
92: 175.0,
93: 175.0,
94: 175.0,
95: 175.0,
96: float("NaN"),
97: float("NaN"),
98: float("NaN"),
99: float("NaN"),
100: float("NaN"),
}
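# A minimal sketch (not part of the original file) showing a NaN-aware lookup,
# since unavailable radii above are stored as float("NaN"); the atomic numbers
# are placeholders chosen only for illustration.
if __name__ == "__main__":
    import math

    for z in (1, 26, 85):  # H, Fe, At (no tabulated radius for At)
        radius = ATOMIC_RADII[z]
        print(z, "unavailable" if math.isnan(radius) else f"{radius} pm")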
| 1,670 | 14.330275 | 38 | py |
ocp | ocp-main/ocpmodels/tasks/task.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from ocpmodels.common.registry import registry
from ocpmodels.trainers.forces_trainer import ForcesTrainer
class BaseTask:
def __init__(self, config) -> None:
self.config = config
def setup(self, trainer) -> None:
self.trainer = trainer
if self.config["checkpoint"] is not None:
self.trainer.load_checkpoint(self.config["checkpoint"])
# save checkpoint path to runner state for slurm resubmissions
self.chkpt_path = os.path.join(
self.trainer.config["cmd"]["checkpoint_dir"], "checkpoint.pt"
)
def run(self):
raise NotImplementedError
@registry.register_task("train")
class TrainTask(BaseTask):
def _process_error(self, e: RuntimeError) -> None:
e_str = str(e)
if (
"find_unused_parameters" in e_str
and "torch.nn.parallel.DistributedDataParallel" in e_str
):
for name, parameter in self.trainer.model.named_parameters():
if parameter.requires_grad and parameter.grad is None:
logging.warning(
f"Parameter {name} has no gradient. Consider removing it from the model."
)
def run(self) -> None:
try:
self.trainer.train(
disable_eval_tqdm=self.config.get(
"hide_eval_progressbar", False
)
)
except RuntimeError as e:
self._process_error(e)
raise e
@registry.register_task("predict")
class PredictTask(BaseTask):
def run(self) -> None:
assert (
self.trainer.test_loader is not None
), "Test dataset is required for making predictions"
assert self.config["checkpoint"]
results_file = "predictions"
self.trainer.predict(
self.trainer.test_loader,
results_file=results_file,
disable_tqdm=self.config.get("hide_eval_progressbar", False),
)
@registry.register_task("validate")
class ValidateTask(BaseTask):
def run(self) -> None:
# Note that the results won't be precise on multi GPUs due to padding of extra images (although the difference should be minor)
assert (
self.trainer.val_loader is not None
), "Val dataset is required for making predictions"
assert self.config["checkpoint"]
self.trainer.validate(
split="val",
disable_tqdm=self.config.get("hide_eval_progressbar", False),
)
@registry.register_task("run-relaxations")
class RelxationTask(BaseTask):
def run(self) -> None:
assert isinstance(
self.trainer, ForcesTrainer
), "Relaxations are only possible for ForcesTrainer"
assert (
self.trainer.relax_dataset is not None
), "Relax dataset is required for making predictions"
assert self.config["checkpoint"]
self.trainer.run_relaxations()
| 3,181 | 31.141414 | 135 | py |
ocp | ocp-main/ocpmodels/tasks/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ["TrainTask", "PredictTask", "ValidateTask", "RelxationTask"]
from .task import PredictTask, RelxationTask, TrainTask, ValidateTask
| 321 | 34.777778 | 71 | py |
ocp | ocp-main/ocpmodels/preprocessing/atoms_to_graphs.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Optional
import ase.db.sqlite
import ase.io.trajectory
import numpy as np
import torch
from torch_geometric.data import Data
from ocpmodels.common.utils import collate
try:
from pymatgen.io.ase import AseAtomsAdaptor
except Exception:
pass
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
except NameError:
from tqdm import tqdm
class AtomsToGraphs:
"""A class to help convert periodic atomic structures to graphs.
    The AtomsToGraphs class takes in periodic atomic structures in the form of ASE atoms objects and converts
them into graph representations for use in PyTorch. The primary purpose of this class is to determine the
nearest neighbors within some radius around each individual atom, taking into account PBC, and set the
pair index and distance between atom pairs appropriately. Lastly, atomic properties and the graph information
are put into a PyTorch geometric data object for use with PyTorch.
Args:
max_neigh (int): Maximum number of neighbors to consider.
radius (int or float): Cutoff radius in Angstroms to search for neighbors.
r_energy (bool): Return the energy with other properties. Default is False, so the energy will not be returned.
r_forces (bool): Return the forces with other properties. Default is False, so the forces will not be returned.
r_distances (bool): Return the distances with other properties.
Default is False, so the distances will not be returned.
r_edges (bool): Return interatomic edges with other properties. Default is True, so edges will be returned.
r_fixed (bool): Return a binary vector with flags for fixed (1) vs free (0) atoms.
Default is True, so the fixed indices will be returned.
r_pbc (bool): Return the periodic boundary conditions with other properties.
Default is False, so the periodic boundary conditions will not be returned.
Attributes:
max_neigh (int): Maximum number of neighbors to consider.
        radius (int or float): Cutoff radius in Angstroms to search for neighbors.
r_energy (bool): Return the energy with other properties. Default is False, so the energy will not be returned.
r_forces (bool): Return the forces with other properties. Default is False, so the forces will not be returned.
r_distances (bool): Return the distances with other properties.
Default is False, so the distances will not be returned.
r_edges (bool): Return interatomic edges with other properties. Default is True, so edges will be returned.
r_fixed (bool): Return a binary vector with flags for fixed (1) vs free (0) atoms.
Default is True, so the fixed indices will be returned.
r_pbc (bool): Return the periodic boundary conditions with other properties.
Default is False, so the periodic boundary conditions will not be returned.
"""
def __init__(
self,
max_neigh: int = 200,
radius: int = 6,
r_energy: bool = False,
r_forces: bool = False,
r_distances: bool = False,
r_edges: bool = True,
r_fixed: bool = True,
r_pbc: bool = False,
) -> None:
self.max_neigh = max_neigh
self.radius = radius
self.r_energy = r_energy
self.r_forces = r_forces
self.r_distances = r_distances
self.r_fixed = r_fixed
self.r_edges = r_edges
self.r_pbc = r_pbc
def _get_neighbors_pymatgen(self, atoms: ase.Atoms):
"""Preforms nearest neighbor search and returns edge index, distances,
and cell offsets"""
struct = AseAtomsAdaptor.get_structure(atoms)
_c_index, _n_index, _offsets, n_distance = struct.get_neighbor_list(
r=self.radius, numerical_tol=0, exclude_self=True
)
_nonmax_idx = []
for i in range(len(atoms)):
idx_i = (_c_index == i).nonzero()[0]
# sort neighbors by distance, remove edges larger than max_neighbors
idx_sorted = np.argsort(n_distance[idx_i])[: self.max_neigh]
_nonmax_idx.append(idx_i[idx_sorted])
_nonmax_idx = np.concatenate(_nonmax_idx)
_c_index = _c_index[_nonmax_idx]
_n_index = _n_index[_nonmax_idx]
n_distance = n_distance[_nonmax_idx]
_offsets = _offsets[_nonmax_idx]
return _c_index, _n_index, n_distance, _offsets
def _reshape_features(self, c_index, n_index, n_distance, offsets):
"""Stack center and neighbor index and reshapes distances,
takes in np.arrays and returns torch tensors"""
edge_index = torch.LongTensor(np.vstack((n_index, c_index)))
edge_distances = torch.FloatTensor(n_distance)
cell_offsets = torch.LongTensor(offsets)
# remove distances smaller than a tolerance ~ 0. The small tolerance is
# needed to correct for pymatgen's neighbor_list returning self atoms
# in a few edge cases.
nonzero = torch.where(edge_distances >= 1e-8)[0]
edge_index = edge_index[:, nonzero]
edge_distances = edge_distances[nonzero]
cell_offsets = cell_offsets[nonzero]
return edge_index, edge_distances, cell_offsets
def convert(self, atoms: ase.Atoms, sid=None):
"""Convert a single atomic stucture to a graph.
Args:
atoms (ase.atoms.Atoms): An ASE atoms object.
sid (uniquely identifying object): An identifier that can be used to track the structure in downstream
tasks. Common sids used in OCP datasets include unique strings or integers.
Returns:
            data (torch_geometric.data.Data): A torch geometric data object with positions, atomic_numbers, tags,
            and optionally, energy, forces, distances, edges, and periodic boundary conditions.
            Optional properties can be included by setting r_property=True when constructing the class.
"""
# set the atomic numbers, positions, and cell
atomic_numbers = torch.Tensor(atoms.get_atomic_numbers())
positions = torch.Tensor(atoms.get_positions())
cell = torch.Tensor(np.array(atoms.get_cell())).view(1, 3, 3)
natoms = positions.shape[0]
# initialized to torch.zeros(natoms) if tags missing.
# https://wiki.fysik.dtu.dk/ase/_modules/ase/atoms.html#Atoms.get_tags
tags = torch.Tensor(atoms.get_tags())
# put the minimum data in torch geometric data object
data = Data(
cell=cell,
pos=positions,
atomic_numbers=atomic_numbers,
natoms=natoms,
tags=tags,
)
# Optionally add a systemid (sid) to the object
if sid is not None:
data.sid = sid
# optionally include other properties
if self.r_edges:
# run internal functions to get padded indices and distances
split_idx_dist = self._get_neighbors_pymatgen(atoms)
edge_index, edge_distances, cell_offsets = self._reshape_features(
*split_idx_dist
)
data.edge_index = edge_index
data.cell_offsets = cell_offsets
if self.r_energy:
energy = atoms.get_potential_energy(apply_constraint=False)
data.y = energy
if self.r_forces:
forces = torch.Tensor(atoms.get_forces(apply_constraint=False))
data.force = forces
if self.r_distances and self.r_edges:
data.distances = edge_distances
if self.r_fixed:
fixed_idx = torch.zeros(natoms)
if hasattr(atoms, "constraints"):
from ase.constraints import FixAtoms
for constraint in atoms.constraints:
if isinstance(constraint, FixAtoms):
fixed_idx[constraint.index] = 1
data.fixed = fixed_idx
if self.r_pbc:
data.pbc = torch.tensor(atoms.pbc)
return data
def convert_all(
self,
atoms_collection,
processed_file_path: Optional[str] = None,
collate_and_save=False,
disable_tqdm=False,
):
"""Convert all atoms objects in a list or in an ase.db to graphs.
Args:
atoms_collection (list of ase.atoms.Atoms or ase.db.sqlite.SQLite3Database):
Either a list of ASE atoms objects or an ASE database.
processed_file_path (str):
A string of the path to where the processed file will be written. Default is None.
            collate_and_save (bool): Whether to collate the data list and save it to processed_file_path. Default is False, so no file will be written.
Returns:
data_list (list of torch_geometric.data.Data):
A list of torch geometric data objects containing molecular graph info and properties.
"""
# list for all data
data_list = []
if isinstance(atoms_collection, list):
atoms_iter = atoms_collection
elif isinstance(atoms_collection, ase.db.sqlite.SQLite3Database):
atoms_iter = atoms_collection.select()
elif isinstance(
atoms_collection, ase.io.trajectory.SlicedTrajectory
) or isinstance(atoms_collection, ase.io.trajectory.TrajectoryReader):
atoms_iter = atoms_collection
else:
raise NotImplementedError
for atoms in tqdm(
atoms_iter,
desc="converting ASE atoms collection to graphs",
total=len(atoms_collection),
unit=" systems",
disable=disable_tqdm,
):
            # check if atoms is an ASE Atoms object; this is for the ase.db case
if not isinstance(atoms, ase.atoms.Atoms):
atoms = atoms.toatoms()
data = self.convert(atoms)
data_list.append(data)
if collate_and_save:
data, slices = collate(data_list)
torch.save((data, slices), processed_file_path)
return data_list
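# A minimal usage sketch (not part of the original module) that drives the
# converter documented above on a toy bulk-Cu cell. It assumes ASE and
# pymatgen are installed; the structure, cutoff, and sid are placeholders
# chosen only for illustration.
if __name__ == "__main__":
    from ase.build import bulk

    example_atoms = bulk("Cu", "fcc", a=3.6, cubic=True)
    a2g = AtomsToGraphs(max_neigh=50, radius=6, r_energy=False, r_forces=False)
    example_graph = a2g.convert(example_atoms, sid="example-0")
    # edge_index and cell_offsets are present because r_edges defaults to True
    print(example_graph)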
| 10,360 | 40.115079 | 119 | py |
ocp | ocp-main/ocpmodels/preprocessing/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .atoms_to_graphs import AtomsToGraphs
| 222 | 23.777778 | 63 | py |
ocp | ocp-main/ocpmodels/trainers/base_trainer.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import datetime
import errno
import logging
import os
import random
import subprocess
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import cast, Dict, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import yaml
from torch.nn.parallel.distributed import DistributedDataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
import ocpmodels
from ocpmodels.common import distutils, gp_utils
from ocpmodels.common.data_parallel import (
BalancedBatchSampler,
OCPDataParallel,
ParallelCollater,
)
from ocpmodels.common.registry import registry
from ocpmodels.common.typing import assert_is_instance
from ocpmodels.common.utils import load_state_dict, save_checkpoint
from ocpmodels.modules.evaluator import Evaluator
from ocpmodels.modules.exponential_moving_average import (
ExponentialMovingAverage,
)
from ocpmodels.modules.loss import AtomwiseL2Loss, DDPLoss, L2MAELoss
from ocpmodels.modules.normalizer import Normalizer
from ocpmodels.modules.scaling.compat import load_scales_compat
from ocpmodels.modules.scaling.util import ensure_fitted
from ocpmodels.modules.scheduler import LRScheduler
@registry.register_trainer("base")
class BaseTrainer(ABC):
@property
def _unwrapped_model(self):
module = self.model
while isinstance(module, (OCPDataParallel, DistributedDataParallel)):
module = module.module
return module
def __init__(
self,
task,
model,
dataset,
optimizer,
identifier,
normalizer=None,
timestamp_id: Optional[str] = None,
run_dir=None,
is_debug: bool = False,
is_hpo: bool = False,
print_every: int = 100,
seed=None,
logger: str = "tensorboard",
local_rank: int = 0,
amp: bool = False,
cpu: bool = False,
name: str = "base_trainer",
slurm={},
noddp: bool = False,
) -> None:
self.name = name
self.cpu = cpu
self.epoch = 0
self.step = 0
if torch.cuda.is_available() and not self.cpu:
self.device = torch.device(f"cuda:{local_rank}")
else:
self.device = torch.device("cpu")
self.cpu = True # handle case when `--cpu` isn't specified
# but there are no gpu devices available
if run_dir is None:
run_dir = os.getcwd()
if timestamp_id is None:
timestamp = torch.tensor(datetime.datetime.now().timestamp()).to(
self.device
)
# create directories from master rank only
distutils.broadcast(timestamp, 0)
timestamp = datetime.datetime.fromtimestamp(
timestamp.float().item()
).strftime("%Y-%m-%d-%H-%M-%S")
if identifier:
self.timestamp_id = f"{timestamp}-{identifier}"
else:
self.timestamp_id = timestamp
else:
self.timestamp_id = timestamp_id
try:
commit_hash = (
subprocess.check_output(
[
"git",
"-C",
assert_is_instance(ocpmodels.__path__[0], str),
"describe",
"--always",
]
)
.strip()
.decode("ascii")
)
# catch instances where code is not being run from a git repo
except Exception:
commit_hash = None
logger_name = logger if isinstance(logger, str) else logger["name"]
self.config = {
"task": task,
"trainer": "forces" if name == "s2ef" else "energy",
"model": assert_is_instance(model.pop("name"), str),
"model_attributes": model,
"optim": optimizer,
"logger": logger,
"amp": amp,
"gpus": distutils.get_world_size() if not self.cpu else 0,
"cmd": {
"identifier": identifier,
"print_every": print_every,
"seed": seed,
"timestamp_id": self.timestamp_id,
"commit": commit_hash,
"checkpoint_dir": os.path.join(
run_dir, "checkpoints", self.timestamp_id
),
"results_dir": os.path.join(
run_dir, "results", self.timestamp_id
),
"logs_dir": os.path.join(
run_dir, "logs", logger_name, self.timestamp_id
),
},
"slurm": slurm,
"noddp": noddp,
}
# AMP Scaler
self.scaler = torch.cuda.amp.GradScaler() if amp else None
if "SLURM_JOB_ID" in os.environ and "folder" in self.config["slurm"]:
if "SLURM_ARRAY_JOB_ID" in os.environ:
self.config["slurm"]["job_id"] = "%s_%s" % (
os.environ["SLURM_ARRAY_JOB_ID"],
os.environ["SLURM_ARRAY_TASK_ID"],
)
else:
self.config["slurm"]["job_id"] = os.environ["SLURM_JOB_ID"]
self.config["slurm"]["folder"] = self.config["slurm"][
"folder"
].replace("%j", self.config["slurm"]["job_id"])
if isinstance(dataset, list):
if len(dataset) > 0:
self.config["dataset"] = dataset[0]
if len(dataset) > 1:
self.config["val_dataset"] = dataset[1]
if len(dataset) > 2:
self.config["test_dataset"] = dataset[2]
elif isinstance(dataset, dict):
self.config["dataset"] = dataset.get("train", None)
self.config["val_dataset"] = dataset.get("val", None)
self.config["test_dataset"] = dataset.get("test", None)
else:
self.config["dataset"] = dataset
self.normalizer = normalizer
# This supports the legacy way of providing norm parameters in dataset
if self.config.get("dataset", None) is not None and normalizer is None:
self.normalizer = self.config["dataset"]
if not is_debug and distutils.is_master() and not is_hpo:
os.makedirs(self.config["cmd"]["checkpoint_dir"], exist_ok=True)
os.makedirs(self.config["cmd"]["results_dir"], exist_ok=True)
os.makedirs(self.config["cmd"]["logs_dir"], exist_ok=True)
self.is_debug = is_debug
self.is_hpo = is_hpo
if self.is_hpo:
# conditional import is necessary for checkpointing
# sets the hpo checkpoint frequency
# default is no checkpointing
self.hpo_checkpoint_every = self.config["optim"].get(
"checkpoint_every", -1
)
if distutils.is_master():
print(yaml.dump(self.config, default_flow_style=False))
self.load()
self.evaluator = Evaluator(task=name)
def load(self) -> None:
self.load_seed_from_config()
self.load_logger()
self.load_datasets()
self.load_task()
self.load_model()
self.load_loss()
self.load_optimizer()
self.load_extras()
def load_seed_from_config(self) -> None:
# https://pytorch.org/docs/stable/notes/randomness.html
seed = self.config["cmd"]["seed"]
if seed is None:
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def load_logger(self) -> None:
self.logger = None
if not self.is_debug and distutils.is_master() and not self.is_hpo:
assert (
self.config["logger"] is not None
), "Specify logger in config"
logger = self.config["logger"]
logger_name = logger if isinstance(logger, str) else logger["name"]
assert logger_name, "Specify logger name"
self.logger = registry.get_logger_class(logger_name)(self.config)
def get_sampler(
self, dataset, batch_size: int, shuffle: bool
) -> BalancedBatchSampler:
if "load_balancing" in self.config["optim"]:
balancing_mode = self.config["optim"]["load_balancing"]
force_balancing = True
else:
balancing_mode = "atoms"
force_balancing = False
if gp_utils.initialized():
num_replicas = gp_utils.get_dp_world_size()
rank = gp_utils.get_dp_rank()
else:
num_replicas = distutils.get_world_size()
rank = distutils.get_rank()
sampler = BalancedBatchSampler(
dataset,
batch_size=batch_size,
num_replicas=num_replicas,
rank=rank,
device=self.device,
mode=balancing_mode,
shuffle=shuffle,
force_balancing=force_balancing,
)
return sampler
def get_dataloader(self, dataset, sampler) -> DataLoader:
loader = DataLoader(
dataset,
collate_fn=self.parallel_collater,
num_workers=self.config["optim"]["num_workers"],
pin_memory=True,
batch_sampler=sampler,
)
return loader
def load_datasets(self) -> None:
self.parallel_collater = ParallelCollater(
0 if self.cpu else 1,
self.config["model_attributes"].get("otf_graph", False),
)
self.train_loader = None
self.val_loader = None
self.test_loader = None
if self.config.get("dataset", None):
self.train_dataset = registry.get_dataset_class(
self.config["task"]["dataset"]
)(self.config["dataset"])
self.train_sampler = self.get_sampler(
self.train_dataset,
self.config["optim"]["batch_size"],
shuffle=True,
)
self.train_loader = self.get_dataloader(
self.train_dataset,
self.train_sampler,
)
if self.config.get("val_dataset", None):
self.val_dataset = registry.get_dataset_class(
self.config["task"]["dataset"]
)(self.config["val_dataset"])
self.val_sampler = self.get_sampler(
self.val_dataset,
self.config["optim"].get(
"eval_batch_size", self.config["optim"]["batch_size"]
),
shuffle=False,
)
self.val_loader = self.get_dataloader(
self.val_dataset,
self.val_sampler,
)
if self.config.get("test_dataset", None):
self.test_dataset = registry.get_dataset_class(
self.config["task"]["dataset"]
)(self.config["test_dataset"])
self.test_sampler = self.get_sampler(
self.test_dataset,
self.config["optim"].get(
"eval_batch_size", self.config["optim"]["batch_size"]
),
shuffle=False,
)
self.test_loader = self.get_dataloader(
self.test_dataset,
self.test_sampler,
)
# Normalizer for the dataset.
# Compute mean, std of training set labels.
self.normalizers = {}
if self.normalizer.get("normalize_labels", False):
if "target_mean" in self.normalizer:
self.normalizers["target"] = Normalizer(
mean=self.normalizer["target_mean"],
std=self.normalizer["target_std"],
device=self.device,
)
else:
self.normalizers["target"] = Normalizer(
tensor=self.train_loader.dataset.data.y[
self.train_loader.dataset.__indices__
],
device=self.device,
)
@abstractmethod
def load_task(self):
"""Initialize task-specific information. Derived classes should implement this function."""
def load_model(self) -> None:
# Build model
if distutils.is_master():
logging.info(f"Loading model: {self.config['model']}")
        # TODO: deprecated, remove.
bond_feat_dim = None
bond_feat_dim = self.config["model_attributes"].get(
"num_gaussians", 50
)
loader = self.train_loader or self.val_loader or self.test_loader
self.model = registry.get_model_class(self.config["model"])(
loader.dataset[0].x.shape[-1]
if loader
and hasattr(loader.dataset[0], "x")
and loader.dataset[0].x is not None
else None,
bond_feat_dim,
self.num_targets,
**self.config["model_attributes"],
).to(self.device)
if distutils.is_master():
logging.info(
f"Loaded {self.model.__class__.__name__} with "
f"{self.model.num_params} parameters."
)
if self.logger is not None:
self.logger.watch(self.model)
self.model = OCPDataParallel(
self.model,
output_device=self.device,
num_gpus=1 if not self.cpu else 0,
)
if distutils.initialized() and not self.config["noddp"]:
self.model = DistributedDataParallel(
self.model, device_ids=[self.device]
)
def load_checkpoint(self, checkpoint_path: str) -> None:
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
errno.ENOENT, "Checkpoint file not found", checkpoint_path
)
logging.info(f"Loading checkpoint from: {checkpoint_path}")
map_location = torch.device("cpu") if self.cpu else self.device
checkpoint = torch.load(checkpoint_path, map_location=map_location)
self.epoch = checkpoint.get("epoch", 0)
self.step = checkpoint.get("step", 0)
self.best_val_metric = checkpoint.get("best_val_metric", None)
self.primary_metric = checkpoint.get("primary_metric", None)
# Match the "module." count in the keys of model and checkpoint state_dict
# DataParallel model has 1 "module.", DistributedDataParallel has 2 "module."
# Not using either of the above two would have no "module."
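        # e.g. a parameter stored as "gnn.weight" in a bare-model checkpoint
        # appears as "module.gnn.weight" under OCPDataParallel and as
        # "module.module.gnn.weight" once additionally wrapped in DDP; the
        # prefix is added or stripped below so the two state dicts line up.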
ckpt_key_count = next(iter(checkpoint["state_dict"])).count("module")
mod_key_count = next(iter(self.model.state_dict())).count("module")
key_count_diff = mod_key_count - ckpt_key_count
if key_count_diff > 0:
new_dict = {
key_count_diff * "module." + k: v
for k, v in checkpoint["state_dict"].items()
}
elif key_count_diff < 0:
new_dict = {
k[len("module.") * abs(key_count_diff) :]: v
for k, v in checkpoint["state_dict"].items()
}
else:
new_dict = checkpoint["state_dict"]
strict = self.config["task"].get("strict_load", True)
load_state_dict(self.model, new_dict, strict=strict)
if "optimizer" in checkpoint:
self.optimizer.load_state_dict(checkpoint["optimizer"])
if "scheduler" in checkpoint and checkpoint["scheduler"] is not None:
self.scheduler.scheduler.load_state_dict(checkpoint["scheduler"])
if "ema" in checkpoint and checkpoint["ema"] is not None:
self.ema.load_state_dict(checkpoint["ema"])
else:
self.ema = None
scale_dict = checkpoint.get("scale_dict", None)
if scale_dict:
logging.info(
"Overwriting scaling factors with those loaded from checkpoint. "
"If you're generating predictions with a pretrained checkpoint, this is the correct behavior. "
"To disable this, delete `scale_dict` from the checkpoint. "
)
load_scales_compat(self._unwrapped_model, scale_dict)
for key in checkpoint["normalizers"]:
if key in self.normalizers:
self.normalizers[key].load_state_dict(
checkpoint["normalizers"][key]
)
if self.scaler and checkpoint["amp"]:
self.scaler.load_state_dict(checkpoint["amp"])
def load_loss(self) -> None:
self.loss_fn: Dict[str, str] = {
"energy": self.config["optim"].get("loss_energy", "mae"),
"force": self.config["optim"].get("loss_force", "mae"),
}
for loss, loss_name in self.loss_fn.items():
if loss_name in ["l1", "mae"]:
self.loss_fn[loss] = nn.L1Loss()
elif loss_name == "mse":
self.loss_fn[loss] = nn.MSELoss()
elif loss_name == "l2mae":
self.loss_fn[loss] = L2MAELoss()
elif loss_name == "atomwisel2":
self.loss_fn[loss] = AtomwiseL2Loss()
else:
raise NotImplementedError(
f"Unknown loss function name: {loss_name}"
)
self.loss_fn[loss] = DDPLoss(self.loss_fn[loss])
def load_optimizer(self) -> None:
optimizer = self.config["optim"].get("optimizer", "AdamW")
optimizer = getattr(optim, optimizer)
if self.config["optim"].get("weight_decay", 0) > 0:
# Do not regularize bias etc.
params_decay = []
params_no_decay = []
for name, param in self.model.named_parameters():
if param.requires_grad:
if "embedding" in name:
params_no_decay += [param]
elif "frequencies" in name:
params_no_decay += [param]
elif "bias" in name:
params_no_decay += [param]
else:
params_decay += [param]
self.optimizer = optimizer(
[
{"params": params_no_decay, "weight_decay": 0},
{
"params": params_decay,
"weight_decay": self.config["optim"]["weight_decay"],
},
],
lr=self.config["optim"]["lr_initial"],
**self.config["optim"].get("optimizer_params", {}),
)
else:
self.optimizer = optimizer(
params=self.model.parameters(),
lr=self.config["optim"]["lr_initial"],
**self.config["optim"].get("optimizer_params", {}),
)
def load_extras(self) -> None:
self.scheduler = LRScheduler(self.optimizer, self.config["optim"])
self.clip_grad_norm = self.config["optim"].get("clip_grad_norm")
self.ema_decay = self.config["optim"].get("ema_decay")
if self.ema_decay:
self.ema = ExponentialMovingAverage(
self.model.parameters(),
self.ema_decay,
)
else:
self.ema = None
def save(
self,
metrics=None,
checkpoint_file: str = "checkpoint.pt",
training_state: bool = True,
):
if not self.is_debug and distutils.is_master():
if training_state:
return save_checkpoint(
{
"epoch": self.epoch,
"step": self.step,
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.scheduler.state_dict()
if self.scheduler.scheduler_type != "Null"
else None,
"normalizers": {
key: value.state_dict()
for key, value in self.normalizers.items()
},
"config": self.config,
"val_metrics": metrics,
"ema": self.ema.state_dict() if self.ema else None,
"amp": self.scaler.state_dict()
if self.scaler
else None,
"best_val_metric": self.best_val_metric,
"primary_metric": self.config["task"].get(
"primary_metric",
self.evaluator.task_primary_metric[self.name],
),
},
checkpoint_dir=self.config["cmd"]["checkpoint_dir"],
checkpoint_file=checkpoint_file,
)
else:
if self.ema:
self.ema.store()
self.ema.copy_to()
ckpt_path = save_checkpoint(
{
"state_dict": self.model.state_dict(),
"normalizers": {
key: value.state_dict()
for key, value in self.normalizers.items()
},
"config": self.config,
"val_metrics": metrics,
"amp": self.scaler.state_dict()
if self.scaler
else None,
},
checkpoint_dir=self.config["cmd"]["checkpoint_dir"],
checkpoint_file=checkpoint_file,
)
if self.ema:
self.ema.restore()
return ckpt_path
return None
def save_hpo(self, epoch, step: int, metrics, checkpoint_every: int):
        # Default is no checkpointing.
        # Checkpointing frequency can be adjusted by setting checkpoint_every (in steps).
        # To checkpoint every time results are communicated to Ray Tune, set checkpoint_every=1.
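        # Illustrative (hypothetical value): checkpoint_every=25 writes a Ray Tune
        # checkpoint on every 25th step that reaches this call, while the default
        # of -1 disables checkpointing entirely.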
if checkpoint_every != -1 and step % checkpoint_every == 0:
with tune.checkpoint_dir( # noqa: F821
step=step
) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save(self.save_state(epoch, step, metrics), path)
def hpo_update(
self, epoch, step, train_metrics, val_metrics, test_metrics=None
):
progress = {
"steps": step,
"epochs": epoch,
"act_lr": self.optimizer.param_groups[0]["lr"],
}
# checkpointing must occur before reporter
# default is no checkpointing
self.save_hpo(
epoch,
step,
val_metrics,
self.hpo_checkpoint_every,
)
# report metrics to tune
tune_reporter( # noqa: F821
iters=progress,
train_metrics={
k: train_metrics[k]["metric"] for k in self.metrics
},
val_metrics={k: val_metrics[k]["metric"] for k in val_metrics},
test_metrics=test_metrics,
)
@abstractmethod
def train(self):
"""Derived classes should implement this function."""
@torch.no_grad()
def validate(self, split: str = "val", disable_tqdm: bool = False):
ensure_fitted(self._unwrapped_model, warn=True)
if distutils.is_master():
logging.info(f"Evaluating on {split}.")
if self.is_hpo:
disable_tqdm = True
self.model.eval()
if self.ema:
self.ema.store()
self.ema.copy_to()
evaluator, metrics = Evaluator(task=self.name), {}
rank = distutils.get_rank()
loader = self.val_loader if split == "val" else self.test_loader
for i, batch in tqdm(
enumerate(loader),
total=len(loader),
position=rank,
desc="device {}".format(rank),
disable=disable_tqdm,
):
# Forward.
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
out = self._forward(batch)
loss = self._compute_loss(out, batch)
# Compute metrics.
metrics = self._compute_metrics(out, batch, evaluator, metrics)
metrics = evaluator.update("loss", loss.item(), metrics)
aggregated_metrics = {}
for k in metrics:
aggregated_metrics[k] = {
"total": distutils.all_reduce(
metrics[k]["total"], average=False, device=self.device
),
"numel": distutils.all_reduce(
metrics[k]["numel"], average=False, device=self.device
),
}
aggregated_metrics[k]["metric"] = (
aggregated_metrics[k]["total"] / aggregated_metrics[k]["numel"]
)
metrics = aggregated_metrics
log_dict = {k: metrics[k]["metric"] for k in metrics}
log_dict.update({"epoch": self.epoch})
if distutils.is_master():
log_str = ["{}: {:.4f}".format(k, v) for k, v in log_dict.items()]
logging.info(", ".join(log_str))
# Make plots.
if self.logger is not None:
self.logger.log(
log_dict,
step=self.step,
split=split,
)
if self.ema:
self.ema.restore()
return metrics
@abstractmethod
def _forward(self, batch_list):
"""Derived classes should implement this function."""
@abstractmethod
def _compute_loss(self, out, batch_list):
"""Derived classes should implement this function."""
def _backward(self, loss) -> None:
self.optimizer.zero_grad()
loss.backward()
# Scale down the gradients of shared parameters
if hasattr(self.model.module, "shared_parameters"):
for p, factor in self.model.module.shared_parameters:
if hasattr(p, "grad") and p.grad is not None:
p.grad.detach().div_(factor)
else:
if not hasattr(self, "warned_shared_param_no_grad"):
self.warned_shared_param_no_grad = True
logging.warning(
"Some shared parameters do not have a gradient. "
"Please check if all shared parameters are used "
"and point to PyTorch parameters."
)
if self.clip_grad_norm:
if self.scaler:
self.scaler.unscale_(self.optimizer)
grad_norm = torch.nn.utils.clip_grad_norm_(
self.model.parameters(),
max_norm=self.clip_grad_norm,
)
if self.logger is not None:
self.logger.log(
{"grad_norm": grad_norm}, step=self.step, split="train"
)
if self.scaler:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if self.ema:
self.ema.update()
def save_results(
self, predictions, results_file: Optional[str], keys
) -> None:
if results_file is None:
return
results_file_path = os.path.join(
self.config["cmd"]["results_dir"],
f"{self.name}_{results_file}_{distutils.get_rank()}.npz",
)
np.savez_compressed(
results_file_path,
ids=predictions["id"],
**{key: predictions[key] for key in keys},
)
distutils.synchronize()
if distutils.is_master():
gather_results = defaultdict(list)
full_path = os.path.join(
self.config["cmd"]["results_dir"],
f"{self.name}_{results_file}.npz",
)
for i in range(distutils.get_world_size()):
rank_path = os.path.join(
self.config["cmd"]["results_dir"],
f"{self.name}_{results_file}_{i}.npz",
)
rank_results = np.load(rank_path, allow_pickle=True)
gather_results["ids"].extend(rank_results["ids"])
for key in keys:
gather_results[key].extend(rank_results[key])
os.remove(rank_path)
# Because of how distributed sampler works, some system ids
# might be repeated to make no. of samples even across GPUs.
_, idx = np.unique(gather_results["ids"], return_index=True)
gather_results["ids"] = np.array(gather_results["ids"])[idx]
for k in keys:
if k == "forces":
gather_results[k] = np.concatenate(
np.array(gather_results[k])[idx]
)
elif k == "chunk_idx":
gather_results[k] = np.cumsum(
np.array(gather_results[k])[idx]
)[:-1]
else:
gather_results[k] = np.array(gather_results[k])[idx]
logging.info(f"Writing results to {full_path}")
np.savez_compressed(full_path, **gather_results)
| 29,997 | 36.264596 | 111 | py |
ocp | ocp-main/ocpmodels/trainers/energy_trainer.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
from typing import Optional
import torch
import torch_geometric
from tqdm import tqdm
from ocpmodels.common import distutils
from ocpmodels.common.registry import registry
from ocpmodels.modules.scaling.util import ensure_fitted
from ocpmodels.trainers.base_trainer import BaseTrainer
@registry.register_trainer("energy")
class EnergyTrainer(BaseTrainer):
"""
Trainer class for the Initial Structure to Relaxed Energy (IS2RE) task.
.. note::
Examples of configurations for task, model, dataset and optimizer
can be found in `configs/ocp_is2re <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2re/>`_.
Args:
task (dict): Task configuration.
model (dict): Model configuration.
dataset (dict): Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.
optimizer (dict): Optimizer configuration.
identifier (str): Experiment identifier that is appended to log directory.
run_dir (str, optional): Path to the run directory where logs are to be saved.
(default: :obj:`None`)
is_debug (bool, optional): Run in debug mode.
(default: :obj:`False`)
is_hpo (bool, optional): Run hyperparameter optimization with Ray Tune.
(default: :obj:`False`)
print_every (int, optional): Frequency of printing logs.
(default: :obj:`100`)
seed (int, optional): Random number seed.
(default: :obj:`None`)
logger (str, optional): Type of logger to be used.
(default: :obj:`tensorboard`)
local_rank (int, optional): Local rank of the process, only applicable for distributed training.
(default: :obj:`0`)
amp (bool, optional): Run using automatic mixed precision.
(default: :obj:`False`)
slurm (dict): Slurm configuration. Currently just for keeping track.
(default: :obj:`{}`)
"""
def __init__(
self,
task,
model,
dataset,
optimizer,
identifier,
normalizer=None,
timestamp_id: Optional[str] = None,
run_dir=None,
is_debug: bool = False,
is_hpo: bool = False,
print_every: int = 100,
seed=None,
logger: str = "tensorboard",
local_rank: int = 0,
amp: bool = False,
cpu: bool = False,
slurm={},
noddp: bool = False,
) -> None:
super().__init__(
task=task,
model=model,
dataset=dataset,
optimizer=optimizer,
identifier=identifier,
normalizer=normalizer,
timestamp_id=timestamp_id,
run_dir=run_dir,
is_debug=is_debug,
is_hpo=is_hpo,
print_every=print_every,
seed=seed,
logger=logger,
local_rank=local_rank,
amp=amp,
cpu=cpu,
name="is2re",
slurm=slurm,
noddp=noddp,
)
def load_task(self) -> None:
logging.info(f"Loading dataset: {self.config['task']['dataset']}")
self.num_targets = 1
@torch.no_grad()
def predict(
self,
loader,
per_image: bool = True,
results_file=None,
disable_tqdm: bool = False,
):
ensure_fitted(self._unwrapped_model)
if distutils.is_master() and not disable_tqdm:
logging.info("Predicting on test.")
assert isinstance(
loader,
(
torch.utils.data.dataloader.DataLoader,
torch_geometric.data.Batch,
),
)
rank = distutils.get_rank()
if isinstance(loader, torch_geometric.data.Batch):
loader = [[loader]]
self.model.eval()
if self.ema:
self.ema.store()
self.ema.copy_to()
if self.normalizers is not None and "target" in self.normalizers:
self.normalizers["target"].to(self.device)
predictions = {"id": [], "energy": []}
for _, batch in tqdm(
enumerate(loader),
total=len(loader),
position=rank,
desc="device {}".format(rank),
disable=disable_tqdm,
):
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
out = self._forward(batch)
if self.normalizers is not None and "target" in self.normalizers:
out["energy"] = self.normalizers["target"].denorm(
out["energy"]
)
if per_image:
predictions["id"].extend(
[str(i) for i in batch[0].sid.tolist()]
)
predictions["energy"].extend(
out["energy"].cpu().detach().numpy()
)
            else:
                predictions["energy"] = out["energy"].detach()
                if self.ema:
                    self.ema.restore()
                return predictions
self.save_results(predictions, results_file, keys=["energy"])
if self.ema:
self.ema.restore()
return predictions
def train(self, disable_eval_tqdm: bool = False) -> None:
ensure_fitted(self._unwrapped_model, warn=True)
eval_every = self.config["optim"].get(
"eval_every", len(self.train_loader)
)
primary_metric = self.config["task"].get(
"primary_metric", self.evaluator.task_primary_metric[self.name]
)
self.best_val_metric = 1e9
# Calculate start_epoch from step instead of loading the epoch number
# to prevent inconsistencies due to different batch size in checkpoint.
start_epoch = self.step // len(self.train_loader)
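        # Illustrative (hypothetical numbers): resuming from step 12000 with 4000
        # batches per epoch gives start_epoch = 3; resuming from step 13000 also
        # gives start_epoch = 3, with skip_steps = 1000 handled inside the loop below.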
for epoch_int in range(
start_epoch, self.config["optim"]["max_epochs"]
):
self.train_sampler.set_epoch(epoch_int)
skip_steps = self.step % len(self.train_loader)
train_loader_iter = iter(self.train_loader)
for i in range(skip_steps, len(self.train_loader)):
self.epoch = epoch_int + (i + 1) / len(self.train_loader)
self.step = epoch_int * len(self.train_loader) + i + 1
self.model.train()
# Get a batch.
batch = next(train_loader_iter)
# Forward, loss, backward.
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
out = self._forward(batch)
loss = self._compute_loss(out, batch)
loss = self.scaler.scale(loss) if self.scaler else loss
self._backward(loss)
scale = self.scaler.get_scale() if self.scaler else 1.0
# Compute metrics.
self.metrics = self._compute_metrics(
out,
batch,
self.evaluator,
metrics={},
)
self.metrics = self.evaluator.update(
"loss", loss.item() / scale, self.metrics
)
# Log metrics.
log_dict = {k: self.metrics[k]["metric"] for k in self.metrics}
log_dict.update(
{
"lr": self.scheduler.get_lr(),
"epoch": self.epoch,
"step": self.step,
}
)
if (
self.step % self.config["cmd"]["print_every"] == 0
and distutils.is_master()
and not self.is_hpo
):
log_str = [
"{}: {:.2e}".format(k, v) for k, v in log_dict.items()
]
print(", ".join(log_str))
self.metrics = {}
if self.logger is not None:
self.logger.log(
log_dict,
step=self.step,
split="train",
)
# Evaluate on val set after every `eval_every` iterations.
if self.step % eval_every == 0:
self.save(
checkpoint_file="checkpoint.pt", training_state=True
)
if self.val_loader is not None:
val_metrics = self.validate(
split="val",
disable_tqdm=disable_eval_tqdm,
)
if (
val_metrics[
self.evaluator.task_primary_metric[self.name]
]["metric"]
< self.best_val_metric
):
self.best_val_metric = val_metrics[
self.evaluator.task_primary_metric[self.name]
]["metric"]
self.save(
metrics=val_metrics,
checkpoint_file="best_checkpoint.pt",
training_state=False,
)
if self.test_loader is not None:
self.predict(
self.test_loader,
results_file="predictions",
disable_tqdm=False,
)
if self.is_hpo:
self.hpo_update(
self.epoch,
self.step,
self.metrics,
val_metrics,
)
if self.scheduler.scheduler_type == "ReduceLROnPlateau":
if self.step % eval_every == 0:
self.scheduler.step(
metrics=val_metrics[primary_metric]["metric"],
)
else:
self.scheduler.step()
torch.cuda.empty_cache()
self.train_dataset.close_db()
if self.config.get("val_dataset", False):
self.val_dataset.close_db()
if self.config.get("test_dataset", False):
self.test_dataset.close_db()
def _forward(self, batch_list):
output = self.model(batch_list)
if output.shape[-1] == 1:
output = output.view(-1)
return {
"energy": output,
}
def _compute_loss(self, out, batch_list):
energy_target = torch.cat(
[batch.y_relaxed.to(self.device) for batch in batch_list], dim=0
)
if self.normalizer.get("normalize_labels", False):
target_normed = self.normalizers["target"].norm(energy_target)
else:
target_normed = energy_target
loss = self.loss_fn["energy"](out["energy"], target_normed)
return loss
def _compute_metrics(self, out, batch_list, evaluator, metrics={}):
energy_target = torch.cat(
[batch.y_relaxed.to(self.device) for batch in batch_list], dim=0
)
if self.normalizer.get("normalize_labels", False):
out["energy"] = self.normalizers["target"].denorm(out["energy"])
metrics = evaluator.eval(
out,
{"energy": energy_target},
prev_metrics=metrics,
)
return metrics
| 11,855 | 33.768328 | 129 | py |
ocp | ocp-main/ocpmodels/trainers/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = [
"BaseTrainer",
"ForcesTrainer",
"EnergyTrainer",
]
from .base_trainer import BaseTrainer
from .energy_trainer import EnergyTrainer
from .forces_trainer import ForcesTrainer
| 376 | 24.133333 | 65 | py |
ocp | ocp-main/ocpmodels/trainers/forces_trainer.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import pathlib
from collections import defaultdict
from pathlib import Path
from typing import Optional
import numpy as np
import torch
import torch_geometric
from tqdm import tqdm
from ocpmodels.common import distutils
from ocpmodels.common.registry import registry
from ocpmodels.common.relaxation.ml_relaxation import ml_relax
from ocpmodels.common.utils import check_traj_files
from ocpmodels.modules.evaluator import Evaluator
from ocpmodels.modules.normalizer import Normalizer
from ocpmodels.modules.scaling.util import ensure_fitted
from ocpmodels.trainers.base_trainer import BaseTrainer
@registry.register_trainer("forces")
class ForcesTrainer(BaseTrainer):
"""
Trainer class for the Structure to Energy & Force (S2EF) and Initial State to
Relaxed State (IS2RS) tasks.
.. note::
Examples of configurations for task, model, dataset and optimizer
can be found in `configs/ocp_s2ef <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2re/>`_
and `configs/ocp_is2rs <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2rs/>`_.
Args:
task (dict): Task configuration.
model (dict): Model configuration.
dataset (dict): Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.
optimizer (dict): Optimizer configuration.
identifier (str): Experiment identifier that is appended to log directory.
run_dir (str, optional): Path to the run directory where logs are to be saved.
(default: :obj:`None`)
is_debug (bool, optional): Run in debug mode.
(default: :obj:`False`)
is_hpo (bool, optional): Run hyperparameter optimization with Ray Tune.
(default: :obj:`False`)
print_every (int, optional): Frequency of printing logs.
(default: :obj:`100`)
seed (int, optional): Random number seed.
(default: :obj:`None`)
logger (str, optional): Type of logger to be used.
(default: :obj:`tensorboard`)
local_rank (int, optional): Local rank of the process, only applicable for distributed training.
(default: :obj:`0`)
amp (bool, optional): Run using automatic mixed precision.
(default: :obj:`False`)
slurm (dict): Slurm configuration. Currently just for keeping track.
(default: :obj:`{}`)
"""
def __init__(
self,
task,
model,
dataset,
optimizer,
identifier,
normalizer=None,
timestamp_id: Optional[str] = None,
run_dir: Optional[str] = None,
is_debug: bool = False,
is_hpo: bool = False,
print_every: int = 100,
seed: Optional[int] = None,
logger: str = "tensorboard",
local_rank: int = 0,
amp: bool = False,
cpu: bool = False,
slurm={},
noddp: bool = False,
) -> None:
super().__init__(
task=task,
model=model,
dataset=dataset,
optimizer=optimizer,
identifier=identifier,
normalizer=normalizer,
timestamp_id=timestamp_id,
run_dir=run_dir,
is_debug=is_debug,
is_hpo=is_hpo,
print_every=print_every,
seed=seed,
logger=logger,
local_rank=local_rank,
amp=amp,
cpu=cpu,
name="s2ef",
slurm=slurm,
noddp=noddp,
)
def load_task(self) -> None:
logging.info(f"Loading dataset: {self.config['task']['dataset']}")
if "relax_dataset" in self.config["task"]:
self.relax_dataset = registry.get_dataset_class("lmdb")(
self.config["task"]["relax_dataset"]
)
self.relax_sampler = self.get_sampler(
self.relax_dataset,
self.config["optim"].get(
"eval_batch_size", self.config["optim"]["batch_size"]
),
shuffle=False,
)
self.relax_loader = self.get_dataloader(
self.relax_dataset,
self.relax_sampler,
)
self.num_targets = 1
        # If we're computing gradients wrt the input, set the normalizer mean to 0 --
        # it is lost when computing dy/dx -- and the std to the forward-target std.
if self.config["model_attributes"].get("regress_forces", True):
if self.normalizer.get("normalize_labels", False):
if "grad_target_mean" in self.normalizer:
self.normalizers["grad_target"] = Normalizer(
mean=self.normalizer["grad_target_mean"],
std=self.normalizer["grad_target_std"],
device=self.device,
)
else:
self.normalizers["grad_target"] = Normalizer(
tensor=self.train_loader.dataset.data.y[
self.train_loader.dataset.__indices__
],
device=self.device,
)
self.normalizers["grad_target"].mean.fill_(0)
# Takes in a new data source and generates predictions on it.
@torch.no_grad()
def predict(
self,
data_loader,
per_image: bool = True,
results_file=None,
disable_tqdm: bool = False,
):
ensure_fitted(self._unwrapped_model, warn=True)
if distutils.is_master() and not disable_tqdm:
logging.info("Predicting on test.")
assert isinstance(
data_loader,
(
torch.utils.data.dataloader.DataLoader,
torch_geometric.data.Batch,
),
)
rank = distutils.get_rank()
if isinstance(data_loader, torch_geometric.data.Batch):
data_loader = [[data_loader]]
self.model.eval()
if self.ema:
self.ema.store()
self.ema.copy_to()
if self.normalizers is not None and "target" in self.normalizers:
self.normalizers["target"].to(self.device)
self.normalizers["grad_target"].to(self.device)
predictions = {"id": [], "energy": [], "forces": [], "chunk_idx": []}
for i, batch_list in tqdm(
enumerate(data_loader),
total=len(data_loader),
position=rank,
desc="device {}".format(rank),
disable=disable_tqdm,
):
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
out = self._forward(batch_list)
if self.normalizers is not None and "target" in self.normalizers:
out["energy"] = self.normalizers["target"].denorm(
out["energy"]
)
out["forces"] = self.normalizers["grad_target"].denorm(
out["forces"]
)
if per_image:
systemids = [
str(i) + "_" + str(j)
for i, j in zip(
batch_list[0].sid.tolist(), batch_list[0].fid.tolist()
)
]
predictions["id"].extend(systemids)
batch_natoms = torch.cat(
[batch.natoms for batch in batch_list]
)
batch_fixed = torch.cat([batch.fixed for batch in batch_list])
# total energy target requires predictions to be saved in float32
# default is float16
if (
self.config["task"].get("prediction_dtype", "float16")
== "float32"
or self.config["task"]["dataset"] == "oc22_lmdb"
):
predictions["energy"].extend(
out["energy"].cpu().detach().to(torch.float32).numpy()
)
forces = out["forces"].cpu().detach().to(torch.float32)
else:
predictions["energy"].extend(
out["energy"].cpu().detach().to(torch.float16).numpy()
)
forces = out["forces"].cpu().detach().to(torch.float16)
per_image_forces = torch.split(forces, batch_natoms.tolist())
per_image_forces = [
force.numpy() for force in per_image_forces
]
# evalAI only requires forces on free atoms
if results_file is not None:
_per_image_fixed = torch.split(
batch_fixed, batch_natoms.tolist()
)
_per_image_free_forces = [
force[(fixed == 0).tolist()]
for force, fixed in zip(
per_image_forces, _per_image_fixed
)
]
_chunk_idx = np.array(
[
free_force.shape[0]
for free_force in _per_image_free_forces
]
)
per_image_forces = _per_image_free_forces
predictions["chunk_idx"].extend(_chunk_idx)
predictions["forces"].extend(per_image_forces)
else:
predictions["energy"] = out["energy"].detach()
predictions["forces"] = out["forces"].detach()
if self.ema:
self.ema.restore()
return predictions
predictions["forces"] = np.array(predictions["forces"])
predictions["chunk_idx"] = np.array(predictions["chunk_idx"])
predictions["energy"] = np.array(predictions["energy"])
predictions["id"] = np.array(predictions["id"])
self.save_results(
predictions, results_file, keys=["energy", "forces", "chunk_idx"]
)
if self.ema:
self.ema.restore()
return predictions
def update_best(
self,
primary_metric,
val_metrics,
disable_eval_tqdm: bool = True,
) -> None:
if (
"mae" in primary_metric
and val_metrics[primary_metric]["metric"] < self.best_val_metric
) or (
"mae" not in primary_metric
and val_metrics[primary_metric]["metric"] > self.best_val_metric
):
self.best_val_metric = val_metrics[primary_metric]["metric"]
self.save(
metrics=val_metrics,
checkpoint_file="best_checkpoint.pt",
training_state=False,
)
if self.test_loader is not None:
self.predict(
self.test_loader,
results_file="predictions",
disable_tqdm=disable_eval_tqdm,
)
def train(self, disable_eval_tqdm: bool = False) -> None:
ensure_fitted(self._unwrapped_model, warn=True)
eval_every = self.config["optim"].get(
"eval_every", len(self.train_loader)
)
checkpoint_every = self.config["optim"].get(
"checkpoint_every", eval_every
)
primary_metric = self.config["task"].get(
"primary_metric", self.evaluator.task_primary_metric[self.name]
)
if (
not hasattr(self, "primary_metric")
or self.primary_metric != primary_metric
):
self.best_val_metric = 1e9 if "mae" in primary_metric else -1.0
else:
primary_metric = self.primary_metric
self.metrics = {}
# Calculate start_epoch from step instead of loading the epoch number
# to prevent inconsistencies due to different batch size in checkpoint.
start_epoch = self.step // len(self.train_loader)
for epoch_int in range(
start_epoch, self.config["optim"]["max_epochs"]
):
self.train_sampler.set_epoch(epoch_int)
skip_steps = self.step % len(self.train_loader)
train_loader_iter = iter(self.train_loader)
for i in range(skip_steps, len(self.train_loader)):
self.epoch = epoch_int + (i + 1) / len(self.train_loader)
self.step = epoch_int * len(self.train_loader) + i + 1
self.model.train()
# Get a batch.
batch = next(train_loader_iter)
# Forward, loss, backward.
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
out = self._forward(batch)
loss = self._compute_loss(out, batch)
loss = self.scaler.scale(loss) if self.scaler else loss
self._backward(loss)
scale = self.scaler.get_scale() if self.scaler else 1.0
# Compute metrics.
self.metrics = self._compute_metrics(
out,
batch,
self.evaluator,
self.metrics,
)
self.metrics = self.evaluator.update(
"loss", loss.item() / scale, self.metrics
)
# Log metrics.
log_dict = {k: self.metrics[k]["metric"] for k in self.metrics}
log_dict.update(
{
"lr": self.scheduler.get_lr(),
"epoch": self.epoch,
"step": self.step,
}
)
if (
self.step % self.config["cmd"]["print_every"] == 0
and distutils.is_master()
and not self.is_hpo
):
log_str = [
"{}: {:.2e}".format(k, v) for k, v in log_dict.items()
]
logging.info(", ".join(log_str))
self.metrics = {}
if self.logger is not None:
self.logger.log(
log_dict,
step=self.step,
split="train",
)
if (
checkpoint_every != -1
and self.step % checkpoint_every == 0
):
self.save(
checkpoint_file="checkpoint.pt", training_state=True
)
# Evaluate on val set every `eval_every` iterations.
if self.step % eval_every == 0:
if self.val_loader is not None:
val_metrics = self.validate(
split="val",
disable_tqdm=disable_eval_tqdm,
)
self.update_best(
primary_metric,
val_metrics,
disable_eval_tqdm=disable_eval_tqdm,
)
if self.is_hpo:
self.hpo_update(
self.epoch,
self.step,
self.metrics,
val_metrics,
)
if self.config["task"].get("eval_relaxations", False):
if "relax_dataset" not in self.config["task"]:
logging.warning(
"Cannot evaluate relaxations, relax_dataset not specified"
)
else:
self.run_relaxations()
if self.scheduler.scheduler_type == "ReduceLROnPlateau":
if self.step % eval_every == 0:
self.scheduler.step(
metrics=val_metrics[primary_metric]["metric"],
)
else:
self.scheduler.step()
torch.cuda.empty_cache()
if checkpoint_every == -1:
self.save(checkpoint_file="checkpoint.pt", training_state=True)
self.train_dataset.close_db()
if self.config.get("val_dataset", False):
self.val_dataset.close_db()
if self.config.get("test_dataset", False):
self.test_dataset.close_db()
def _forward(self, batch_list):
# forward pass.
if self.config["model_attributes"].get("regress_forces", True):
out_energy, out_forces = self.model(batch_list)
else:
out_energy = self.model(batch_list)
if out_energy.shape[-1] == 1:
out_energy = out_energy.view(-1)
out = {
"energy": out_energy,
}
if self.config["model_attributes"].get("regress_forces", True):
out["forces"] = out_forces
return out
    def _compute_loss(self, out, batch_list) -> torch.Tensor:
loss = []
# Energy loss.
energy_target = torch.cat(
[batch.y.to(self.device) for batch in batch_list], dim=0
)
if self.normalizer.get("normalize_labels", False):
energy_target = self.normalizers["target"].norm(energy_target)
energy_mult = self.config["optim"].get("energy_coefficient", 1)
loss.append(
energy_mult * self.loss_fn["energy"](out["energy"], energy_target)
)
# Force loss.
if self.config["model_attributes"].get("regress_forces", True):
force_target = torch.cat(
[batch.force.to(self.device) for batch in batch_list], dim=0
)
if self.normalizer.get("normalize_labels", False):
force_target = self.normalizers["grad_target"].norm(
force_target
)
tag_specific_weights = self.config["task"].get(
"tag_specific_weights", []
)
if tag_specific_weights != []:
# handle tag specific weights as introduced in forcenet
assert len(tag_specific_weights) == 3
batch_tags = torch.cat(
[
batch.tags.float().to(self.device)
for batch in batch_list
],
dim=0,
)
weight = torch.zeros_like(batch_tags)
weight[batch_tags == 0] = tag_specific_weights[0]
weight[batch_tags == 1] = tag_specific_weights[1]
weight[batch_tags == 2] = tag_specific_weights[2]
if self.config["optim"].get("loss_force", "l2mae") == "l2mae":
# zero out nans, if any
found_nans_or_infs = not torch.all(
out["forces"].isfinite()
)
if found_nans_or_infs is True:
logging.warning("Found nans while computing loss")
out["forces"] = torch.nan_to_num(
out["forces"], nan=0.0
)
dists = torch.norm(
out["forces"] - force_target, p=2, dim=-1
)
weighted_dists_sum = (dists * weight).sum()
num_samples = out["forces"].shape[0]
num_samples = distutils.all_reduce(
num_samples, device=self.device
)
weighted_dists_sum = (
weighted_dists_sum
* distutils.get_world_size()
/ num_samples
)
force_mult = self.config["optim"].get(
"force_coefficient", 30
)
loss.append(force_mult * weighted_dists_sum)
else:
raise NotImplementedError
else:
# Force coefficient = 30 has been working well for us.
force_mult = self.config["optim"].get("force_coefficient", 30)
if self.config["task"].get("train_on_free_atoms", False):
fixed = torch.cat(
[batch.fixed.to(self.device) for batch in batch_list]
)
mask = fixed == 0
if (
self.config["optim"]
.get("loss_force", "mae")
.startswith("atomwise")
):
force_mult = self.config["optim"].get(
"force_coefficient", 1
)
natoms = torch.cat(
[
batch.natoms.to(self.device)
for batch in batch_list
]
)
natoms = torch.repeat_interleave(natoms, natoms)
force_loss = force_mult * self.loss_fn["force"](
out["forces"][mask],
force_target[mask],
natoms=natoms[mask],
batch_size=batch_list[0].natoms.shape[0],
)
loss.append(force_loss)
else:
loss.append(
force_mult
* self.loss_fn["force"](
out["forces"][mask], force_target[mask]
)
)
else:
loss.append(
force_mult
* self.loss_fn["force"](out["forces"], force_target)
)
# Sanity check to make sure the compute graph is correct.
for lc in loss:
assert hasattr(lc, "grad_fn")
loss = sum(loss)
return loss
def _compute_metrics(self, out, batch_list, evaluator, metrics={}):
natoms = torch.cat(
[batch.natoms.to(self.device) for batch in batch_list], dim=0
)
target = {
"energy": torch.cat(
[batch.y.to(self.device) for batch in batch_list], dim=0
),
"forces": torch.cat(
[batch.force.to(self.device) for batch in batch_list], dim=0
),
"natoms": natoms,
}
out["natoms"] = natoms
if self.config["task"].get("eval_on_free_atoms", True):
fixed = torch.cat(
[batch.fixed.to(self.device) for batch in batch_list]
)
mask = fixed == 0
out["forces"] = out["forces"][mask]
target["forces"] = target["forces"][mask]
s_idx = 0
natoms_free = []
for natoms in target["natoms"]:
natoms_free.append(
torch.sum(mask[s_idx : s_idx + natoms]).item()
)
s_idx += natoms
target["natoms"] = torch.LongTensor(natoms_free).to(self.device)
out["natoms"] = torch.LongTensor(natoms_free).to(self.device)
if self.normalizer.get("normalize_labels", False):
out["energy"] = self.normalizers["target"].denorm(out["energy"])
out["forces"] = self.normalizers["grad_target"].denorm(
out["forces"]
)
metrics = evaluator.eval(out, target, prev_metrics=metrics)
return metrics
def run_relaxations(self, split: str = "val") -> None:
ensure_fitted(self._unwrapped_model)
# When set to true, uses deterministic CUDA scatter ops, if available.
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms
# Only implemented for GemNet-OC currently.
registry.register(
"set_deterministic_scatter",
self.config["task"].get("set_deterministic_scatter", False),
)
logging.info("Running ML-relaxations")
self.model.eval()
if self.ema:
self.ema.store()
self.ema.copy_to()
evaluator_is2rs, metrics_is2rs = Evaluator(task="is2rs"), {}
evaluator_is2re, metrics_is2re = Evaluator(task="is2re"), {}
# Need both `pos_relaxed` and `y_relaxed` to compute val IS2R* metrics.
# Else just generate predictions.
if (
hasattr(self.relax_dataset[0], "pos_relaxed")
and self.relax_dataset[0].pos_relaxed is not None
) and (
hasattr(self.relax_dataset[0], "y_relaxed")
and self.relax_dataset[0].y_relaxed is not None
):
split = "val"
else:
split = "test"
ids = []
relaxed_positions = []
chunk_idx = []
for i, batch in tqdm(
enumerate(self.relax_loader), total=len(self.relax_loader)
):
if i >= self.config["task"].get("num_relaxation_batches", 1e9):
break
# If all traj files already exist, then skip this batch
if check_traj_files(
batch, self.config["task"]["relax_opt"].get("traj_dir", None)
):
logging.info(f"Skipping batch: {batch[0].sid.tolist()}")
continue
relaxed_batch = ml_relax(
batch=batch,
model=self,
steps=self.config["task"].get("relaxation_steps", 200),
fmax=self.config["task"].get("relaxation_fmax", 0.0),
relax_opt=self.config["task"]["relax_opt"],
save_full_traj=self.config["task"].get("save_full_traj", True),
device=self.device,
transform=None,
)
if self.config["task"].get("write_pos", False):
systemids = [str(i) for i in relaxed_batch.sid.tolist()]
natoms = relaxed_batch.natoms.tolist()
positions = torch.split(relaxed_batch.pos, natoms)
batch_relaxed_positions = [pos.tolist() for pos in positions]
relaxed_positions += batch_relaxed_positions
chunk_idx += natoms
ids += systemids
if split == "val":
mask = relaxed_batch.fixed == 0
s_idx = 0
natoms_free = []
for natoms in relaxed_batch.natoms:
natoms_free.append(
torch.sum(mask[s_idx : s_idx + natoms]).item()
)
s_idx += natoms
target = {
"energy": relaxed_batch.y_relaxed,
"positions": relaxed_batch.pos_relaxed[mask],
"cell": relaxed_batch.cell,
"pbc": torch.tensor([True, True, True]),
"natoms": torch.LongTensor(natoms_free),
}
prediction = {
"energy": relaxed_batch.y,
"positions": relaxed_batch.pos[mask],
"cell": relaxed_batch.cell,
"pbc": torch.tensor([True, True, True]),
"natoms": torch.LongTensor(natoms_free),
}
metrics_is2rs = evaluator_is2rs.eval(
prediction,
target,
metrics_is2rs,
)
metrics_is2re = evaluator_is2re.eval(
{"energy": prediction["energy"]},
{"energy": target["energy"]},
metrics_is2re,
)
if self.config["task"].get("write_pos", False):
rank = distutils.get_rank()
pos_filename = os.path.join(
self.config["cmd"]["results_dir"], f"relaxed_pos_{rank}.npz"
)
np.savez_compressed(
pos_filename,
ids=ids,
pos=np.array(relaxed_positions, dtype=object),
chunk_idx=chunk_idx,
)
distutils.synchronize()
if distutils.is_master():
gather_results = defaultdict(list)
full_path = os.path.join(
self.config["cmd"]["results_dir"],
"relaxed_positions.npz",
)
for i in range(distutils.get_world_size()):
rank_path = os.path.join(
self.config["cmd"]["results_dir"],
f"relaxed_pos_{i}.npz",
)
rank_results = np.load(rank_path, allow_pickle=True)
gather_results["ids"].extend(rank_results["ids"])
gather_results["pos"].extend(rank_results["pos"])
gather_results["chunk_idx"].extend(
rank_results["chunk_idx"]
)
os.remove(rank_path)
# Because of how distributed sampler works, some system ids
# might be repeated to make no. of samples even across GPUs.
_, idx = np.unique(gather_results["ids"], return_index=True)
gather_results["ids"] = np.array(gather_results["ids"])[idx]
gather_results["pos"] = np.concatenate(
np.array(gather_results["pos"])[idx]
)
gather_results["chunk_idx"] = np.cumsum(
np.array(gather_results["chunk_idx"])[idx]
)[
:-1
] # np.split does not need last idx, assumes n-1:end
logging.info(f"Writing results to {full_path}")
np.savez_compressed(full_path, **gather_results)
if split == "val":
for task in ["is2rs", "is2re"]:
metrics = eval(f"metrics_{task}")
aggregated_metrics = {}
for k in metrics:
aggregated_metrics[k] = {
"total": distutils.all_reduce(
metrics[k]["total"],
average=False,
device=self.device,
),
"numel": distutils.all_reduce(
metrics[k]["numel"],
average=False,
device=self.device,
),
}
aggregated_metrics[k]["metric"] = (
aggregated_metrics[k]["total"]
/ aggregated_metrics[k]["numel"]
)
metrics = aggregated_metrics
# Make plots.
log_dict = {
f"{task}_{k}": metrics[k]["metric"] for k in metrics
}
if self.logger is not None:
self.logger.log(
log_dict,
step=self.step,
split=split,
)
if distutils.is_master():
logging.info(metrics)
if self.ema:
self.ema.restore()
registry.unregister("set_deterministic_scatter")
| 31,922 | 37.554348 | 127 | py |
msvi | msvi-main/setup.py | import setuptools
setuptools.setup(
name="msvi",
version="0.0.1",
author="Valerii Iakovlev",
author_email="[email protected]",
url="https://github.com/yakovlev31/msvi",
packages=setuptools.find_packages(),
python_requires=">=3.9",
)
| 270 | 21.583333 | 45 | py |
msvi | msvi-main/experiments/rmnist/val.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.rmnist as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("val")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(val_loader, total=len(val_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_val_loss": mean_loss}) # type: ignore
| 1,789 | 25.323529 | 121 | py |
msvi | msvi-main/experiments/rmnist/test.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.rmnist as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("test")
# Load data.
train_dataset, val_dataset, test_dataset = data_utils.create_datasets(param)
train_loader, val_loader, test_loader = data_utils.create_dataloaders(param, train_dataset, val_dataset, test_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(test_loader, total=len(test_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_test_loss": mean_loss}) # type: ignore
| 1,815 | 25.705882 | 121 | py |
msvi | msvi-main/experiments/rmnist/utils.py | import os
from collections import deque
import numpy as np
import torch
import msvi.posterior
from einops import rearrange
ndarray = np.ndarray
Tensor = torch.Tensor
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
def save_model(model, path, name):
if not os.path.isdir(path):
os.makedirs(path)
torch.save(model.state_dict(), path+name+".pt")
def load_model(model, path, name, device):
model.load_state_dict(torch.load(path+name+".pt", map_location=device), strict=False)
def get_inference_data(t: Tensor, y: Tensor, delta_inf: float) -> tuple[list[Tensor], list[Tensor]]:
t_inf, y_inf = [], []
for i in range(t.shape[0]):
inf_inds = torch.argwhere(t[[i]] <= delta_inf)[:, 1]
t_inf.append(t[[i]][:, inf_inds, :])
y_inf.append(y[[i]][:, inf_inds, :, :])
return t_inf, y_inf
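# Note on get_inference_data above: for every trajectory only the observations with
# time stamps <= delta_inf are kept; these prefix segments are what the recognition
# network conditions on when inferring the initial latent state (see get_x0 below).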
def get_x0(elbo, t: list[Tensor], y: list[Tensor]) -> Tensor:
x0 = []
for ti, yi in zip(t, y):
elbo.q.rec_net.update_time_grids(ti)
gamma, tau = elbo.q.rec_net(yi)
x0.append(gamma[:, [0], :] + tau[:, [0], :] * torch.randn_like(tau[:, [0], :]))
return torch.cat(x0)
def _pred_full_traj(elbo, t: Tensor, x0: Tensor) -> Tensor:
elbo.p.set_theta(elbo.q.sample_theta())
S, M, K = x0.shape[0], t.shape[1], x0.shape[2]
x = torch.zeros((S, M, K), dtype=x0.dtype, device=x0.device)
x[:, [0], :] = x0
for i in range(1, M):
x[:, [i], :] = elbo.p.F(x[:, [i-1], :], t=msvi.posterior.extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
return elbo.p._sample_lik(x)
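# Note on _pred_full_traj above: starting from x0 and a freshly sampled theta, the
# latent trajectory is rolled out autoregressively, x_i = F(x_{i-1}, t_{i-1..i}),
# and the predicted observations are then sampled from the likelihood via
# p._sample_lik(x).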
def pred_full_traj(param, elbo, t: Tensor, y: Tensor) -> Tensor:
t_inf, y_inf = get_inference_data(t, y, param.delta_inf)
x0 = get_x0(elbo, t_inf, y_inf)
y_full_traj = _pred_full_traj(elbo, t, x0)
return y_full_traj
class BatchMovingAverage():
"""Computes moving average over the last `k` mini-batches
and stores the smallest recorded moving average in `min_avg`."""
def __init__(self, k: int) -> None:
self.values = deque([], maxlen=k)
self.min_avg = np.inf
def add_value(self, value: float) -> None:
self.values.append(value)
def get_average(self) -> float:
if len(self.values) == 0:
avg = np.nan
else:
avg = sum(self.values) / len(self.values)
if avg < self.min_avg:
self.min_avg = avg
return avg
def get_min_average(self):
return self.min_avg
def kl_norm_norm(mu0: Tensor, mu1: Tensor, sig0: Tensor, sig1: Tensor) -> Tensor:
"""Calculates KL divergence between two K-dimensional Normal
distributions with diagonal covariance matrices.
Args:
mu0: Mean of the first distribution. Has shape (*, K).
mu1: Mean of the second distribution. Has shape (*, K).
        sig0: Standard deviations (square roots of the diagonal covariance) of the first distribution. Has shape (*, K).
        sig1: Standard deviations (square roots of the diagonal covariance) of the second distribution. Has shape (*, K).
Returns:
KL divergence between the distributions. Has shape (*, 1).
"""
assert mu0.shape == mu1.shape == sig0.shape == sig1.shape, (f"{mu0.shape=} {mu1.shape=} {sig0.shape=} {sig1.shape=}")
a = (sig0 / sig1).pow(2).sum(-1, keepdim=True)
b = ((mu1 - mu0).pow(2) / sig1**2).sum(-1, keepdim=True)
c = 2 * (torch.log(sig1) - torch.log(sig0)).sum(-1, keepdim=True)
kl = 0.5 * (a + b + c - mu0.shape[-1])
return kl
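# Minimal sanity-check sketch for kl_norm_norm (hypothetical shapes):
#   mu0 = torch.zeros(2, 3, 4); mu1 = torch.ones(2, 3, 4)
#   sig0 = torch.ones(2, 3, 4); sig1 = torch.ones(2, 3, 4)
#   kl_norm_norm(mu0, mu1, sig0, sig1)  # shape (2, 3, 1); each entry is 0.5 * K = 2.0
# since the means differ by 1 in every dimension and the variances are all equal to 1.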
def create_mask(x: Tensor) -> Tensor:
"""Masks the 'velocity' part of the latent space since we want to use
    only the 'position' to reconstruct the observations."""
K = x.shape[2]
mask = torch.ones_like(x)
mask[:, :, K//2:] = 0.0
return mask
def param_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.data.norm(2).item()
return total_norm
def grad_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.grad.data.norm(2).item()
return total_norm
def split_trajectories(t, y, new_traj_len, batch_size):
s, m, n, d = y.shape
t_new = torch.empty((s, m-new_traj_len+1, new_traj_len, 1), dtype=t.dtype, device=t.device)
y_new = torch.empty((s, m-new_traj_len+1, new_traj_len, n, d), dtype=y.dtype, device=y.device)
for i in range(m - new_traj_len + 1):
t_new[:, i] = t[:, i:i+new_traj_len]
y_new[:, i] = y[:, i:i+new_traj_len]
t_new = rearrange(t_new, "a b c () -> (a b) c ()")
t_new -= torch.min(t_new, dim=1, keepdim=True)[0]
y_new = rearrange(y_new, "a b c n d -> (a b) c n d")
inds = np.random.choice(t_new.shape[0], size=batch_size, replace=False)
t_new = t_new[inds]
y_new = y_new[inds]
return t_new, y_new
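# Shape sketch for split_trajectories above (hypothetical sizes): with y of shape
# (s=8, m=25, n=1024, d=1), new_traj_len=5 and batch_size=16, each trajectory yields
# 25 - 5 + 1 = 21 overlapping windows; 16 of the 8 * 21 = 168 windows are sampled
# without replacement, giving t_new of shape (16, 5, 1) (each grid shifted to start
# at 0) and y_new of shape (16, 5, 1024, 1).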
| 4,823 | 30.122581 | 121 | py |
msvi | msvi-main/experiments/rmnist/train.py | from types import SimpleNamespace
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from tqdm import tqdm
import msvi.utils.rmnist as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("train")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
# Training.
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = data_utils.get_scheduler(optimizer, param.n_iters, param.lr)
bma = utils.BatchMovingAverage(k=10)
data_transform = data_utils.get_data_transform()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
utils.set_seed(param.seed)
for i in tqdm(range(param.n_iters), total=param.n_iters):
elbo.train()
t, y, traj_inds = [bi.to(device) for bi in next(iter(train_loader))]
# t = t + (torch.rand_like(t) - 0.5) * 2 * param.sigT
y = data_transform(y)
L1, L2, L3, x, s = elbo(t, y, traj_inds, param.block_size, scaler=1.0)
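    # The data-dependent terms L1 and L2 are computed on a mini-batch, so below they
    # are rescaled by len(train_dataset) / batch_size to approximate their full-dataset
    # values; the parameter-prior term L3 does not depend on the number of trajectories
    # and is left unscaled (this reading of L1/L2/L3 follows the scaling applied here).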
L1 *= len(train_dataset) / param.batch_size
L2 *= len(train_dataset) / param.batch_size
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# Validation on full trajectory predictions.
if i % int(0.00333 * param.n_iters) == 0 or i == param.n_iters - 1:
with torch.no_grad():
elbo.eval()
t_val, y_val, _ = [bi.to(device) for bi in next(iter(val_loader))]
y_full_traj = utils.pred_full_traj(param, elbo, t, y)
y_val_full_traj = utils.pred_full_traj(param, elbo, t_val, y_val)
train_full_traj_mse = nn.MSELoss()(y_full_traj, y).item()
val_full_traj_mse = nn.MSELoss()(y_val_full_traj, y_val).item()
bma.add_value(val_full_traj_mse)
if bma.get_average() <= bma.get_min_average():
utils.save_model(elbo, param.model_folder, param.name)
wandb.log(
{
"-L1": -L1.item(),
"L2": L2.item(),
"L3": L3.item(),
"-ELBO": loss.item(),
"train_full_traj_mse": train_full_traj_mse,
"val_full_traj_mse": val_full_traj_mse,
"lr": optimizer.param_groups[0]["lr"],
"scaler": 1.0,
},
step=i
)
if param.visualize == 1:
data_utils.visualize_trajectories(
traj=[
y[[0]].detach().cpu().numpy(),
y_full_traj[[0]].detach().cpu().numpy(),
y_val[[0]].detach().cpu().numpy(),
y_val_full_traj[[0]].detach().cpu().numpy(),
],
vis_inds=list(range(y.shape[1]))[:-1:max(1, int(0.09*y.shape[1]))],
title=f"Iteration {i}",
path=f"./img/{param.name}/",
img_name=f"iter_{i}.png",
)
| 3,556 | 29.663793 | 107 | py |
msvi | msvi-main/experiments/tests/lv_vms.py | from types import SimpleNamespace
import torch.optim as optim
from tqdm import tqdm
import utils
param = {
"T": 50, # terminal time
"M": 250, # number of observations in [0, T]
"sigY": 0.001, # observation noise
"seed": 1400, # random seed
"max_len": 201, # truncation length for the trajectories
"batch_size": 3,
"lr": 0.01, # learning rate
"n_iters": 5000, # number of optimization iterations
"solver_kwargs": {"method": "rk4", "rtol": 1e-5, "atol": 1e-5, "adjoint": False},
}
param = SimpleNamespace(**param)
train_dataset = utils.create_datasets(param)
train_loader = utils.create_dataloaders(train_dataset, param)
utils.set_seed(param.seed)
g, F, _ = utils.get_model_components(param, construct_h=False)
elbo = utils.create_vms_elbo(g, F, param, S=len(train_dataset))
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[4500], gamma=0.1)
for i in tqdm(range(param.n_iters), total=param.n_iters):
t, y, traj_inds = next(iter(train_loader))
L1, L2, L3, _, _ = elbo(t, y, traj_inds, block_size=10, scaler=1)
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
print("Inferred parameter values =", elbo.q.posterior_param["mu_theta_F"][0:4])
print(f"True parameter values = {utils.LV_PARAM}")
| 1,390 | 27.979167 | 85 | py |
msvi | msvi-main/experiments/tests/lv_avms.py | from types import SimpleNamespace
import torch.optim as optim
from tqdm import tqdm
import utils
param = {
"T": 50, # terminal time
"M": 250, # number of observations in [0, T]
"sigY": 0.001, # observation noise
"max_len": 201, # truncation length for the trajectories
"seed": 1400, # random seed
"batch_size": 3,
"lr": 0.01, # learning rate
"n_iters": 5000, # number of optimization iterations
"solver_kwargs": {"method": "rk4", "rtol": 1e-5, "atol": 1e-5, "adjoint": False},
# Parameters for recognition network.
"h_agg_attn": "tdp",
"h_agg_pos_enc": "rpeNN",
"h_agg_stat_layers": 2,
"K": 2,
"m_h": 16,
"h_agg_max_tokens": 500,
"h_agg_max_time": 100,
"h_agg_delta_r": 10,
"h_agg_p": -1,
"n": 1,
"drop_prob": 0,
"block_size": 1,
}
param = SimpleNamespace(**param)
train_dataset = utils.create_datasets(param)
train_loader = utils.create_dataloaders(train_dataset, param)
utils.set_seed(param.seed)
g, F, h = utils.get_model_components(param, construct_h=True)
elbo = utils.create_avms_elbo(g, F, h, param)
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[4500], gamma=0.1)
for i in tqdm(range(param.n_iters), total=param.n_iters):
t, y, traj_inds = next(iter(train_loader))
elbo.q.rec_net.update_time_grids(t)
L1, L2, L3, _, _ = elbo(t, y, traj_inds, block_size=param.block_size, scaler=1)
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
print("Inferred parameter values =", elbo.q.posterior_param["mu_theta_F"][0:4])
print(f"True parameter values = {utils.LV_PARAM}")
| 1,733 | 25.676923 | 85 | py |
msvi | msvi-main/experiments/tests/lv_vss.py | from types import SimpleNamespace
import torch.optim as optim
from tqdm import tqdm
import utils
param = {
"T": 50, # terminal time
"M": 250, # number of observations in [0, T]
"sigY": 0.001, # observation noise
"seed": 1400, # random seed
"max_len": 10, # truncation length for the trajectories
"batch_size": 3,
"lr": 0.01, # learning rate
"n_iters": 5000, # number of optimization iterations
"solver_kwargs": {"method": "rk4", "rtol": 1e-5, "atol": 1e-5, "adjoint": False},
}
param = SimpleNamespace(**param)
train_dataset = utils.create_datasets(param)
train_loader = utils.create_dataloaders(train_dataset, param)
utils.set_seed(param.seed)
g, F, _ = utils.get_model_components(param, construct_h=False)
elbo = utils.create_vss_elbo(g, F, param, S=len(train_dataset))
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[4500], gamma=0.1)
for i in tqdm(range(param.n_iters), total=param.n_iters):
t, y, traj_inds = next(iter(train_loader))
L1, L2, L3, _, _ = elbo(t, y, traj_inds, block_size=1, scaler=1)
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
print("Inferred parameter values =", elbo.q.posterior_param["mu_theta_F"][0:4])
print(f"True parameter values = {utils.LV_PARAM}")
| 1,388 | 27.9375 | 85 | py |
msvi | msvi-main/experiments/tests/utils.py | import numpy as np
import scipy.integrate
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
from einops import rearrange
from einops.layers.torch import Rearrange
import msvi.decoder
import msvi.trans_func
import msvi.rec_net
import msvi.model
import msvi.posterior
import msvi.elbo
import msvi.utils.utils
from msvi.dataset import TrajectoryDataset
# Use Lotka-Volterra for sanity check.
ndarray = np.ndarray
LV_PARAM = [2.0/3, 4.0/3, 1.0, 1.0] # parameters of the system
LV_IC = np.array(
[
[0.9, 1.8],
[1.9, 0.9],
[0.45, 0.9]
]
) # initial conditions
def generate_irregular_time_grid(T, intensity, min_dist):
"""Generates irregular time grid on the interval [0, T].
Args:
T (float): Terminal time.
intensity (float): Intensity of the observations (per second).
min_dist (float): Smallest distance between time points.
Returns:
t (ndarray): 1D array with time points.
"""
t = [0.0]
while t[-1] < T:
t.append(t[-1] + np.random.exponential(1.0/intensity))
t.pop(-1)
t[-1] = T
leave_mask = [True] * len(t)
for i in range(0, len(t)):
if leave_mask[i] is True:
for j in range(i+1, len(t)):
dist = t[j] - t[i]
if dist < min_dist:
leave_mask[j] = False
return np.array(t)[leave_mask]
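# Usage sketch (hypothetical arguments): generate_irregular_time_grid(T=50, intensity=5,
# min_dist=0.02) draws exponential inter-arrival times with mean 0.2 s on [0, 50],
# pins the final point to T, and then greedily drops any point closer than 0.02 to an
# already kept point.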
def lv_dynamics(t, x):
alpha, beta, gamma, delta = LV_PARAM
dzdt = np.array(
[
alpha * x[0] - beta * x[0] * x[1],
delta * x[0] * x[1] - gamma * x[1],
]
)
return dzdt
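# For reference, lv_dynamics above implements the Lotka-Volterra system with
# (alpha, beta, gamma, delta) = LV_PARAM:
#   dx1/dt = alpha * x1 - beta * x1 * x2
#   dx2/dt = delta * x1 * x2 - gamma * x2
# (reading x1 as prey and x2 as predator is the conventional interpretation and is
# not something the code depends on).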
def generate_data(T: float, M: int, sigY: float, seed: int) -> tuple[ndarray, ...]:
np.random.seed(seed)
t = np.empty(len(LV_IC), dtype=object)
x = np.empty(len(LV_IC), dtype=object)
y = np.empty(len(LV_IC), dtype=object)
for i in range(len(LV_IC)):
# ti = np.linspace(0, LV_T, LV_M)
ti = generate_irregular_time_grid(T, M/T, min_dist=0.02)
xi = scipy.integrate.solve_ivp(lv_dynamics, ti[[0, -1]], LV_IC[i], method="RK45", rtol=1e-5, atol=1e-5, t_eval=ti).y.T
t[i] = rearrange(ti, "m -> m ()")
x[i] = rearrange(xi, "m d -> m () d")
y[i] = x[i] + sigY * np.random.randn(*x[i].shape)
return t, x, y
def create_datasets(param) -> TrajectoryDataset:
t, _, y = generate_data(param.T, param.M, param.sigY, param.seed)
t = [torch.tensor(ti, dtype=torch.float64) for ti in t]
y = [torch.tensor(yi, dtype=torch.float32) for yi in y]
train_dataset = TrajectoryDataset(t, y, max_len=param.max_len)
return train_dataset
def create_dataloaders(dataset: TrajectoryDataset, param) -> DataLoader:
dataloader = DataLoader(dataset, batch_size=param.batch_size, shuffle=True)
return dataloader
def get_model_components(param, construct_h: bool):
g = Decoder(param.sigY)
F = msvi.trans_func.ODETransitionFunction(
f=nn.Sequential(TrueDynamicsFunction()),
layers_to_count=[TrueDynamicsFunction],
solver_kwargs=param.solver_kwargs
)
    if construct_h:
phi_enc = nn.Sequential(Rearrange("s m () d -> s m d"), nn.Linear(2, param.m_h*param.K))
phi_agg = msvi.utils.utils.create_agg_net(param, "static")
phi_gamma = nn.Linear(param.m_h*param.K, 2)
phi_tau = nn.Linear(param.m_h*param.K, 2)
h = msvi.rec_net.RecognitionNet(phi_enc, phi_agg, phi_gamma, phi_tau, 0)
else:
h = None
return g, F, h
def create_vss_elbo(g, F, param, S):
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([2]), False),
"sig0": Parameter(1.0 * torch.ones([2]), False),
"sigXi": Parameter(0.001 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(0.0 * torch.ones(g.param_count())),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(0.0 * torch.ones(F.param_count())),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
"gamma": Parameter(0.0 * torch.ones([S, 1, 2])),
"log_tau": Parameter(-7.0 * torch.ones([S, param.max_len-1, 2])),
})
p = msvi.model.ModelNormal(prior_param_dict, g, F)
q = msvi.posterior.SingleShootingPosterior(posterior_param_dict, F)
elbo = msvi.elbo.SingleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def create_vms_elbo(g, F, param, S):
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([2]), False),
"sig0": Parameter(1.0 * torch.ones([2]), False),
"sigXi": Parameter(0.001 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(0.0 * torch.ones(g.param_count())),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(0.0 * torch.ones(F.param_count())),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
"gamma": Parameter(0.0 * torch.ones([S, param.max_len-1, 2])),
"log_tau": Parameter(-7.0 * torch.ones([S, param.max_len-1, 2])),
})
p = msvi.model.ModelNormal(prior_param_dict, g, F)
q = msvi.posterior.MultipleShootingPosterior(posterior_param_dict, F)
elbo = msvi.elbo.MultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def create_avms_elbo(g, F, h, param):
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([2]), False),
"sig0": Parameter(1.0 * torch.ones([2]), False),
"sigXi": Parameter(0.001 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(0.0 * torch.ones(g.param_count())),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(0.0 * torch.ones(F.param_count())),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
})
p = msvi.model.ModelNormal(prior_param_dict, g, F)
q = msvi.posterior.AmortizedMultipleShootingPosterior(posterior_param_dict, F, h)
elbo = msvi.elbo.AmortizedMultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
class TrueDynamicsFunction(nn.Module):
def __init__(self):
super().__init__()
self.weight = Parameter(torch.zeros(4)) # alpha, beta, gamma, delta
self.bias = Parameter(torch.zeros(1)) # dummy parameter required for compatibility with msvi.trans_func
def forward(self, x):
alpha, beta, gamma, delta = self.weight
x1, x2 = x[..., [0]], x[..., [1]]
dxdt = torch.zeros_like(x)
dxdt[..., [0]] = alpha * x1 - beta * x1 * x2
dxdt[..., [1]] = delta * x1 * x2 - gamma * x2
return dxdt
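# Note: TrueDynamicsFunction reproduces the Lotka-Volterra equations from lv_dynamics above,
# with (alpha, beta, gamma, delta) stored as a learnable weight vector so the sanity check can
# compare the inferred values against LV_PARAM; the scalar bias is never used by forward().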
class Decoder(msvi.decoder.IDecoder):
def __init__(self, sigY: float) -> None:
super().__init__()
self.sigY = sigY
def forward(self, x: torch.Tensor) -> torch.Tensor:
S, M, D = x.shape
p = torch.empty((S, M, 1, D, 2), device=x.device)
p[:, :, 0, :, 0] = x
p[:, :, 0, :, 1] = self.sigY
return p
def set_param(self, param: torch.Tensor) -> None:
return None
def param_count(self) -> int:
return 0
| 7,855 | 31.733333 | 126 | py |
msvi | msvi-main/experiments/pendulum/val.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.pendulum as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("val")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
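# Evaluation loop below (summary): for each validation batch, the observations with t <= delta_inf
# form the inference window, n_mc_samples initial latent states are drawn from the recognition
# network, a full trajectory is rolled out from each sample, and the Monte Carlo average of the
# rollouts is scored against the ground truth with a per-trajectory MSE.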
with torch.no_grad():
losses = []
for batch in tqdm(val_loader, total=len(val_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_val_loss": mean_loss}) # type: ignore
| 1,791 | 25.352941 | 121 | py |
msvi | msvi-main/experiments/pendulum/test.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.pendulum as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("test")
# Load data.
train_dataset, val_dataset, test_dataset = data_utils.create_datasets(param)
train_loader, val_loader, test_loader = data_utils.create_dataloaders(param, train_dataset, val_dataset, test_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(test_loader, total=len(test_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_test_loss": mean_loss}) # type: ignore
| 1,817 | 25.735294 | 121 | py |
msvi | msvi-main/experiments/pendulum/utils.py | import os
from collections import deque
import numpy as np
import torch
import msvi.posterior
from einops import rearrange
ndarray = np.ndarray
Tensor = torch.Tensor
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
def save_model(model, path, name):
if not os.path.isdir(path):
os.makedirs(path)
torch.save(model.state_dict(), path+name+".pt")
def load_model(model, path, name, device):
model.load_state_dict(torch.load(path+name+".pt", map_location=device), strict=False)
def get_inference_data(t: Tensor, y: Tensor, delta_inf: float) -> tuple[list[Tensor], list[Tensor]]:
t_inf, y_inf = [], []
for i in range(t.shape[0]):
inf_inds = torch.argwhere(t[[i]] <= delta_inf)[:, 1]
t_inf.append(t[[i]][:, inf_inds, :])
y_inf.append(y[[i]][:, inf_inds, :, :])
return t_inf, y_inf
def get_x0(elbo, t: list[Tensor], y: list[Tensor]) -> Tensor:
x0 = []
for ti, yi in zip(t, y):
elbo.q.rec_net.update_time_grids(ti)
gamma, tau = elbo.q.rec_net(yi)
x0.append(gamma[:, [0], :] + tau[:, [0], :] * torch.randn_like(tau[:, [0], :]))
return torch.cat(x0)
def _pred_full_traj(elbo, t: Tensor, x0: Tensor) -> Tensor:
elbo.p.set_theta(elbo.q.sample_theta())
S, M, K = x0.shape[0], t.shape[1], x0.shape[2]
x = torch.zeros((S, M, K), dtype=x0.dtype, device=x0.device)
x[:, [0], :] = x0
for i in range(1, M):
x[:, [i], :] = elbo.p.F(x[:, [i-1], :], t=msvi.posterior.extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
return elbo.p._sample_lik(x)
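# A sketch of what _pred_full_traj does: starting from the inferred initial latent state x0, the
# latent trajectory is rolled out one step at a time through the transition function elbo.p.F on
# consecutive pairs of time points, and the decoder likelihood is then sampled to map the latent
# rollout back to observation space.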
def pred_full_traj(param, elbo, t: Tensor, y: Tensor) -> Tensor:
t_inf, y_inf = get_inference_data(t, y, param.delta_inf)
x0 = get_x0(elbo, t_inf, y_inf)
y_full_traj = _pred_full_traj(elbo, t, x0)
return y_full_traj
class BatchMovingAverage():
"""Computes moving average over the last `k` mini-batches
and stores the smallest recorded moving average in `min_avg`."""
def __init__(self, k: int) -> None:
self.values = deque([], maxlen=k)
self.min_avg = np.inf
def add_value(self, value: float) -> None:
self.values.append(value)
def get_average(self) -> float:
if len(self.values) == 0:
avg = np.nan
else:
avg = sum(self.values) / len(self.values)
if avg < self.min_avg:
self.min_avg = avg
return avg
def get_min_average(self):
return self.min_avg
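# Illustrative usage, mirroring train.py:
#   bma = BatchMovingAverage(k=10)
#   bma.add_value(val_full_traj_mse)
#   if bma.get_average() <= bma.get_min_average():
#       save_model(elbo, model_folder, name)
# i.e. a checkpoint is saved whenever the 10-batch moving average of the validation loss
# reaches a new minimum (get_average() also updates the stored minimum as a side effect).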
def kl_norm_norm(mu0: Tensor, mu1: Tensor, sig0: Tensor, sig1: Tensor) -> Tensor:
"""Calculates KL divergence between two K-dimensional Normal
distributions with diagonal covariance matrices.
Args:
mu0: Mean of the first distribution. Has shape (*, K).
mu1: Mean of the second distribution. Has shape (*, K).
        sig0: Standard deviations (square roots of the covariance diagonal) of the first distribution. Has shape (*, K).
        sig1: Standard deviations (square roots of the covariance diagonal) of the second distribution. Has shape (*, K).
Returns:
KL divergence between the distributions. Has shape (*, 1).
"""
assert mu0.shape == mu1.shape == sig0.shape == sig1.shape, (f"{mu0.shape=} {mu1.shape=} {sig0.shape=} {sig1.shape=}")
a = (sig0 / sig1).pow(2).sum(-1, keepdim=True)
b = ((mu1 - mu0).pow(2) / sig1**2).sum(-1, keepdim=True)
c = 2 * (torch.log(sig1) - torch.log(sig0)).sum(-1, keepdim=True)
kl = 0.5 * (a + b + c - mu0.shape[-1])
return kl
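# The closed form implemented above, per trailing dimension of size K:
#   KL(N(mu0, diag(sig0^2)) || N(mu1, diag(sig1^2)))
#     = 0.5 * ( sum_k sig0_k^2 / sig1_k^2
#             + sum_k (mu1_k - mu0_k)^2 / sig1_k^2
#             + 2 * sum_k (log sig1_k - log sig0_k)
#             - K )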
def create_mask(x: Tensor) -> Tensor:
"""Masks the 'velocity' part of the latent space since we want to use
    only the 'position' to reconstruct the observations."""
K = x.shape[2]
mask = torch.ones_like(x)
mask[:, :, K//2:] = 0.0
return mask
def param_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.data.norm(2).item()
return total_norm
def grad_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.grad.data.norm(2).item()
return total_norm
def split_trajectories(t, y, new_traj_len, batch_size):
s, m, n, d = y.shape
t_new = torch.empty((s, m-new_traj_len+1, new_traj_len, 1), dtype=t.dtype, device=t.device)
y_new = torch.empty((s, m-new_traj_len+1, new_traj_len, n, d), dtype=y.dtype, device=y.device)
for i in range(m - new_traj_len + 1):
t_new[:, i] = t[:, i:i+new_traj_len]
y_new[:, i] = y[:, i:i+new_traj_len]
t_new = rearrange(t_new, "a b c () -> (a b) c ()")
t_new -= torch.min(t_new, dim=1, keepdim=True)[0]
y_new = rearrange(y_new, "a b c n d -> (a b) c n d")
inds = np.random.choice(t_new.shape[0], size=batch_size, replace=False)
t_new = t_new[inds]
y_new = y_new[inds]
return t_new, y_new
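# Shape summary for split_trajectories: given t of shape (s, m, 1) and y of shape (s, m, n, d),
# every length-new_traj_len window of each trajectory is extracted, the windows are flattened
# into the batch dimension, each window's time grid is shifted to start at 0, and batch_size
# windows are sampled without replacement, giving outputs of shape (batch_size, new_traj_len, 1)
# and (batch_size, new_traj_len, n, d).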
| 4,823 | 30.122581 | 121 | py |
msvi | msvi-main/experiments/pendulum/train.py | from types import SimpleNamespace
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from tqdm import tqdm
import msvi.utils.pendulum as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("train")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
# Training.
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = data_utils.get_scheduler(optimizer, param.n_iters, param.lr)
bma = utils.BatchMovingAverage(k=10)
data_transform = data_utils.get_data_transform()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
utils.set_seed(param.seed)
for i in tqdm(range(param.n_iters), total=param.n_iters):
elbo.train()
t, y, traj_inds = [bi.to(device) for bi in next(iter(train_loader))]
# t = t + (torch.rand_like(t) - 0.5) * 2 * param.sigT
y = data_transform(y)
L1, L2, L3, x, s = elbo(t, y, traj_inds, param.block_size, scaler=1.0)
L1 *= len(train_dataset) / param.batch_size
L2 *= len(train_dataset) / param.batch_size
loss = -(L1 - L2 - L3)
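    # Note on the objective: L1 and L2 are per-trajectory terms and are therefore rescaled from
    # the minibatch to the full dataset of size len(train_dataset), while L3 appears to be a
    # single dataset-level term and is left unscaled; the loss is the negative ELBO, -(L1 - L2 - L3).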
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# Validation on full trajectory predictions.
if i % int(0.00333 * param.n_iters) == 0 or i == param.n_iters - 1:
with torch.no_grad():
elbo.eval()
t_val, y_val, _ = [bi.to(device) for bi in next(iter(val_loader))]
y_full_traj = utils.pred_full_traj(param, elbo, t, y)
y_val_full_traj = utils.pred_full_traj(param, elbo, t_val, y_val)
train_full_traj_mse = nn.MSELoss()(y_full_traj, y).item()
val_full_traj_mse = nn.MSELoss()(y_val_full_traj, y_val).item()
bma.add_value(val_full_traj_mse)
if bma.get_average() <= bma.get_min_average():
utils.save_model(elbo, param.model_folder, param.name)
wandb.log(
{
"-L1": -L1.item(),
"L2": L2.item(),
"L3": L3.item(),
"-ELBO": loss.item(),
"train_full_traj_mse": train_full_traj_mse,
"val_full_traj_mse": val_full_traj_mse,
"lr": optimizer.param_groups[0]["lr"],
"scaler": 1.0,
},
step=i
)
if param.visualize == 1:
data_utils.visualize_trajectories(
traj=[
y[[0]].detach().cpu().numpy(),
y_full_traj[[0]].detach().cpu().numpy(),
y_val[[0]].detach().cpu().numpy(),
y_val_full_traj[[0]].detach().cpu().numpy(),
],
vis_inds=list(range(y.shape[1]))[:-1:max(1, int(0.09*y.shape[1]))],
title=f"Iteration {i}",
path=f"./img/{param.name}/",
img_name=f"iter_{i}.png",
)
| 3,558 | 29.681034 | 107 | py |
msvi | msvi-main/experiments/bballs/val.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.bballs as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("val")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(val_loader, total=len(val_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_val_loss": mean_loss}) # type: ignore
| 1,789 | 25.323529 | 121 | py |
msvi | msvi-main/experiments/bballs/test.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.bballs as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("test")
# Load data.
train_dataset, val_dataset, test_dataset = data_utils.create_datasets(param)
train_loader, val_loader, test_loader = data_utils.create_dataloaders(param, train_dataset, val_dataset, test_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(test_loader, total=len(test_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_test_loss": mean_loss}) # type: ignore
| 1,815 | 25.705882 | 121 | py |
msvi | msvi-main/experiments/bballs/utils.py | import os
from collections import deque
import numpy as np
import torch
import msvi.posterior
from einops import rearrange
ndarray = np.ndarray
Tensor = torch.Tensor
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
def save_model(model, path, name):
if not os.path.isdir(path):
os.makedirs(path)
torch.save(model.state_dict(), path+name+".pt")
def load_model(model, path, name, device):
model.load_state_dict(torch.load(path+name+".pt", map_location=device), strict=False)
def get_inference_data(t: Tensor, y: Tensor, delta_inf: float) -> tuple[list[Tensor], list[Tensor]]:
t_inf, y_inf = [], []
for i in range(t.shape[0]):
inf_inds = torch.argwhere(t[[i]] <= delta_inf)[:, 1]
t_inf.append(t[[i]][:, inf_inds, :])
y_inf.append(y[[i]][:, inf_inds, :, :])
return t_inf, y_inf
def get_x0(elbo, t: list[Tensor], y: list[Tensor]) -> Tensor:
x0 = []
for ti, yi in zip(t, y):
elbo.q.rec_net.update_time_grids(ti)
gamma, tau = elbo.q.rec_net(yi)
x0.append(gamma[:, [0], :] + tau[:, [0], :] * torch.randn_like(tau[:, [0], :]))
return torch.cat(x0)
def _pred_full_traj(elbo, t: Tensor, x0: Tensor) -> Tensor:
elbo.p.set_theta(elbo.q.sample_theta())
S, M, K = x0.shape[0], t.shape[1], x0.shape[2]
x = torch.zeros((S, M, K), dtype=x0.dtype, device=x0.device)
x[:, [0], :] = x0
for i in range(1, M):
x[:, [i], :] = elbo.p.F(x[:, [i-1], :], t=msvi.posterior.extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
return elbo.p._sample_lik(x)
def pred_full_traj(param, elbo, t: Tensor, y: Tensor) -> Tensor:
t_inf, y_inf = get_inference_data(t, y, param.delta_inf)
x0 = get_x0(elbo, t_inf, y_inf)
y_full_traj = _pred_full_traj(elbo, t, x0)
return y_full_traj
class BatchMovingAverage():
"""Computes moving average over the last `k` mini-batches
and stores the smallest recorded moving average in `min_avg`."""
def __init__(self, k: int) -> None:
self.values = deque([], maxlen=k)
self.min_avg = np.inf
def add_value(self, value: float) -> None:
self.values.append(value)
def get_average(self) -> float:
if len(self.values) == 0:
avg = np.nan
else:
avg = sum(self.values) / len(self.values)
if avg < self.min_avg:
self.min_avg = avg
return avg
def get_min_average(self):
return self.min_avg
def kl_norm_norm(mu0: Tensor, mu1: Tensor, sig0: Tensor, sig1: Tensor) -> Tensor:
"""Calculates KL divergence between two K-dimensional Normal
distributions with diagonal covariance matrices.
Args:
mu0: Mean of the first distribution. Has shape (*, K).
mu1: Mean of the second distribution. Has shape (*, K).
        sig0: Standard deviations (square roots of the covariance diagonal) of the first distribution. Has shape (*, K).
        sig1: Standard deviations (square roots of the covariance diagonal) of the second distribution. Has shape (*, K).
Returns:
KL divergence between the distributions. Has shape (*, 1).
"""
assert mu0.shape == mu1.shape == sig0.shape == sig1.shape, (f"{mu0.shape=} {mu1.shape=} {sig0.shape=} {sig1.shape=}")
a = (sig0 / sig1).pow(2).sum(-1, keepdim=True)
b = ((mu1 - mu0).pow(2) / sig1**2).sum(-1, keepdim=True)
c = 2 * (torch.log(sig1) - torch.log(sig0)).sum(-1, keepdim=True)
kl = 0.5 * (a + b + c - mu0.shape[-1])
return kl
def create_mask(x: Tensor) -> Tensor:
"""Masks the 'velocity' part of the latent space since we want to use
    only the 'position' to reconstruct the observations."""
K = x.shape[2]
mask = torch.ones_like(x)
mask[:, :, K//2:] = 0.0
return mask
def param_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.data.norm(2).item()
return total_norm
def grad_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.grad.data.norm(2).item()
return total_norm
def split_trajectories(t, y, new_traj_len, batch_size):
s, m, n, d = y.shape
t_new = torch.empty((s, m-new_traj_len+1, new_traj_len, 1), dtype=t.dtype, device=t.device)
y_new = torch.empty((s, m-new_traj_len+1, new_traj_len, n, d), dtype=y.dtype, device=y.device)
for i in range(m - new_traj_len + 1):
t_new[:, i] = t[:, i:i+new_traj_len]
y_new[:, i] = y[:, i:i+new_traj_len]
t_new = rearrange(t_new, "a b c () -> (a b) c ()")
t_new -= torch.min(t_new, dim=1, keepdim=True)[0]
y_new = rearrange(y_new, "a b c n d -> (a b) c n d")
inds = np.random.choice(t_new.shape[0], size=batch_size, replace=False)
t_new = t_new[inds]
y_new = y_new[inds]
return t_new, y_new
| 4,823 | 30.122581 | 121 | py |