id | file_name | file_path | content | size | language | extension | total_lines | avg_line_length | max_line_length | alphanum_fraction | repo_name | repo_stars | repo_forks | repo_open_issues | repo_license | repo_extraction_date |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,287,200 | rand_dataset.py | dptech-corp_NAG2G/NAG2G/data/rand_dataset.py | # Copyright (c) DP Technology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from scipy.spatial import distance_matrix
from functools import lru_cache
from torch.utils.data.dataloader import default_collate
from unicore.data import data_utils, UnicoreDataset
class RandomLabelDataset(UnicoreDataset):
def __init__(self, maxv, nrow):
super().__init__()
self.maxv = maxv
self.nrow = nrow
@lru_cache(maxsize=16)
def __getitem__(self, index):
return np.random.randint(self.maxv)
def __len__(self):
return self.nrow
def collater(self, samples):
return torch.tensor(samples)
class RandomDataset(UnicoreDataset):
def __init__(self, ncol, nrow, maxv):
super().__init__()
self.nrow = nrow
self.ncol = ncol
self.maxv = maxv
@lru_cache(maxsize=16)
def __getitem__(self, index):
with data_utils.numpy_seed(index):
size = np.random.randint(self.ncol // 2 + 1, self.ncol)
val = np.random.randint(self.maxv, size=size)
return torch.tensor(val).long()
def __len__(self):
return self.nrow
def collater(self, samples):
return default_collate(samples)
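# Minimal usage sketch (editor's addition, not part of the original module):
if __name__ == "__main__":
    # RandomLabelDataset yields scalar labels; its collater stacks them into a 1-D tensor.
    labels = RandomLabelDataset(maxv=10, nrow=100)
    print(labels.collater([labels[i] for i in range(4)]))  # e.g. tensor([3, 7, 0, 5])
    # RandomDataset yields variable-length integer rows whose contents are seeded by the index.
    rows = RandomDataset(ncol=16, nrow=100, maxv=32)
    print(rows[0].shape)  # a 1-D LongTensor with between ncol // 2 + 1 and ncol - 1 entries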
| 1,363 | Python | .py | 38 | 29.815789 | 67 | 0.666413 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,201 | graph_features.py | dptech-corp_NAG2G/NAG2G/data/graph_features.py | # Copyright (c) DP Technology.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from functools import lru_cache
from unicore.data import BaseWrapperDataset, data_utils
from numba import njit
from .collator import *
@njit
def floyd_warshall(M, path):
(nrows, ncols) = M.shape
assert nrows == ncols
n = nrows
# set unreachable nodes distance to 510
for i in range(n):
for j in range(n):
if M[i, j] == 0:
M[i, j] = 510
for i in range(n):
M[i, i] = 0
    # Floyd-Warshall algorithm
for k in range(n):
for i in range(n):
for j in range(n):
cost_ikkj = M[i, k] + M[k, j]
if M[i, j] > cost_ikkj:
M[i, j] = cost_ikkj
path[i, j] = k
for i in range(n):
for j in range(n):
if M[i, j] >= 510:
path[i, j] = 510
M[i, j] = 510
return M, path
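# Worked example (editor's addition): for a three-atom chain 0-1-2,
#   adj  = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.int32)
#   path = np.full((3, 3), -1, dtype=np.int32)
#   dist, path = floyd_warshall(adj, path)
# gives dist[0, 2] == 2 with path[0, 2] == 1 (the intermediate atom), while any
# unreachable pair would keep the sentinel value 510.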
def get_all_edges(path, i, j, max_dist):
if max_dist <= 0:
return []
k = path[i][j]
if k == -1:
return []
else:
left = get_all_edges(path, i, k, max_dist - 1)
if len(left) + 1 >= max_dist:
return left + [k]
right = get_all_edges(path, k, j, max_dist - len(left) - 1)
return left + [k] + right
# @njit
def gen_edge_input(max_dist, path_copy, edge_feat):
(nrows, ncols) = path_copy.shape
assert nrows == ncols
n = nrows
max_dist_copy = max_dist
edge_fea_all = -1 * np.ones(
[n, n, max_dist_copy, edge_feat.shape[-1]], dtype=np.int32
)
for i in range(n):
for j in range(n):
if i == j:
continue
if path_copy[i][j] == 510:
continue
path = (
[i] + get_all_edges(path_copy, i, j, max_dist=max_dist_copy + 1) + [j]
)
num_path = min(len(path) - 1, max_dist_copy)
for k in range(num_path):
edge_fea_all[i, j, k, :] = edge_feat[path[k], path[k + 1], :]
return edge_fea_all
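# Shape note (editor's addition): the returned array has shape
# [n, n, max_dist, n_edge_features]; entry [i, j, k, :] holds the features of the
# (k + 1)-th bond along the shortest path from node i to node j, and stays -1
# beyond the path length, on the diagonal, and for unreachable pairs.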
def convert_to_single_emb(x, offset: int = 512):
feature_num = x.shape[-1] if len(x.shape) > 1 else 1
feature_offset = 1 + np.arange(0, feature_num * offset, offset, dtype=np.int32)
x = x + feature_offset
return x
def convert_to_single_emb_torch(x, offset: int = 512):
feature_num = x.size(-1) if len(x.size()) > 1 else 1
feature_offset = 1 + torch.arange(0, feature_num * offset, offset, dtype=torch.long)
x = x + feature_offset
return x
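# Example (editor's addition): with the default offset of 512, a node feature
# vector [a, b, c] becomes [a + 1, b + 513, c + 1025], so every feature column is
# mapped into its own non-overlapping range of embedding indices.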
def preprocess_item(item, want_edge_input=True):
edge_attr, edge_index, x = (
item["edge_attr"],
item["edge_index"],
item["node_attr"],
)
N = x.shape[0]
x = convert_to_single_emb(x)
# node adj matrix [N, N] bool
adj = np.zeros([N, N], dtype=np.int32)
adj[edge_index[0, :], edge_index[1, :]] = 1
degree = adj.sum(axis=-1)
# edge feature here
if len(edge_attr.shape) == 1:
edge_attr = edge_attr[:, None]
attn_edge_type = np.zeros([N, N, edge_attr.shape[-1]], dtype=np.int32)
attn_edge_type[edge_index[0, :], edge_index[1, :]] = (
convert_to_single_emb(edge_attr) + 1
)
path = np.full([N, N], -1, dtype=np.int32)
shortest_path_result, path = floyd_warshall(adj, path)
if want_edge_input:
max_dist = min(np.amax(shortest_path_result), 6)
edge_input = gen_edge_input(max_dist, path, attn_edge_type)
spatial_pos = torch.from_numpy((shortest_path_result)).long()
attn_bias = torch.zeros([N + 1, N + 1], dtype=torch.float) # with graph token
# combine
feat = {}
feat["x"] = torch.from_numpy(x).long()
feat["attn_bias"] = attn_bias
feat["attn_edge_type"] = torch.from_numpy(attn_edge_type).long()
feat["spatial_pos"] = spatial_pos
feat["in_degree"] = torch.from_numpy(degree).long().view(-1)
feat["out_degree"] = feat["in_degree"] # for undirected graph
if want_edge_input:
feat["edge_input"] = torch.from_numpy(edge_input).long()
else:
feat["edge_input"] = None
return feat
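# Input/output sketch (editor's addition): preprocess_item expects a dict with integer
# arrays "node_attr" of shape [N, F], "edge_index" of shape [2, E] and "edge_attr" of
# shape [E] or [E, Fe], and returns the Graphormer-style features used below
# ("x", "attn_bias", "attn_edge_type", "spatial_pos", "in_degree", "out_degree", "edge_input").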
class GraphFeatures(BaseWrapperDataset):
def __init__(self, dataset, pos_dataset, want_edge_input=True, add_len=0):
super().__init__(dataset)
self.dataset = dataset
self.pos_dataset = pos_dataset
self.want_edge_input = want_edge_input
self.add_len = add_len
if self.add_len < 0:
self.add_len = 0
@lru_cache(maxsize=16)
def __getitem__(self, idx):
data = self.dataset[idx]
feat = preprocess_item(data, want_edge_input=self.want_edge_input)
if self.pos_dataset is not None:
pos = self.pos_dataset[idx]
feat["pos"] = torch.from_numpy(pos)
else:
feat["pos"] = torch.zeros([feat["x"].shape[0], 3], dtype=torch.float)
return feat
def collater(self, items):
multi_hop_max_dist = 5
spatial_pos_max = 1024
if self.want_edge_input:
items = [
(
item["attn_bias"],
item["attn_edge_type"],
item["spatial_pos"],
item["in_degree"],
item["out_degree"],
item["x"],
item["edge_input"][:, :, :multi_hop_max_dist, :],
item["pos"],
)
for item in items
]
else:
items = [
(
item["attn_bias"],
item["attn_edge_type"],
item["spatial_pos"],
item["in_degree"],
item["out_degree"],
item["x"],
None,
item["pos"],
)
for item in items
]
(
attn_biases,
attn_edge_types,
spatial_poses,
in_degrees,
out_degrees,
xs,
edge_inputs,
poses,
) = zip(*items)
for idx, _ in enumerate(attn_biases):
attn_biases[idx][1:, 1:][spatial_poses[idx] >= spatial_pos_max] = float(
"-inf"
)
max_node_num = max(i.size(0) for i in xs)
max_node_num = max_node_num + self.add_len
max_node_num = (max_node_num + 1 + 3) // 4 * 4 - 1
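        # (editor's note) the line above rounds max_node_num + 1 (node count plus the
        # graph token) up to a multiple of 4; e.g. max_node_num = 10 becomes 11, so that
        # the padded attention size 11 + 1 = 12 is divisible by 4.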
x = torch.cat([pad_2d_unsqueeze(i, max_node_num) for i in xs])
if self.want_edge_input:
max_dist = max(i.size(-2) for i in edge_inputs)
edge_input = torch.cat(
[
pad_3d_unsqueeze(i, max_node_num, max_node_num, max_dist)
for i in edge_inputs
]
)
else:
edge_input = None
attn_bias = torch.cat(
[pad_attn_bias_unsqueeze(i, max_node_num + 1) for i in attn_biases]
)
attn_edge_type = torch.cat(
[pad_edge_type_unsqueeze(i, max_node_num) for i in attn_edge_types]
)
spatial_pos = torch.cat(
[pad_spatial_pos_unsqueeze(i, max_node_num) for i in spatial_poses]
)
in_degree = torch.cat([pad_1d_unsqueeze(i, max_node_num) for i in in_degrees])
pos = torch.cat([pad_pos_unsqueeze(i, max_node_num) for i in poses])
node_type_edges = []
for idx in range(len(items)):
node_atom_type = items[idx][5][:, 0]
n_nodes = items[idx][5].shape[0]
node_atom_i = node_atom_type.unsqueeze(-1).repeat(1, n_nodes)
node_atom_i = pad_spatial_pos_unsqueeze(
node_atom_i, max_node_num
).unsqueeze(-1)
node_atom_j = node_atom_type.unsqueeze(0).repeat(n_nodes, 1)
node_atom_j = pad_spatial_pos_unsqueeze(
node_atom_j, max_node_num
).unsqueeze(-1)
node_atom_edge = torch.cat([node_atom_i, node_atom_j], dim=-1)
node_atom_edge = convert_to_single_emb_torch(node_atom_edge)
node_type_edges.append(node_atom_edge.long())
node_type_edge = torch.cat(node_type_edges)
if not self.want_edge_input:
edge_input = attn_edge_type
return dict(
attn_bias=attn_bias,
attn_edge_type=attn_edge_type,
spatial_pos=spatial_pos,
in_degree=in_degree,
out_degree=in_degree, # for undirected graph
x=x,
edge_input=edge_input,
pos=pos,
node_type_edge=node_type_edge,
)
class ShortestPathDataset(BaseWrapperDataset):
def __init__(self, dataset, has_bos=True, has_eos=True):
super().__init__(dataset)
self.dataset = dataset
self.has_bos = has_bos
self.has_eos = has_eos
@lru_cache(maxsize=16)
def __getitem__(self, idx):
data = self.dataset[idx]
num_atoms = data["atoms"].shape[0]
offset = 0
if self.has_bos:
num_atoms += 1
offset = 1
if self.has_eos:
num_atoms += 1
        adj = np.full(
            (num_atoms, num_atoms),
            510,
            dtype=int,  # np.int was removed in NumPy >= 1.24
        )
        edge_index = data["edge_index"]
        adj[edge_index[0, :] + offset, edge_index[1, :] + offset] = 1
        # floyd_warshall expects an explicit path buffer as its second argument
        path = np.full((num_atoms, num_atoms), -1, dtype=np.int32)
        shortest_path_result, _ = floyd_warshall(adj, path)
# shortest_path_result[shortest_path_result > 510] = 510
spatial_pos = torch.from_numpy((shortest_path_result)).long()
return spatial_pos
class DegreeDataset(BaseWrapperDataset):
def __init__(self, dataset, has_bos=True, has_eos=True):
super().__init__(dataset)
self.dataset = dataset
self.has_bos = has_bos
self.has_eos = has_eos
@lru_cache(maxsize=16)
def __getitem__(self, idx):
data = self.dataset[idx]
num_atoms = data["atoms"].shape[0]
offset = 0
if self.has_bos:
num_atoms += 1
offset = 1
if self.has_eos:
num_atoms += 1
adj = np.full(
(num_atoms, num_atoms),
0,
            dtype=int,
)
edge_index = data["edge_index"]
adj[edge_index[0, :] + offset, edge_index[1, :] + offset] = 1
# +1 for padding
degree = np.sum(adj, axis=1) + 1
return torch.from_numpy(degree).long()
def collate_1d_features(
values,
pad_idx,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
v = values[0]
size = max(v.size(0) for v in values)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size, v.shape[-1]).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(
v,
res[i][: len(v), :],
)
return res
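# Example (editor's addition): with pad_to_multiple=8, feature tensors of lengths
# 5, 9 and 12 are padded to a common length of 16 (the next multiple of 8),
# producing a batch of shape (3, 16, num_features) filled with pad_idx.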
class AtomFeatDataset(BaseWrapperDataset):
def __init__(
self, dataset, num_features=8, num_vals=16, has_bos=True, has_eos=True
):
super().__init__(dataset)
self.dataset = dataset
self.num_features = num_features
self.num_vals = num_vals
self.has_bos = has_bos
self.has_eos = has_eos
@lru_cache(maxsize=16)
def __getitem__(self, idx):
data = self.dataset[idx]
num_atoms = data["atoms"].shape[0]
offset = 0
if self.has_bos:
num_atoms += 1
offset = 1
if self.has_eos:
num_atoms += 1
feat = np.full(
(num_atoms, self.num_features),
1,
            dtype=int,
)
node_attr = data["node_attr"]
# skip first dimension
feat[offset : offset + node_attr.shape[0], :] = node_attr[:, 1:] + 2
for i in range(self.num_features):
feat[:, i] += i * self.num_vals
return torch.from_numpy(feat).long()
def collater(self, samples):
return collate_1d_features(samples, 0, pad_to_multiple=8)
def collate_2d_features(
values,
pad_idx,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
v = values[0]
size = max(v.size(0) for v in values)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size, size, v.shape[-1]).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(
v,
res[i][: len(v), : len(v), :],
)
return res
class BondDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
num_features=4,
num_vals=8,
has_bos=True,
has_eos=True,
):
super().__init__(dataset)
self.dataset = dataset
self.num_features = num_features
self.num_vals = num_vals
self.has_bos = has_bos
self.has_eos = has_eos
@lru_cache(maxsize=16)
def __getitem__(self, idx):
data = self.dataset[idx]
num_atoms = data["atoms"].shape[0]
offset = 0
if self.has_bos:
num_atoms += 1
offset = 1
if self.has_eos:
num_atoms += 1
edge_feat = np.full(
(num_atoms, num_atoms, self.num_features),
0,
            dtype=int,
)
edge_index = data["edge_index"]
edge_attr = data["edge_attr"]
        # not connected
edge_feat[:, :, 0] = 1
# self connected
for i in range(num_atoms):
edge_feat[i, i, 0] = 2
# bond connected
edge_feat[edge_index[0, :] + offset, edge_index[1, :] + offset, 0] = 3
# other bond features
edge_feat[edge_index[0, :] + offset, edge_index[1, :] + offset, 1:] = (
edge_attr + 1
)
for i in range(self.num_features):
# add offset
edge_feat[:, :, i] += self.num_vals * i
return torch.from_numpy(edge_feat).long()
def collater(self, samples):
return collate_2d_features(samples, 0, pad_to_multiple=8)
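# (editor's note) channel 0 of the returned bond features encodes connectivity
# (1 = not bonded, 2 = self-loop, 3 = bonded), while the remaining channels carry
# the shifted bond attributes; each channel is then offset by num_vals * channel
# index so that all channels index disjoint embedding ranges.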
| 14,494 | Python | .py | 411 | 25.776156 | 88 | 0.530871 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,202 | empty_smiles_dataset.py | dptech-corp_NAG2G/NAG2G/data/empty_smiles_dataset.py | import logging
from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
import re
import pickle
from functools import lru_cache
from unicore.data import UnicoreDataset
logger = logging.getLogger(__name__)
def get_atom(smiles):
mol = Chem.MolFromSmiles(smiles)
mol = AllChem.AddHs(mol)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()] # after add H
return atoms
def get_coordinates(smiles, seed=42):
mol = Chem.MolFromSmiles(smiles)
mol = AllChem.AddHs(mol)
try:
res = AllChem.EmbedMolecule(mol, randomSeed=seed)
if res == 0:
AllChem.MMFFOptimizeMolecule(mol)
coordinates = mol.GetConformer().GetPositions().astype(np.float32)
elif res == -1:
mol_tmp = Chem.MolFromSmiles(smiles)
AllChem.EmbedMolecule(mol_tmp, maxAttempts=5000, randomSeed=seed)
mol_tmp = AllChem.AddHs(mol_tmp, addCoords=True)
AllChem.MMFFOptimizeMolecule(mol_tmp)
coordinates = mol_tmp.GetConformer().GetPositions().astype(np.float32)
    except Exception:
        AllChem.Compute2DCoords(mol)
        coordinates = mol.GetConformer().GetPositions().astype(np.float32)
    assert len(mol.GetAtoms()) == len(
        coordinates
    ), "2D coordinates shape does not align with {}".format(smiles)
return [coordinates]
def smi_tokenizer(smi):
"""
Tokenize a SMILES molecule or reaction
"""
pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
regex = re.compile(pattern)
tokens = [token for token in regex.findall(smi)]
assert smi == "".join(tokens)
return tokens
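# Example (editor's addition): smi_tokenizer("CC(=O)O") returns
# ['C', 'C', '(', '=', 'O', ')', 'O']; multi-character tokens such as "Cl", "Br"
# or bracket atoms like "[nH]" are kept as single tokens.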
class EMPTY_SMILES_Dataset(UnicoreDataset):
def __init__(self, init_values, seed=42):
self.key = []
self.epoch = 0
self.put_smiles_in(smiles=init_values, seed=seed)
def set_epoch(self, epoch, **unused):
pass
def put_smiles_in(self, smiles, seed=42):
self.epoch = (self.epoch + 1) % 100000
dict_ = {"target_id": smiles}
dict_["target_atoms"] = get_atom(smiles)
dict_["target_coordinates"] = get_coordinates(smiles, seed=seed)
dict_["smiles_target_list"] = [smiles]
dict_["smiles_target"] = smi_tokenizer(smiles)
self.key = [dict_]
def __len__(self):
return 1
def __getitem__(self, index: int):
return self.__getitem_cached__(self.epoch, index)
@lru_cache(maxsize=16)
def __getitem_cached__(self, epoch: int, index: int):
return self.key[index]
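# Usage sketch (editor's addition): ds = EMPTY_SMILES_Dataset("CCO") builds a
# single-item dataset where ds[0]["target_atoms"] lists the element symbols of the
# H-added molecule, ds[0]["target_coordinates"] holds one embedded conformer, and
# ds[0]["smiles_target"] is the tokenized SMILES; put_smiles_in() swaps in a new molecule.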
class EMPTY_SMILES_Dataset_G2G(UnicoreDataset):
def __init__(self, name, init_values=None, seed=42):
self.name = name
self.key = []
self.epoch = 0
self.put_smiles_in(init_values)
def set_epoch(self, epoch, **unused):
pass
def put_smiles_in(self, smiles):
if smiles is None:
return
self.epoch = (self.epoch + 1) % 100000
self.key = smiles
def __len__(self):
return len(self.key)
def __getitem__(self, index: int):
return self.__getitem_cached__(self.epoch, index)
@lru_cache(maxsize=16)
def __getitem_cached__(self, epoch: int, index: int):
return {self.name: self.key[index]}
| 3,278 | Python | .py | 86 | 31.313953 | 119 | 0.626381 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,203 | distance_dataset.py | dptech-corp_NAG2G/NAG2G/data/distance_dataset.py |
import numpy as np
import torch
from scipy.spatial import distance_matrix
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class DistanceDataset(BaseWrapperDataset):
def __init__(self, dataset):
super().__init__(dataset)
self.dataset = dataset
@lru_cache(maxsize=16)
def __getitem__(self, idx):
pos = self.dataset[idx].view(-1, 3).numpy()
# add eps to avoid zero distances
# dist = (pos.view(-1, 1, 3) - pos.view(1, -1, 3)).norm(dim=-1) + 1e-5
dist = distance_matrix(pos, pos).astype(np.float32)
return torch.from_numpy(dist)
class EdgeTypeDataset(BaseWrapperDataset):
def __init__(
self,
dataset: torch.utils.data.Dataset,
num_types: int
):
self.dataset = dataset
self.num_types = num_types
@lru_cache(maxsize=16)
def __getitem__(self, index: int):
node_input = self.dataset[index].clone()
offset = node_input.view(-1, 1) * \
self.num_types + node_input.view(1, -1)
return offset
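# Example (editor's addition): for node types [a, b] and num_types = K, the returned
# matrix is [[a*K + a, a*K + b], [b*K + a, b*K + b]], i.e. a distinct pairwise
# edge-type id for every ordered (i, j) node pair.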
class CrossDistanceDataset(BaseWrapperDataset):
def __init__(self, mol_dataset, pocket_dataset):
super().__init__(mol_dataset)
self.mol_dataset = mol_dataset
self.pocket_dataset = pocket_dataset
@lru_cache(maxsize=16)
def __getitem__(self, idx):
mol_pos = self.mol_dataset[idx].view(-1, 3).numpy()
pocket_pos = self.pocket_dataset[idx].view(-1, 3).numpy()
# add eps to avoid zero distances
# dist = (pos.view(-1, 1, 3) - pos.view(1, -1, 3)).norm(dim=-1) + 1e-5
dist = distance_matrix(mol_pos, pocket_pos).astype(np.float32)
return torch.from_numpy(dist)
| 1,721 | Python | .py | 43 | 33.046512 | 79 | 0.630631 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,204 | __init__.py | dptech-corp_NAG2G/NAG2G/data/__init__.py | from .customized_unicore_dataset import CustomizedUnicoreDataset
from .mask_points_dataset import MaskPointsDataset, MaskPointsPocketDataset
from .distance_dataset import DistanceDataset, EdgeTypeDataset, CrossDistanceDataset
from .rand_dataset import RandomDataset, RandomLabelDataset
from .key_dataset import KeyDataset
from .size_dataset import SizeDataset
from .reorder_dataset import ReorderDataset
from .pad_dataset import RightPadDatasetCoord, RightPadDatasetCross2D
from .list_shuffle_dataset import ListShuffleDataset
from .random_smiles_dataset import RandomSmilesDataset, ReorderSmilesDataset
from .bart_token_dataset import BartTokenDataset
from .empty_smiles_dataset import EMPTY_SMILES_Dataset, EMPTY_SMILES_Dataset_G2G
from .graphormer_dataset import (
CsvGraphormerDataset,
SmilesDataset,
GraphormerDataset,
ShuffleGraphormerDataset,
SeqGraphormerDataset,
ReorderGraphormerDataset,
ReorderCoordDataset,
SmilesDataset_2,
)
from .pad_dataset_3d import RightPadDataset3D
from .graph_features import GraphFeatures
from .bpe_tokenize_dataset import BpeTokenizeDataset
| 1,111 | Python | .py | 25 | 42.16 | 84 | 0.861878 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,205 | list_shuffle_dataset.py | dptech-corp_NAG2G/NAG2G/data/list_shuffle_dataset.py | # Copyright (c) DP Technology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
from unicore.data import BaseWrapperDataset
import random
import logging
logger = logging.getLogger(__name__)
class ListShuffleDataset(BaseWrapperDataset):
def __init__(self, dataset, prob=1.0):
self.dataset = dataset
self.prob = prob
def __len__(self):
return len(self.dataset)
@lru_cache(maxsize=16)
def __getitem__(self, idx):
tmp_list = self.dataset[idx]
if self.prob != 0 and random.random() < self.prob:
random.shuffle(tmp_list)
return tmp_list
| 748 | Python | .py | 21 | 30.714286 | 65 | 0.697642 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,206 | reorder_dataset.py | dptech-corp_NAG2G/NAG2G/data/reorder_dataset.py | # Copyright (c) DP Technology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import lmdb
import os
import pickle
import torch
import numpy as np
from functools import lru_cache
import logging
from unicore.data import data_utils, BaseWrapperDataset
logger = logging.getLogger(__name__)
class ReorderDataset(BaseWrapperDataset):
def __init__(self, dataset, seed, molstr, atoms, coordinates, nomalize=True):
self.dataset = dataset
self.seed = seed
self.atoms = atoms
self.molstr = molstr
self.coordinates = coordinates
self.nomalize = nomalize
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
# print('???',self.molstr )
if self.molstr == 'smi':
molstr = np.array([x for x in self.dataset[index][self.molstr]])
elif self.molstr == 'selfies':
molstr = np.array(self.dataset[index][self.molstr])
# else:
# atoms = np.array(self.dataset[index][self.atoms])
receptor = self.dataset[index]['receptor']
atoms = np.array(self.dataset[index][self.atoms])
assert len(atoms) > 0, (len(atoms), atoms, self.atoms, molstr)
# print('???',len(atoms))
size = len(self.dataset[index][self.coordinates])
with data_utils.numpy_seed(self.seed, epoch, index):
sample_idx = np.random.randint(size)
coordinates = self.dataset[index][self.coordinates][sample_idx]
if len(atoms) > 256:
np.random.seed(self.seed)
index = np.random.choice(len(atoms), 256, replace=False)
atoms = atoms[index]
coordinates = coordinates[index]
# normalize
if self.nomalize:
coordinates = coordinates - coordinates.mean(axis=0)
#print( self.atoms,len(atoms), self.molstr, len(molstr))
return {self.atoms: atoms, self.molstr: molstr, 'coordinates': coordinates.astype(np.float32), 'receptor': receptor}
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
| 2,311 | Python | .py | 54 | 35.518519 | 124 | 0.648444 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,207 | key_dataset.py | dptech-corp_NAG2G/NAG2G/data/key_dataset.py | # Copyright (c) DP Technology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import lru_cache
from unicore.data import BaseWrapperDataset
import logging
logger = logging.getLogger(__name__)
class KeyDataset(BaseWrapperDataset):
def __init__(self, dataset, key):
self.dataset = dataset
self.key = key
def __len__(self):
return len(self.dataset)
@lru_cache(maxsize=16)
def __getitem__(self, idx):
return self.dataset[idx][self.key]
| 614 | Python | .py | 18 | 30.166667 | 65 | 0.71912 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,208 | size_dataset.py | dptech-corp_NAG2G/NAG2G/data/size_dataset.py | # Copyright (c) DP Technology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from functools import lru_cache
from unicore.data import BaseWrapperDataset
import logging
logger = logging.getLogger(__name__)
class SizeDataset(BaseWrapperDataset):
def __init__(self, dataset, src_key_info, tgt_key_info):
self.dataset = dataset
self.src_size = np.array([])
self.tgt_size = np.array([])
self.cal_size_data(dataset, src_key_info, tgt_key_info)
def __len__(self):
return len(self.dataset)
@lru_cache(maxsize=16)
def __getitem__(self, idx):
return self.dataset[idx]
def cal_size_data(self, dataset, src_key_info, tgt_key_info):
for i in range(len(dataset)):
self.src_size = np.append(
self.src_size, len(dataset[i][src_key_info]))
self.tgt_size = np.append(
self.tgt_size, len(dataset[i][tgt_key_info]))
if i % 10000 == 0:
print('test dataset size: ', i)
def get_size_data(self):
return self.src_size, self.tgt_size
| 1,222 | Python | .py | 31 | 32.645161 | 65 | 0.644426 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,209 | beam_search_generator.py | dptech-corp_NAG2G/NAG2G/search_strategies/beam_search_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from unicore import utils
from .ngram_repeat_block import NGramRepeatBlock
from . import search
from .search_utils import (
collate_tokens,
strip_pad,
extract_hard_alignment,
extract_soft_alignment,
)
class SequenceGeneratorBeamSearch(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=300,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
tokens_to_suppress=(),
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
                currently supports fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.model_0 = models[0]
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.bos = tgt_dict.bos()
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.token_indices_to_suppress: Optional[Tensor] = None
token_indices_to_suppress = []
for token_string in tokens_to_suppress:
token_index = tgt_dict.index(token_string)
assert token_index != self.unk
token_indices_to_suppress.append(token_index)
if len(token_indices_to_suppress) > 0:
self.token_indices_to_suppress = torch.Tensor(
token_indices_to_suppress
).long()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.model.set_decoder_beam_size(self.beam_size)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.max_len = max_len or self.model.max_decoder_positions()
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
@torch.no_grad()
def generate(
self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs
) -> List[List[Dict[str, Tensor]]]:
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
src_tokens = self.model_0.get_src_tokens(sample)
if "src_lengths" in net_input:
src_lengths = net_input["src_lengths"]
else:
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
decoder_kwargs = {}
encoder_kwargs = {}
for k, v in sample["net_input"].items():
if "decoder" in k:
decoder_kwargs[k] = v
else:
encoder_kwargs[k] = v
# if "src_tokens" in net_input:
# src_tokens = net_input["src_tokens"]
# # length of the source text being the character length except EndOfSentence and pad
# # if src_lengths exists in net_input (speech_to_text dataset case), then use it
# if "src_lengths" in net_input:
# src_lengths = net_input["src_lengths"]
# else:
# src_lengths = (
# (src_tokens.ne(self.eos) & src_tokens.ne(self.pad))
# .long()
# .sum(dim=1)
# )
# elif "source" in net_input:
# src_tokens = net_input["source"]
# src_lengths = (
# net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
# if net_input["padding_mask"] is not None
# else torch.tensor(src_tokens.size(-1)).to(src_tokens)
# )
# elif "features" in net_input:
# src_tokens = net_input["features"]
# src_lengths = (
# net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
# if net_input["padding_mask"] is not None
# else torch.tensor(src_tokens.size(-1)).to(src_tokens)
# )
# else:
# raise Exception(
# "expected src_tokens or source in net input. input keys: "
# + str(net_input.keys())
# )
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
self.max_len - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
with torch.autograd.profiler.record_function("EnsembleModel: forward_encoder"):
encoder_outs = self.model.forward_encoder(**encoder_kwargs)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.bos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries with information about the hypotheses finalized at each step
# a boolean array indicating if the sentence at the index is finished or not
finished = [False for i in range(bsz)]
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
with torch.autograd.profiler.record_function(
"EnsembleModel: forward_decoder"
):
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
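                # (editor's note) shallow fusion: the language model's log-probabilities,
                # scaled by lm_weight, are added to the translation model's scores.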
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
else:
if step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
if self.token_indices_to_suppress is not None:
lprobs[:, self.token_indices_to_suppress] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
cum_fin_tensor = torch.tensor(cum_unfin, dtype=torch.int).to(bbsz_idx)
unfin_idx = torch.div(bbsz_idx, beam_size, rounding_mode="trunc")
sent = unfin_idx + torch.index_select(cum_fin_tensor, 0, unfin_idx)
# Create a set of "{sent}{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# For every finished beam item
# sentence index in the current (possibly reduced) batch
seen = (sent << 32) + unfin_idx
unique_seen: List[int] = torch.unique(seen).tolist()
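        # (editor's note) each (sent, unfin_idx) pair is packed into a single integer so
        # that torch.unique can deduplicate it; it is unpacked again below via
        # "unique_s >> 32" and "unique_s - (unique_sent << 32)".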
if self.match_source_len:
condition = step > torch.index_select(src_lengths, 0, unfin_idx)
eos_scores = torch.where(condition, torch.tensor(-math.inf), eos_scores)
sent_list: List[int] = sent.tolist()
for i in range(bbsz_idx.size()[0]):
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent_list[i]]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent_list[i]].append(
{
"tokens": tokens_clone[i],
"score": eos_scores[i],
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for unique_s in unique_seen:
# check termination conditions for this sentence
unique_sent: int = unique_s >> 32
unique_unfin_idx: int = unique_s - (unique_sent << 32)
if not finished[unique_sent] and self.is_finished(
step, unique_unfin_idx, max_len, len(finalized[unique_sent]), beam_size
):
finished[unique_sent] = True
newly_finished.append(unique_unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
# if all(
# hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
# for m in models
# ):
# self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min(
[
m.max_decoder_positions()
for m in self.models
if hasattr(m, "max_decoder_positions")
]
+ [sys.maxsize]
)
def set_decoder_beam_size(self, beam_size):
"""Set beam size for efficient beamable enc-dec attention."""
if beam_size > 1:
for model in self.models:
if hasattr(model, "set_beam_size"):
model.set_beam_size(beam_size)
@torch.jit.export
def forward_encoder(self, **encoder_kwargs):
if not self.has_encoder():
return None
return [model.forward_encoder(**encoder_kwargs) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.forward_decoder(
decoder_src_tokens=tokens,
encoder_cls=encoder_out["encoder_rep"],
temperature=temperature,
encoder_padding_mask=encoder_out["padding_mask"],
)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
# decoder_out_tuple = (
# decoder_out[0][:, -1:, :].div_(temperature),
# None if decoder_len <= 1 else decoder_out[1],
# )
# probs = model.get_normalized_probs(
# decoder_out_tuple, log_probs=True, sample=None
# )
probs = decoder_out[0]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(model.reorder_encoder_out(encoder_outs[i], new_order))
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGeneratorBeamSearch):
def __init__(
self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
if print_alignment == "hard":
self.extract_alignment = extract_hard_alignment
elif print_alignment == "soft":
self.extract_alignment = extract_soft_alignment
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = self.extract_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
| 41,579 | Python | .py | 919 | 33.022851 | 110 | 0.560465 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,210 | simple_sequence_generator.py | dptech-corp_NAG2G/NAG2G/search_strategies/simple_sequence_generator.py | import logging
import math
import sys
from typing import Dict, List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
# from . import move_to_cuda, strip_pad
# from dpaie import search, utils
# from fairseq.data import data_utils
# from fairseq.models import FairseqIncrementalDecoder
# from dpaie.ngram_repeat_block import NGramRepeatBlock
logger = logging.getLogger(__name__)
"""
Translation algorithms, which support batched input.
algorithms:
- greedy search (when args.beam_size <= 0)
- beam search (when args.beam_size > 0; supports adjusting
these parameters: beam_size and length_penalty)
inputs:
- src_tokens: (batch_size, src_len)
outputs:
- gen_seqs: (batch_size, max_seq_len/tgt_len.max()) (related to the stop rules)
"""
"""
Referenced from facebookresearch/XLM,
at https://github.com/facebookresearch/XLM/blob/master/xlm/model/transformer.py
"""
class BeamHypotheses(object):
def __init__(self, n_hyp, length_penalty):
"""
Initialize n-best list of hypotheses.
"""
self.length_penalty = length_penalty
self.n_hyp = n_hyp
self.hyp = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.hyp)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
        lp = len(hyp) ** self.length_penalty  # default length penalty
# lp = (5 + len(hyp)) ** self.length_penalty / (5 + 1) ** self.length_penalty # Google GNMT's length penalty
score = sum_logprobs / lp
# score = sum_logprobs
if len(self) < self.n_hyp or score > self.worst_score:
self.hyp.append((score, hyp))
if len(self) > self.n_hyp:
sorted_scores = sorted(
[(s, idx) for idx, (s, _) in enumerate(self.hyp)]
)
# delete the worst hyp in beam
del self.hyp[sorted_scores[0][1]]
                # update worst score with the second worst hyp
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs, cur_len):
"""
        If there are enough hypotheses and none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.n_hyp:
return False
else:
# return self.worst_score >= best_sum_logprobs
# / cur_len ** self.length_penalty
return (
self.worst_score >= best_sum_logprobs / cur_len**self.length_penalty
)
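def _beam_hypotheses_sketch():
    # Hedged, self-contained sketch of how the BeamHypotheses container above
    # is driven by the search loop; the token ids used here are arbitrary.
    hyps = BeamHypotheses(n_hyp=2, length_penalty=1.0)
    hyps.add(torch.tensor([11, 4, 7]), sum_logprobs=-1.8)  # score = -1.8 / 3
    hyps.add(torch.tensor([11, 9]), sum_logprobs=-2.4)  # score = -2.4 / 2
    hyps.add(torch.tensor([11, 4, 7, 2]), sum_logprobs=-1.6)  # evicts the worst hyp
    # is_done() asks: can a future candidate with this best running log-prob
    # still displace the current worst hypothesis at length cur_len?
    return hyps.is_done(best_sum_logprobs=-6.0, cur_len=4)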
class SimpleGenerator(nn.Module):
def __init__(
self,
model,
dict,
output_num_return_sequences_per_batch=5,
max_seq_len=512,
beam_size=5,
temperature=1.0,
match_source_len=False,
len_penalty=1.0,
args=None,
eos=None,
):
super().__init__()
self.model = model
self.dict = dict
self.output_num_return_sequences_per_batch = beam_size
self.max_seq_len = max_seq_len
self.beam_size = beam_size
self.temperature = temperature
self.match_source_len = match_source_len
self.pad_idx = self.dict.pad()
self.bos_idx = self.dict.bos()
self.eos_idx = eos if eos is not None else self.dict.eos()
# self.eos_idx2 = 247
self.len_penalty = len_penalty
self.args = args
@torch.no_grad()
def _generate(self, sample, prefix_tokens):
self.model.eval()
src_tokens = self.model.get_src_tokens(sample)
batch_size = src_tokens.size(0)
decoder_kwargs = {}
encoder_kwargs = {}
for k, v in sample["net_input"].items():
if "decoder" in k:
decoder_kwargs[k] = v
else:
encoder_kwargs[k] = v
if self.args.use_class_encoder:
assert self.args.N_vnode == 2
encoder_kwargs["cls_embedding"] = self.model.decoder_embed_tokens(decoder_kwargs["decoder_src_tokens"][:, 1])
encoder_result = self.model.forward_encoder(**encoder_kwargs)
encoder_out = encoder_result["encoder_rep"]
padding_mask = encoder_result["padding_mask"]
masked_tokens = encoder_result["masked_tokens"]
src_padding_mask = padding_mask
if src_padding_mask is None:
src_padding_mask = torch.zeros(
[encoder_out.shape[0], encoder_out.shape[1]]
).to(encoder_out.device)
# expand to beam size the source latent representations
encoder_out = encoder_out.repeat_interleave(
self.beam_size, dim=0
) # (B x beam_size) x T x C
src_padding_mask = src_padding_mask.repeat_interleave(
self.beam_size, dim=0
) # (B x beam_size) x T x C
# generated sentences (batch with beam current hypotheses)
generated = src_tokens.new(batch_size * self.beam_size, self.max_seq_len).fill_(
self.pad_idx
) # upcoming output
generated[:, 0].fill_(self.bos_idx)
if prefix_tokens is not None:
generated[:, 1] = prefix_tokens.expand((prefix_tokens.shape[0], self.beam_size)).reshape(-1)
# generated hypotheses
generated_hyps = [
BeamHypotheses(self.beam_size * 2, self.len_penalty)
for _ in range(batch_size)
        ]  # one BeamHypotheses instance per input sentence, tracking its beam
# scores for each sentence in the beam
beam_scores = encoder_out.new(batch_size, self.beam_size).fill_(
0
        )  # cumulative log-prob score kept for every beam entry
        beam_scores[:, 1:] = -1e9  # all but the first beam start at -inf
if prefix_tokens is not None:
beam_scores[:, 1] = 0
# current position
cur_len = 1
if prefix_tokens is not None:
cur_len = 2
# done sentences
        done = [False] * batch_size  # whether beam search has finished per input sentence
while cur_len < self.max_seq_len:
tgt_padding_mask = generated[:, :cur_len].eq(self.pad_idx)
scores, avg_attn_scores = self.model.forward_decoder(
decoder_src_tokens=generated[:, :cur_len],
encoder_cls=encoder_out,
temperature=self.temperature,
encoder_padding_mask=src_padding_mask,
) # (batch_size * beam_size, n_tgt_words)
n_tgt_words = scores.size(-1)
# - scores: (batch_size * beam_size, n_tgt_words)
# if self.args.N_left > 0: # and self.eos_idx2 in generated[:, :cur_len]:
# score_tmp, _ = torch.topk(
# scores, self.args.N_left, dim=-1, largest=True, sorted=True
# )
# score_tmp = score_tmp[:, -1].unsqueeze(1)
# scores[scores < score_tmp] = -100
_scores = scores + beam_scores.view(batch_size * self.beam_size, 1)
# (batch_size, beam_size * vocab_size)
_scores = _scores.view(batch_size, self.beam_size * n_tgt_words)
next_scores, next_words = torch.topk(
_scores, 2 * self.beam_size, dim=-1, largest=True, sorted=True
)
# - next_scores, next_words: (batch_size, 2 * beam_size)
# next batch beam content
# list of (batch_size * beam_size) tuple(next hypothesis score, next word, current position in the batch)
next_batch_beam = []
# for each sentence
for sent_id in range(batch_size):
# if we are done with this sentence
done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(
next_scores[sent_id].max().item(), cur_len
)
if done[sent_id]:
next_batch_beam.extend(
[(0, self.pad_idx, 0)] * self.beam_size
) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next words for this sentence
for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
# get beam and word IDs
beam_id = idx // n_tgt_words
word_id = idx % n_tgt_words
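                    # e.g. with n_tgt_words == 100 and a flattened idx of 257,
                    # beam_id == 2 and word_id == 57: this candidate extends the
                    # third live hypothesis of the sentence with vocab token 57.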
# end of sentence, or next word
effective_beam_id = sent_id * self.beam_size + beam_id
if word_id == self.eos_idx or cur_len + 1 == self.max_seq_len:
generated_hyps[sent_id].add(
generated[effective_beam_id, :cur_len].clone(), value.item()
)
else:
next_sent_beam.append((value, word_id, effective_beam_id))
# the beam for next step is full
if len(next_sent_beam) == self.beam_size:
break
# update next beam content
if len(next_sent_beam) == 0:
next_sent_beam = [
(0, self.pad_idx, 0)
] * self.beam_size # pad the batch
next_batch_beam.extend(next_sent_beam)
# prepare next batch
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_words = generated.new([x[1] for x in next_batch_beam])
beam_idx = src_tokens.new([x[2] for x in next_batch_beam])
# re-order batch and internal states
generated = generated[beam_idx, :]
generated[:, cur_len] = beam_words
# update current length
cur_len = cur_len + 1
if all(done):
break
# select the best hypotheses
tgt_len = src_tokens.new(
batch_size * self.output_num_return_sequences_per_batch
)
best = []
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.hyp, key=lambda x: x[0])
for j in range(self.output_num_return_sequences_per_batch):
effective_batch_idx = self.output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
tgt_len[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# generate target batch
gen_seqs = src_tokens.new(
batch_size * self.output_num_return_sequences_per_batch,
tgt_len.max().item(),
).fill_(self.pad_idx)
for i, hypo in enumerate(best):
gen_seqs[i, : tgt_len[i]] = hypo
tgt_lengths = torch.sum(gen_seqs.ne(self.pad_idx), dim=1)
return gen_seqs, tgt_lengths
| 11,220 | Python | .py | 259 | 31.69112 | 121 | 0.562512 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,211 | sample_generator.py | dptech-corp_NAG2G/NAG2G/search_strategies/sample_generator.py | import math
from typing import Dict, List, Optional
import sys
import torch
import torch.nn as nn
from torch import Tensor
# from . import move_to_cuda, strip_pad
import logging
import torch.nn.functional as F
# from dpaie import search, utils
# from fairseq.data import data_utils
# from fairseq.models import FairseqIncrementalDecoder
# from dpaie.ngram_repeat_block import NGramRepeatBlock
logger = logging.getLogger(__name__)
"""
Translate algorithms, which support a batch input.
algorithms:
    - greedy search (when args.beam_size <= 0)
    - beam search (when args.beam_size > 0; the beam_size and
      length_penalty parameters can be adjusted)
inputs:
- src_tokens: (batch_size, src_len)
outputs:
- gen_seqs: (batch_size, max_seq_len/tgt_len.max()) (related to the stop rules)
"""
"""
Referenced from facebookresearch/XLM,
at https://github.com/facebookresearch/XLM/blob/master/xlm/model/transformer.py
"""
class BeamHypotheses(object):
def __init__(self, n_hyp, length_penalty):
"""
Initialize n-best list of hypotheses.
"""
self.length_penalty = length_penalty
self.n_hyp = n_hyp
self.hyp = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.hyp)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
        lp = len(hyp) ** self.length_penalty  # default length penalty
# lp = (5 + len(hyp)) ** self.length_penalty / (5 + 1) ** self.length_penalty # Google GNMT's length penalty
score = sum_logprobs / lp
# score = sum_logprobs
if len(self) < self.n_hyp or score > self.worst_score:
self.hyp.append((score, hyp))
if len(self) > self.n_hyp:
sorted_scores = sorted(
[(s, idx) for idx, (s, _) in enumerate(self.hyp)]
)
del self.hyp[sorted_scores[0][1]] # delete the worst hyp in beam
self.worst_score = sorted_scores[1][
0
                ]  # update worst score with the second worst hyp
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs, cur_len):
"""
        If there are enough hypotheses and none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.n_hyp:
return False
else:
return self.worst_score >= best_sum_logprobs
# / cur_len ** self.length_penalty
# return self.worst_score >= best_sum_logprobs / cur_len ** self.length_penalty
class SampleGenerator(nn.Module):
def __init__(
self,
model,
dict,
output_num_return_sequences_per_batch=10,
max_seq_len=180,
beam_size=5,
temperature=1.0,
match_source_len=False,
):
super().__init__()
self.model = model
self.dict = dict
self.output_num_return_sequences_per_batch = beam_size
self.max_seq_len = max_seq_len
self.beam_size = beam_size
self.temperature = temperature
self.match_source_len = match_source_len
self.pad_idx = self.dict.pad()
self.bos_idx = self.dict.bos()
self.eos_idx = self.dict.eos()
def _generate(self, sample):
self.model.eval()
src_tokens = sample["net_input"]["reverse_src_dataset"] # B x T
reverse_tgt_tokens = sample["net_input"]["src_tokens"] # B x T
reaction_type = sample["net_input"]["reaction_type"]
src_lengths = torch.sum(src_tokens.ne(self.pad_idx), dim=1)
batch_size = src_tokens.size(0)
src_padding_mask = src_tokens.eq(self.pad_idx)
encoder_out, padding_mask = self.model.forward_encoder(
src_tokens, reaction_type
)
# expand to beam size the source latent representations
# encoder_out = encoder_out.repeat_interleave(self.beam_size, dim=0) # (B x beam_size) x T x C
# reverse_tgt_tokens = reverse_tgt_tokens.repeat_interleave(self.beam_size, dim=0) # (B x beam_size) x T x C
# src_padding_mask = src_padding_mask.repeat_interleave(self.beam_size, dim=0) # (B x beam_size) x T x C
# generated sentences (batch with beam current hypotheses)
generated = src_tokens.new(batch_size, self.max_seq_len).fill_(
self.pad_idx
) # upcoming output
generated[:, 0].fill_(self.bos_idx)
# generated hypotheses
generated_hyps = [
BeamHypotheses(self.beam_size * 2, 1.0) for _ in range(batch_size)
        ]  # one BeamHypotheses instance per input sentence, tracking its beam
# scores for each sentence in the beam
        # beam_scores = encoder_out.new(batch_size).fill_(0).to(src_tokens.device)  # cumulative log-prob score per beam entry
        # beam_scores[:, 1:] = -1e9  # all but the first beam start at -inf
all_scores, avg_attn_scores = self.model.forward_decoder(
reverse_tgt_tokens,
encoder_out,
self.temperature,
padding_mask=src_padding_mask,
) # (batch_size * beam_size, n_tgt_words)
# current position
cur_len = 1
# done sentences
n_tgt_words = all_scores.size(-1)
pre_len = all_scores.size(1)
# - scores: (batch_size * beam_size, n_tgt_words)
        done = [False] * batch_size  # whether beam search has finished per input sentence
sample_num = 0
batch_size, seq_len, num_clas = all_scores.shape
all_scores = torch.exp(all_scores)
all_scores = all_scores.view(-1, num_clas)
all_scores_ind = torch.multinomial(all_scores, self.beam_size, replacement=True)
all_scores_ind = all_scores_ind.view(batch_size, seq_len, self.beam_size)
all_scores = all_scores.view(batch_size, seq_len, num_clas)
### store the score
        # gather the probability of each sampled class id per position;
        # torch.take would wrongly treat the ids as flat offsets into all_scores
        all_probs = torch.gather(all_scores, 2, all_scores_ind)
### clean the eos_id
gen_seqs = all_scores_ind
gen_scores = torch.log(all_probs).sum(1)
gen_scores = gen_scores.reshape(-1)
gen_lengths = all_probs[0, :, 0]
gen_seqs = gen_seqs.transpose(1, 2)
gen_seqs = gen_seqs.reshape(-1, seq_len)
# print('test gen_seqs0: ', gen_seqs.shape, all_probs.shape)
return gen_seqs, gen_lengths, gen_scores
# while cur_len < self.max_seq_len and cur_len < pre_len:
# scores = all_scores[:,cur_len,:]
        #     _scores = scores + beam_scores.view(batch_size * self.beam_size, 1)  # accumulate the previous scores
# _scores = _scores.view(batch_size, self.beam_size * n_tgt_words) # (batch_size, beam_size * vocab_size)
# next_scores, next_words = torch.topk(_scores, 2 * self.beam_size, dim=-1, largest=True, sorted=True)
# # - next_scores, next_words: (batch_size, 2 * beam_size)
# # next batch beam content
# next_batch_beam = [] # list of (batch_size * beam_size) tuple(next hypothesis score, next word, current position in the batch)
# # for each sentence
# for sent_id in range(batch_size):
# # if we are done with this sentence
# done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(next_scores[sent_id].max().item(), cur_len)
# if done[sent_id]:
# next_batch_beam.extend([(0, self.pad_idx, 0)] * self.beam_size) # pad the batch
# continue
# # next sentence beam content
# next_sent_beam = []
# # next words for this sentence
# for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
# # get beam and word IDs
# beam_id = idx // n_tgt_words
# word_id = idx % n_tgt_words
# # end of sentence, or next word
# effective_beam_id = sent_id * self.beam_size + beam_id
# if word_id == self.eos_idx or cur_len + 1 == self.max_seq_len:
# generated_hyps[sent_id].add(generated[effective_beam_id, :cur_len].clone(), value.item())
# else:
# next_sent_beam.append((value, word_id, effective_beam_id))
# # the beam for next step is full
# if len(next_sent_beam) == self.beam_size:
# break
# # update next beam content
# if len(next_sent_beam) == 0:
# next_sent_beam = [(0, self.pad_idx, 0)] * self.beam_size # pad the batch
# next_batch_beam.extend(next_sent_beam)
# # prepare next batch
# beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
# beam_words = generated.new([x[1] for x in next_batch_beam])
# beam_idx = src_tokens.new([x[2] for x in next_batch_beam])
# # re-order batch and internal states
# generated = generated[beam_idx, :]
# generated[:, cur_len] = beam_words
# # update current length
# cur_len = cur_len + 1
# if all(done):
# break
# # select the best hypotheses
# tgt_len = src_tokens.new(batch_size*self.output_num_return_sequences_per_batch)
# best = []
# best_score = []
# for i, hypotheses in enumerate(generated_hyps):
# sorted_hyps = sorted(hypotheses.hyp, key=lambda x: x[0])
# for j in range(self.output_num_return_sequences_per_batch):
# try:
# effective_batch_idx = self.output_num_return_sequences_per_batch * i + j
# best_cand = sorted_hyps.pop()
# best_hyp = best_cand[1]
# score = best_cand[0]
# tgt_len[effective_batch_idx] = len(best_hyp)
# best.append(best_hyp)
# best_score.append(score)
# except:
# tgt_len[effective_batch_idx] = 0
# best.append(torch.tensor([]))
# best_score.append(-1000)
# # generate target batch
# gen_seqs = src_tokens.new(batch_size*self.output_num_return_sequences_per_batch, tgt_len.max().item()).fill_(self.pad_idx)
# gen_scores = [-1e5] * len(gen_seqs)
# for i, hypo in enumerate(best):
# gen_seqs[i, :tgt_len[i]] = hypo
# gen_scores[i] = best_score[i]
# tgt_lengths = torch.sum(gen_seqs.ne(self.pad_idx), dim=1)
# return gen_seqs, tgt_lengths, gen_scores
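def _sample_and_gather_sketch(log_probs, beam_size: int):
    # Hedged sketch (never called in this file): it isolates the sampling
    # pattern used in SampleGenerator._generate above. `log_probs` is assumed
    # to have shape (batch, seq_len, vocab); we draw `beam_size` token ids per
    # position and gather the probability that was assigned to each drawn id.
    batch, seq_len, vocab = log_probs.shape
    probs = log_probs.exp()
    ids = torch.multinomial(
        probs.view(-1, vocab), beam_size, replacement=True
    ).view(batch, seq_len, beam_size)
    picked = torch.gather(probs, 2, ids)  # probs[b, t, ids[b, t, k]]
    return ids, picked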
| 11,063 | Python | .py | 233 | 38.446352 | 141 | 0.576966 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,212 | search.py | dptech-corp_NAG2G/NAG2G/search_strategies/search.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Optional
import torch
import torch.nn as nn
from .token_generation_constraints import (
ConstraintState,
OrderedConstraintState,
UnorderedConstraintState,
)
from torch import Tensor
class Search(nn.Module):
def __init__(self, tgt_dict):
super().__init__()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.src_lengths = torch.tensor(-1)
self.supports_constraints = False
self.stop_on_max_len = False
def step(
self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None
):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
prev_output_tokens: (bsz x step)
                the previously generated output tokens
original_batch_idxs: (bsz)
the tensor with the batch indices, in the range [0, bsz)
this is useful in case there has been applied a re-ordering
                and we need to know the original indices
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
@torch.jit.export
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
"""Initialize constraint states for constrained decoding (if supported).
Args:
batch_constraints: (torch.Tensor, optional)
the list of constraints, in packed form
beam_size: (int)
the beam size
        """
pass
def prune_sentences(self, batch_idxs: Tensor):
"""
Removes constraint states for completed sentences (if supported).
This is called from sequence_generator._generate() when sentences are
deleted from the batch.
Args:
batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
"""
pass
def update_constraints(self, active_hypos: Tensor):
"""
Updates the constraint states by selecting the beam items that are retained.
This is called at each time step of sequence_generator._generate() when
the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size.
Args:
active_hypos: (batch size, beam size)
list of integers denoting, for each sentence, which beam candidate items
should be kept.
"""
pass
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.constraint_states = None
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
# Project back into relative indices and beams
beams_buf = torch.div(indices_buf, vocab_size, rounding_mode="trunc")
indices_buf = indices_buf.fmod(vocab_size)
# At this point, beams_buf and indices_buf are single-dim and contain relative indices
return scores_buf, indices_buf, beams_buf
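def _beam_search_step_sketch(tgt_dict):
    # Hedged sketch; `tgt_dict` is a hypothetical fairseq-style dictionary
    # (it only needs pad()/unk()/eos() and __len__). It shows the shape
    # contract of BeamSearch.step at the first decoding step.
    bsz, beam_size = 2, 3
    vocab_size = len(tgt_dict)
    strategy = BeamSearch(tgt_dict)
    lprobs = torch.randn(bsz, beam_size, vocab_size).log_softmax(dim=-1)
    scores = torch.zeros(bsz, beam_size, 0)  # no score history yet at step 0
    # Returns up to 2 * beam_size candidates per sentence: their cumulative
    # scores, their vocabulary indices and the beam each candidate extends.
    scores_buf, indices_buf, beams_buf = strategy.step(0, lprobs, scores)
    return scores_buf, indices_buf, beams_buf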
class PrefixConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, prefix_allowed_tokens_fn):
super().__init__(tgt_dict)
self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
self.stop_on_max_len = True
@torch.jit.export
def apply_mask(self, x, prev_output_tokens, original_batch_idxs):
beam_size = x.shape[0] // original_batch_idxs.shape[0]
original_batch_idxs = (
original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist()
)
mask = torch.full_like(x, -math.inf)
for sent_i, (sent, batch_i) in enumerate(
zip(prev_output_tokens, original_batch_idxs)
):
mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0
return mask
@torch.jit.export
def step(
self,
step: int,
lprobs: Tensor,
scores: Tensor,
prev_output_tokens: Tensor,
original_batch_idxs: Tensor,
):
bsz, beam_size, vocab_size = lprobs.size()
lprobs += self.apply_mask(
lprobs.view(bsz * beam_size, 1, vocab_size),
prev_output_tokens,
original_batch_idxs,
).view(bsz, beam_size, vocab_size)
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
return scores_buf, indices_buf, beams_buf
class LexicallyConstrainedBeamSearch(Search):
"""Implements lexically constrained beam search as described in
Fast Lexically Constrained Decoding with Dynamic Beam
Allocation for Neural Machine Translation. Post & Vilar,
NAACL 2018. https://www.aclweb.org/anthology/N18-1119/
and
Improved Lexically Constrained Decoding for Translation and
Monolingual Rewriting. Hu et al, NAACL
2019. https://www.aclweb.org/anthology/N19-1090/
This is accomplished by maintaining, for each beam hypothesis, a
ConstraintState object (see constraints.py) that tracks which
constraints have been generated and using this information to
shape the beam for each input sentence.
"""
def __init__(self, tgt_dict, representation):
super().__init__(tgt_dict)
self.representation = representation
self.vocab_size = len(tgt_dict)
self.num_cands = 0
self.supports_constraints = True
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
self.constraint_states = []
for constraint_tensor in batch_constraints:
if self.representation == "ordered":
constraint_state = OrderedConstraintState.create(constraint_tensor)
elif self.representation == "unordered":
constraint_state = UnorderedConstraintState.create(constraint_tensor)
self.constraint_states.append([constraint_state for i in range(beam_size)])
@torch.jit.export
def prune_sentences(self, batch_idxs: Tensor):
self.constraint_states = [
self.constraint_states[i] for i in batch_idxs.tolist()
]
@torch.jit.export
def update_constraints(self, active_hypos: Tensor):
if self.constraint_states:
batch_size = active_hypos.size(0)
for sentid in range(batch_size):
self.constraint_states[sentid] = [
self.constraint_states[sentid][i] for i in active_hypos[sentid]
]
@torch.jit.export
def step(
self,
step: int,
lprobs: Tensor,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
"""
A constrained step builds a large candidates list from the following:
- the top 2 * {beam_size} items over the whole beam
- for each item in the beam
- the top {each_k} (default 1)
- all next constraints
We then compute the constrained state of each beam item, and assign
stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so
on. We then sort by (stripe, score), and truncate the list at
2 * beam size.
Args:
step: the decoder step
lprobs: (batch size, beam size, target vocab)
the target-vocab distributions for each item in the beam.
        Return: A tuple of (scores, indices, beams, constraints) where:
scores: (batch, output beam size)
the scores of the chosen elements
indices: (batch, output beam size)
the target vocab indices of the chosen elements
beams: (batch, output beam size)
the 0-indexed hypothesis ids of the chosen elements
constraints: (batch, output beam size)
the new constraint states
"""
each_k = 1
device = lprobs.device
batch_size, beam_size, vocab_size = lprobs.size()
self.num_cands = min(
# Just take the k-best. We'll get another k from the 1-best from each
# row, plus more from the constraints
beam_size * 2,
lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad
)
# STEP 0: Preliminary. Prevent EOS for unfinished hyps across all batch items
constraint_states = self.constraint_states
if constraint_states and step > 0:
not_finished_indices = []
for sentno, sent_constraints in enumerate(constraint_states):
for beamno, state in enumerate(sent_constraints):
index = sentno * beam_size + beamno
if not state.finished:
not_finished_indices.append(index)
not_finished_indices = torch.tensor(not_finished_indices)
if not_finished_indices.numel() > 0:
lprobs.view(batch_size * beam_size, -1)[
not_finished_indices, self.eos
] = -math.inf
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam entry for each batch item
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(batch_size, -1),
self.num_cands,
)
scores_buf, indices_buf = top_prediction
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# Short circuit if there are no constraints in this batch
if not constraint_states:
return scores_buf, indices_buf, beams_buf
# STEP 1: get top-1 from each hypothesis across all sentences in the batch
if step > 0:
top_scores, top_indices = torch.topk(
lprobs.view(batch_size * beam_size, -1),
k=each_k,
dim=1,
)
top_scores = top_scores.view(batch_size, -1)
top_indices = top_indices.view(batch_size, -1)
scores_buf = torch.cat((scores_buf, top_scores), dim=1)
indices_buf = torch.cat((indices_buf, top_indices), dim=1)
new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1)
beams_buf = torch.cat((beams_buf, new_beams), dim=1)
# Now, process sentences in the batch one by one.
new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device)
new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
for sentno, states in enumerate(constraint_states):
scores, indices, beams, new_states = self.step_sentence(
step,
sentno,
lprobs[sentno],
constraint_states[sentno],
beams_buf[sentno].clone(),
indices_buf[sentno].clone(),
scores_buf[sentno].clone(),
)
new_scores_buf[sentno] = scores
new_indices_buf[sentno] = indices
new_beams_buf[sentno] = beams
self.constraint_states[sentno] = new_states
return new_scores_buf, new_indices_buf, new_beams_buf
@torch.jit.export
def step_sentence(
self,
step: int,
sentno: int,
lprobs: Tensor,
constraint_states: List[List[ConstraintState]],
beams_buf: Tensor,
indices_buf: Tensor,
scores_buf: Tensor,
):
"""Does per-sentence processing. Adds all constraints for each
hypothesis to the list of candidates; then removes duplicates,
sorts, and dynamically stripes across the banks. All tensor inputs
are collapsed to those pertaining to a single input sentence.
"""
device = lprobs.device
# STEP 2: Add all constraints for each beam item
for beamno, state in enumerate(constraint_states):
next_tokens = torch.tensor(list(state.next_tokens()), device=device).long()
if next_tokens.numel() != 0:
indices_buf = torch.cat((indices_buf, next_tokens))
next_beams = (
torch.tensor(beamno, device=device)
.repeat(next_tokens.size(0))
.long()
)
beams_buf = torch.cat((beams_buf, next_beams))
next_values = lprobs[beamno].take(next_tokens.view(-1))
scores_buf = torch.cat((scores_buf, next_values))
# At the 0th time step, there is just one beam item
if step == 0:
break
# STEP 3: Compute the "bank" for each candidate. This is the
# number of constraints it's generated. We need this so that
# we can do round-robin allocation of the beam across these
# banks. If C is the number of constraints, we select the best
# item in bank C, then the best in bank C-1, etc, followed by
# the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so
# on, until the maximum beam size. We accomplish this by
# creating a sort key and striping across the banks.
# Compute the new states for all candidates
cands_size = indices_buf.size(0)
constraint_states = [
constraint_states[beams_buf[i]].advance(indices_buf[i])
for i in range(cands_size)
]
banks = torch.tensor([state.bank for state in constraint_states], device=device)
# STEP 4: Sort
num_constraint_tokens = len(state.tokens)
# Sort by keys (bank, score) (i.e., sort banks together, and scores
# within banks). AFAIK pytorch doesn't support either stable sort or
# multi-key sorting, so we have to hack this.
MAX_SCORE = -100
sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf
sort_values, sort_indices = sort_key.sort(dim=0, descending=True)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
banks = banks[sort_indices]
# Sort the constraints to follow suit
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 5: Remove duplicates. The topk calls (overall and
# per-row) plus the per-row generation of constraints will
# produce duplicates. Here we remove them.
def roll(t):
"""Rolls a 1d tensor left by 1.
[0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3]
"""
return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0)
# We map candidates (beam, token_id) to a single dimension.
# This is then shifted by 1. We can then easily identify
# duplicates and create a mask that identifies unique
# extensions.
uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf
uniques_mask = roll(uniques_mask) != uniques_mask
# Use the mask to pare down the data structures
scores_buf = torch.masked_select(scores_buf, uniques_mask)
indices_buf = torch.masked_select(indices_buf, uniques_mask)
beams_buf = torch.masked_select(beams_buf, uniques_mask)
banks = torch.masked_select(banks, uniques_mask)
i = 1
for mask in uniques_mask[1:]:
if not mask:
constraint_states.pop(i)
i += mask
# STEP 6: Assign IDs round-robin across banks, sort, and
# truncate. Now that the candidates are sorted by (bank,
# score) and uniqed, we dynamically allocate the {beam_size}
# beam by striping across the candidates. These stripes will
# be used as sort keys to do round-robin selection. This is
# accomplished in a single pass with offsets. Sorting by
# highest-banks (furthest-along hypotheses) first ensures
# progress through the constraints.
#
# e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0
# OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1
# NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7
# = 0 5 10 1 6 11 13 2 7 12 3 8
#
# Sorting by this then gives the following banks:
#
# 3 2 1 0 3 2 1 0 3 2 1 2
#
# We'll take the top {beam_size} of these.
stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
stripes = torch.zeros_like(banks)
cur_bank_count = -1
cur_bank = banks[0]
for i, bank in enumerate(banks):
if bank != cur_bank:
cur_bank_count = 0
cur_bank = bank
else:
cur_bank_count += 1
stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
# STEP 7: Sort by the stripes values
sort_values, sort_indices = stripes.sort(dim=0)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 8: Truncate to the candidates size!
scores_buf = scores_buf[: self.num_cands]
indices_buf = indices_buf[: self.num_cands]
beams_buf = beams_buf[: self.num_cands]
return scores_buf, indices_buf, beams_buf, constraint_states
class LengthConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
super().__init__(tgt_dict)
self.min_len_a = min_len_a
self.min_len_b = min_len_b
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.beam = BeamSearch(tgt_dict)
self.needs_src_lengths = True
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
min_lens = self.min_len_a * self.src_lengths + self.min_len_b
max_lens = self.max_len_a * self.src_lengths + self.max_len_b
lprobs[step < min_lens, :, self.eos] = -math.inf
lprobs[step >= max_lens, :, self.eos] = 0
return self.beam.step(step, lprobs, scores)
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.beam = BeamSearch(tgt_dict)
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
"DiverseBeamSearch requires --beam to be divisible by the number of groups"
)
# initialize diversity penalty
diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g :: self.num_groups, :]
scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(
lprobs_g,
other=diversity_buf.unsqueeze(1),
alpha=self.diversity_strength,
)
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(
step, lprobs_g, scores_g
)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
diversity_buf.scatter_add_(
1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
)
# interleave results from different groups
scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1)
indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1)
beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1)
return scores_buf, indices_buf, beams_buf
class Sampling(Search):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
return trimed_probs, truncated_indices
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs, top_indices = self._sample_topp(lprobs)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs, top_indices = lprobs.topk(self.sampling_topk)
probs = lprobs.exp_()
else:
probs = lprobs.exp_()
# dummy data to be consistent with true branch for type check
top_indices = torch.empty(0).to(probs)
# sample
if step == 0:
indices_buf = torch.multinomial(
probs.view(bsz, -1),
beam_size,
replacement=True,
).view(bsz, beam_size)
else:
indices_buf = torch.multinomial(
probs.view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs = probs.expand(bsz, beam_size, -1)
# gather scores
scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1))
scores_buf = scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=indices_buf.unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf.new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
# make scores cumulative
scores_buf.add_(
torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
)
return scores_buf, indices_buf, beams_buf
class DiverseSiblingsSearch(Search):
"""
Beam search with diverse siblings.
See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details.
https://arxiv.org/abs/1611.08562
1/ Calculate hypotheses for each beam
2/ Intra-sibling ordering
3/ Rewrite scores
4/ Choose top K hypotheses
if diversity_rate == 0 is equivalent to BeamSearch
"""
def __init__(self, tgt_dict, diversity_rate):
super().__init__(tgt_dict)
self.diversity_rate = diversity_rate
self.beam = BeamSearch(tgt_dict)
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
k = min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
)
s_list: List[Tensor]
i_list: List[Tensor]
s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)]
i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)]
sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate
if step == 0:
return self.beam.step(step, lprobs, scores)
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# 1/ Calculate hypotheses for each beam
for i in range(beam_size):
torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i]))
i_list[i].fmod_(vocab_size)
# 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores
s_list[i].sub_(sibling_score)
# 4/ Choose top K hypotheses
indices = torch.stack(i_list, dim=1).view(bsz, -1)
final_scores = torch.empty(0).to(lprobs)
final_indices = torch.LongTensor().to(device=lprobs.device)
final_beams = torch.LongTensor().to(device=lprobs.device)
(final_scores, final_indices) = torch.topk(
torch.stack(s_list, dim=1).view(bsz, -1),
k,
)
final_beams = final_indices // k
for i in range(bsz):
final_indices[i] = indices[i][final_indices[i]]
return final_scores, final_indices, final_beams
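def _strategy_construction_sketch(tgt_dict):
    # Hedged sketch; `tgt_dict` is a hypothetical fairseq-style dictionary.
    # It only shows how the strategies in this file are instantiated; a
    # generator then calls set_src_lengths(...) once per batch and step(...)
    # once per decoding position.
    return [
        BeamSearch(tgt_dict),
        LengthConstrainedBeamSearch(
            tgt_dict, min_len_a=0, min_len_b=1, max_len_a=1, max_len_b=50
        ),
        DiverseBeamSearch(tgt_dict, num_groups=2, diversity_strength=0.5),
        Sampling(tgt_dict, sampling_topk=10),
        DiverseSiblingsSearch(tgt_dict, diversity_rate=0.1),
    ]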
| 31,343 | Python | .py | 693 | 34.793651 | 100 | 0.598533 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,213 | greedy_generator.py | dptech-corp_NAG2G/NAG2G/search_strategies/greedy_generator.py | import math
from typing import Dict, List, Optional
import sys
import torch
import torch.nn as nn
from torch import Tensor
# from . import move_to_cuda, strip_pad
import logging
import torch.nn.functional as F
# from dpaie import search, utils
# from fairseq.data import data_utils
# from fairseq.models import FairseqIncrementalDecoder
# from dpaie.ngram_repeat_block import NGramRepeatBlock
logger = logging.getLogger(__name__)
'''
Translate algorithms, which support a batch input.
algorithms:
    - greedy search (when args.beam_size <= 0)
    - beam search (when args.beam_size > 0; the beam_size and
      length_penalty parameters can be adjusted)
inputs:
- src_tokens: (batch_size, src_len)
outputs:
- gen_seqs: (batch_size, max_seq_len/tgt_len.max()) (related to the stop rules)
'''
"""
Referenced from facebookresearch/XLM,
at https://github.com/facebookresearch/XLM/blob/master/xlm/model/transformer.py
"""
class BeamHypotheses(object):
def __init__(self, n_hyp, length_penalty):
"""
Initialize n-best list of hypotheses.
"""
self.length_penalty = length_penalty
self.n_hyp = n_hyp
self.hyp = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.hyp)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
        lp = len(hyp) ** self.length_penalty  # default length penalty
# lp = (5 + len(hyp)) ** self.length_penalty / (5 + 1) ** self.length_penalty # Google GNMT's length penalty
score = sum_logprobs / lp
# score = sum_logprobs
if len(self) < self.n_hyp or score > self.worst_score:
self.hyp.append((score, hyp))
if len(self) > self.n_hyp:
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)])
del self.hyp[sorted_scores[0][1]] # delete the worst hyp in beam
                self.worst_score = sorted_scores[1][0]  # update worst score with the second worst hyp
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs, cur_len):
"""
        If there are enough hypotheses and none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.n_hyp:
return False
else:
return self.worst_score >= best_sum_logprobs
# / cur_len ** self.length_penalty
# return self.worst_score >= best_sum_logprobs / cur_len ** self.length_penalty
class GreedyGenerator(nn.Module):
def __init__(
self,
model,
dict,
output_num_return_sequences_per_batch = 10,
max_seq_len=180,
beam_size=5,
temperature=1.0,
match_source_len=False,
):
super().__init__()
self.model = model
self.dict = dict
self.output_num_return_sequences_per_batch = beam_size
self.max_seq_len = max_seq_len
self.beam_size = beam_size
self.temperature = temperature
self.match_source_len = match_source_len
self.pad_idx = self.dict.pad()
self.bos_idx = self.dict.bos()
self.eos_idx = self.dict.eos()
def _generate(self, sample):
self.model.eval()
src_tokens = sample['net_input']['reverse_src_dataset'] # B x T
reverse_tgt_tokens = sample['net_input']['src_tokens'] # B x T
reaction_type = sample['net_input']['reaction_type']
src_lengths = torch.sum(src_tokens.ne(self.pad_idx), dim=1)
batch_size = src_tokens.size(0)
src_padding_mask = src_tokens.eq(self.pad_idx)
encoder_out, padding_mask = self.model.forward_encoder(src_tokens, reaction_type)
# expand to beam size the source latent representations
encoder_out = encoder_out.repeat_interleave(self.beam_size, dim=0) # (B x beam_size) x T x C
reverse_tgt_tokens = reverse_tgt_tokens.repeat_interleave(self.beam_size, dim=0) # (B x beam_size) x T x C
src_padding_mask = src_padding_mask.repeat_interleave(self.beam_size, dim=0) # (B x beam_size) x T x C
# generated sentences (batch with beam current hypotheses)
generated = src_tokens.new(batch_size * self.beam_size, self.max_seq_len).fill_(self.pad_idx) # upcoming output
generated[:, 0].fill_(self.bos_idx)
# generated hypotheses
generated_hyps = [
BeamHypotheses(self.beam_size *2, 1.0)
for _ in range(batch_size)
        ]  # one BeamHypotheses instance per input sentence, tracking its beam
# scores for each sentence in the beam
        beam_scores = encoder_out.new(batch_size, self.beam_size).fill_(0).to(src_tokens.device)  # cumulative log-prob score per beam entry
        beam_scores[:, 1:] = -1e9  # all but the first beam start at -inf
all_scores, avg_attn_scores = self.model.forward_decoder(reverse_tgt_tokens, encoder_out, self.temperature, padding_mask = src_padding_mask) # (batch_size * beam_size, n_tgt_words)
# current position
cur_len = 1
# done sentences
n_tgt_words = all_scores.size(-1)
pre_len = all_scores.size(1)
# - scores: (batch_size * beam_size, n_tgt_words)
        done = [False] * batch_size  # whether beam search has finished per input sentence
while cur_len < self.max_seq_len and cur_len < pre_len:
scores = all_scores[:,cur_len,:]
            _scores = scores + beam_scores.view(batch_size * self.beam_size, 1)  # accumulate the previous scores
_scores = _scores.view(batch_size, self.beam_size * n_tgt_words) # (batch_size, beam_size * vocab_size)
next_scores, next_words = torch.topk(_scores, 2 * self.beam_size, dim=-1, largest=True, sorted=True)
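            # 2 * beam_size candidates are kept so that, even if some of them
            # end in EOS and are moved into generated_hyps, enough live
            # candidates remain to refill the beam for the next step.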
# - next_scores, next_words: (batch_size, 2 * beam_size)
# next batch beam content
next_batch_beam = [] # list of (batch_size * beam_size) tuple(next hypothesis score, next word, current position in the batch)
# for each sentence
for sent_id in range(batch_size):
# if we are done with this sentence
done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(next_scores[sent_id].max().item(), cur_len)
if done[sent_id]:
next_batch_beam.extend([(0, self.pad_idx, 0)] * self.beam_size) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next words for this sentence
for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
# get beam and word IDs
beam_id = idx // n_tgt_words
word_id = idx % n_tgt_words
# end of sentence, or next word
effective_beam_id = sent_id * self.beam_size + beam_id
if word_id == self.eos_idx or cur_len + 1 == self.max_seq_len:
generated_hyps[sent_id].add(generated[effective_beam_id, :cur_len].clone(), value.item())
else:
next_sent_beam.append((value, word_id, effective_beam_id))
# the beam for next step is full
if len(next_sent_beam) == self.beam_size:
break
# update next beam content
if len(next_sent_beam) == 0:
next_sent_beam = [(0, self.pad_idx, 0)] * self.beam_size # pad the batch
next_batch_beam.extend(next_sent_beam)
# prepare next batch
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_words = generated.new([x[1] for x in next_batch_beam])
beam_idx = src_tokens.new([x[2] for x in next_batch_beam])
# re-order batch and internal states
generated = generated[beam_idx, :]
generated[:, cur_len] = beam_words
# update current length
cur_len = cur_len + 1
if all(done):
break
# select the best hypotheses
tgt_len = src_tokens.new(batch_size*self.output_num_return_sequences_per_batch)
best = []
best_score = []
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.hyp, key=lambda x: x[0])
for j in range(self.output_num_return_sequences_per_batch):
try:
effective_batch_idx = self.output_num_return_sequences_per_batch * i + j
best_cand = sorted_hyps.pop()
best_hyp = best_cand[1]
score = best_cand[0]
tgt_len[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
best_score.append(score)
except:
tgt_len[effective_batch_idx] = 0
best.append(torch.tensor([]))
best_score.append(-1000)
# generate target batch
gen_seqs = src_tokens.new(batch_size*self.output_num_return_sequences_per_batch, tgt_len.max().item()).fill_(self.pad_idx)
gen_scores = [-1e5] * len(gen_seqs)
for i, hypo in enumerate(best):
gen_seqs[i, :tgt_len[i]] = hypo
gen_scores[i] = best_score[i]
tgt_lengths = torch.sum(gen_seqs.ne(self.pad_idx), dim=1)
return gen_seqs, tgt_lengths, gen_scores | 9,969 | Python | .py | 203 | 37.334975 | 190 | 0.586533 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,214 | token_generation_constraints.py | dptech-corp_NAG2G/NAG2G/search_strategies/token_generation_constraints.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Implements tracking of constraints for a beam item.
A list of constraints is given as a list of one or more token
sequences, each of length at least one token. For example, for an input sentence
> Die maschinelle Übersetzung ist schwer zu kontrollieren.
We could have the constraints:
* to influence
* hard
There are two implementations:
* OrderedConstraintState: Tracks progress through an ordered list of multitoken constraints.
* UnorderedConstraintState: Tracks progress through an unordered list of multitoken constraints.
The difference is that in the first, the constraints are assumed to be
in order; the algorithm will permit zero or more tokens between them.
In the second, the constraints are not ordered, so many orderings will
be explored.
The same sequence can be present any number of times, and will appear
that many times in the output.
"""
from collections import Counter
from typing import List, Optional, Set, Tuple
import torch
class ConstraintState:
def __init__(self):
pass
def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor:
"""Takes a list of list of constraints in tensor form (a list of
tensor constraints for each sentence) and transforms it into a
packed Tensor. For example, here is a batch of size 3 with 3, 0,
and 1 constraints:
[ [ [3 1 2], [3], [4 5 6 7], ]
[],
[ [1 8 9 10 1 4 11 12], ]
]
Its corresponding packed structure is:
[ [ 3 3 1 2 0 3 0 4 5 6 7 0],
[ 0 0 0 0 0 0 0 0 0 0 0 0],
[ 1 1 8 9 10 1 4 11 12 0 0 0] ]
The packed tensor has shape (batch size, maxlen), where
maxlen is defined below. Each row contains concatenated
constraint tokens for that sentence, with 0 appended after
each constraint. The first item in each row is the number
of constraints for that sentence. So maxlen is the maximum
of
(number of constraints) + (sum length of constraints) + 1.
across all sentences in the batch.
"""
# The maximum word length of concatenated constraints for any sentence
max_constraints_len = 1
for sentence_constraints in batch_constraints:
if len(sentence_constraints):
            # number of constraints, plus sum of constraint lengths, plus a zero after each
constraints_len = (
1
+ sum([c.size(0) for c in sentence_constraints])
+ len(sentence_constraints)
)
max_constraints_len = max(max_constraints_len, constraints_len)
batch_size = len(batch_constraints)
constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long()
for i, sentence_constraints in enumerate(batch_constraints):
constraints_tensor[i, 0] = len(sentence_constraints)
offset = 1
for j, constraint in enumerate(sentence_constraints):
this_len = constraint.size(0)
constraints_tensor[i, offset : offset + this_len] = constraint
offset += this_len + 1
return constraints_tensor.long()
def unpack_constraints(constraint_tensor: torch.Tensor) -> List[torch.Tensor]:
"""
Transforms *one row* of a packed constraint tensor (e.g., for one
sentence in the batch) into a list of constraint tensors.
"""
constraint_list = []
num_constraints = constraint_tensor[0]
constraints = constraint_tensor.tolist()
offset = 1
for i in range(num_constraints):
where = constraints.index(0, offset)
constraint_list.append(constraint_tensor[offset:where])
offset = where + 1
return constraint_list
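def _pack_unpack_sketch():
    # Hedged, self-contained sketch reproducing the example from the
    # pack_constraints docstring: three sentences with 3, 0 and 1 constraints.
    batch_constraints = [
        [torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])],
        [],
        [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
    ]
    packed = pack_constraints(batch_constraints)  # shape (3, 12)
    # Unpacking row 0 recovers that sentence's original constraint list.
    row0_constraints = unpack_constraints(packed[0])
    return packed, row0_constraints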
class ConstraintNode:
"""
Represents a node in a trie managing unordered constraints.
"""
def __init__(self, token: int = None, parent=None):
        # The token associated with this node (None for the root)
self.token = int(token) if token is not None else None
# The parent (None at the root)
self.parent = parent
# Whether this node is a completed constraint
self.terminal = 0
# List of child nodes
self.children = {}
# The cumulative number of constraints from this point in the
# trie forward
self.num_constraints = 0
@property
def id(self):
return self.token
def __str__(self):
term = self.terminal != 0
return f"[{self.token}].{term}#{self.num_constraints}"
def __getitem__(self, key: int):
return self.children.get(key, None)
def next_tokens(self) -> Set[int]:
"""The set of child labels."""
return set(self.children.keys())
@staticmethod
def create(constraints: List[List[int]]):
root = ConstraintNode()
for sequence in constraints:
root.add_sequence(sequence)
return root
@staticmethod
def print_graph(node: "ConstraintNode"):
if len(node.children) == 0:
return str(node)
else:
s = f"({node}"
for child in node.children.values():
s += " " + ConstraintNode.print_graph(child)
s += ")"
return s
def token_counts(self) -> Counter:
"""Returns a counter of the number of times each token is used
in a constraint.
"""
token_counts = Counter()
kids = list(self.children.values())
while len(kids) > 0:
kid = kids.pop()
token_counts[kid.id] += kid.num_constraints
kids += list(kid.children.values())
return token_counts
def tokens(self) -> Set[int]:
"""Returns the set of tokens in constraints."""
return set(self.token_counts().keys())
def add_sequence(self, sequence: List[int]):
"""Adds a constraint, represented as a list of integers, to
the trie."""
assert len(sequence) > 0
token = int(sequence[0])
if token not in self.children:
self.children[token] = ConstraintNode(token, parent=self)
node = self.children[token]
if len(sequence) == 1:
node.terminal += 1
node.num_constraints += 1
parent = node.parent
while parent is not None:
parent.num_constraints += 1
parent = parent.parent
else:
node.add_sequence(sequence[1:])
class UnorderedConstraintState(ConstraintState):
"""
Records progress through the set of constraints for each item in the beam
using a trie.
"""
def __init__(self, node: ConstraintNode, copy_from: "ConstraintState" = None):
self.node = node
if copy_from is None:
# The root node
self.root = node
# The set of states in the graph that have been completed
self.completed = Counter()
            # A counter of how many times each trie node has been generated (visited) so far
self.generated = Counter()
# The list of tokens we need to generate
self.needed_tokens = self.root.tokens()
else:
self.completed = Counter(copy_from.completed)
self.generated = Counter(copy_from.generated)
self.root = copy_from.root
# Mark the node as generated
if self.node != self.root:
self.generated[node] += 1
@staticmethod
def create(constraint_tensor: torch.Tensor):
constraint_list = unpack_constraints(constraint_tensor)
constraint_trie_root = ConstraintNode.create(constraint_list)
return UnorderedConstraintState(constraint_trie_root)
def __str__(self):
gen_str = ",".join([str(node) for node in self.generated])
return f"{self.name}/{self.bank}({gen_str})x{self.num_completed}"
def __copy__(self):
copied_state = UnorderedConstraintState(self.node, copy_from=self)
return copied_state
def copy(self):
return self.__copy__()
@property
def name(self):
if self.node.id is None:
return "ROOT"
else:
return str(self.node.id)
@property
def is_root(self):
return self.node == self.root
@property
def bank(self):
return sum(self.generated.values())
@property
def num_completed(self):
"""The number of constraints (not constraint tokens) that are completed.
In addition to the already-completed states, we need to account for the
current state, which might get marked as completed when another token
is generated.
"""
in_final = self.node.terminal and self.completed[self.node] < self.node.terminal
return sum(self.completed.values()) + in_final
@property
def finished(self):
return self.root.num_constraints - self.num_completed == 0
@property
def token_counts(self):
return self.root.token_counts()
@property
def tokens(self):
return self.root.tokens()
@property
def num_constraint_tokens(self):
return sum(self.token_counts.values())
def next_tokens(self) -> Set[int]:
"""Returns the list of tokens that could come next.
These are (a) all tokens extending the root state and, for
non-root states, additionally all tokens extending the current
state."""
if self.node != self.root:
return self.root.next_tokens().union(self.node.next_tokens())
else:
return self.root.next_tokens()
def advance(self, token: int):
"""Reads in a token and advances the state. Here's how it works.
We can advance to the next state if:
- there is a matching child
- its path isn't blocked
A path is blocked when all constraints that are descendants of
that node have already been generated, in the current state.
If we are not able to advance from the current state, we "fall
off the graph" and return to the root state. There, we again
try to advance, checking the same criteria.
In any case, when falling off the graph, we need to do some
bookkeeping. We:
- check whether any constraints were met (all prefixes of
current state)
- if one is found, mark it as completed
- adjust visited nodes accordingly
"""
token = int(token)
next_state = None
child = self.node[token]
if child is not None and self.generated[child] < child.num_constraints:
next_state = UnorderedConstraintState(child, copy_from=self)
def rewind():
"""If we're mid-trie and an "illegal" token is chosen next, we need
to reset our state to the root state. However, along the way, we need
to check whether a prefix of the current trie state represents a state
we could mark as completed.
"""
node = self.node
while node != self.root:
if node.terminal and self.completed[node] < node.terminal:
next_state.completed[node] += 1
return
next_state.generated[node] -= 1
node = node.parent
# Fall off the graph, check the root
if next_state is None and token in self.root.next_tokens():
child = self.root[token]
# We can only traverse this edge if it's not saturated
if self.generated[child] < child.num_constraints:
next_state = UnorderedConstraintState(child, copy_from=self)
else:
next_state = UnorderedConstraintState(self.root, copy_from=self)
# Rewind
rewind()
elif next_state is None:
next_state = UnorderedConstraintState(self.root, copy_from=self)
# Rewind
rewind()
return next_state
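# Sketch of how the unordered state advances (illustrative token ids only):
#
#     state = UnorderedConstraintState(ConstraintNode.create([[3, 4], [5]]))
#     state.next_tokens()        # {3, 5}
#     state = state.advance(3)   # inside constraint [3, 4]; bank == 1
#     state = state.advance(4)   # [3, 4] can be counted; num_completed == 1
#     state = state.advance(7)   # unconstrained token: fall back to the root
#     state = state.advance(5)   # [5] done as well; finished is now True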
class ConstraintSequence:
def __init__(self, sequences: List[List[int]]):
"""Represents a set of possibly multitoken constraints by
concatenating them and internally recording the end points.
"""
self.sequences = []
self.endpoints = []
self.num_tokens = 0
self.tokens = set()
for sequence in sequences:
for token in sequence:
self.tokens.add(token)
self.num_tokens += len(sequence)
self.endpoints += [False for x in range(len(sequence) - 1)] + [True]
self.sequences += sequence
def __getitem__(self, key: int):
return self.sequences[key]
def __len__(self):
return len(self.sequences)
def __str__(self):
return str(self.sequences)
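# Layout sketch: the constraints are flattened and the end of each one is
# remembered (token ids are arbitrary examples):
#
#     seq = ConstraintSequence([[3, 4], [5]])
#     seq.sequences    # [3, 4, 5]
#     seq.endpoints    # [False, True, True]
#     seq.tokens       # {3, 4, 5}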
class OrderedConstraintState(ConstraintState):
"""
Records progress through the set of linear nonbranching constraints with gaps.
"""
def __init__(self, sequence: ConstraintSequence, state: int = -1):
self.sequence = sequence
self.state = state
@staticmethod
def create(constraint_tensor: torch.Tensor):
constraint_list = unpack_constraints(constraint_tensor)
return OrderedConstraintState(ConstraintSequence(constraint_list), -1)
def __str__(self):
return f"{self.state}/{self.bank}x{self.num_completed}"
def __copy__(self):
return OrderedConstraintState(self.sequence, self.state)
def copy(self):
return self.__copy__()
@property
def num_completed(self):
if self.state == -1:
return 0
count = len(
list(filter(lambda x: x, self.sequence.endpoints[0 : self.state + 1]))
)
return count
@property
def is_root(self):
return self.state == -1
@property
def name(self):
if self.state == -1:
return "ROOT"
else:
return str(self.sequence[self.state])
@property
def bank(self) -> int:
return self.state + 1
@property
def finished(self):
return self.state + 1 == len(self.sequence)
@property
def token_counts(self):
        # ConstraintSequence has no token_counts(); count tokens directly.
        return Counter(self.sequence.sequences)
@property
def tokens(self):
return self.sequence.tokens
@property
def num_constraint_tokens(self):
return sum(self.token_counts.values())
def next_tokens(self) -> Set[int]:
"""Returns the list of tokens that could come next.
These are (a) all tokens extending the root state and, for
non-root states, additionally all tokens extending the current
state."""
tokens = set()
if self.state > 0:
tokens.add(self.sequence[0])
if not self.finished:
tokens.add(self.sequence[self.state + 1])
return tokens
def advance(self, token: int):
"""Reads in a token and advances the state. Here's how it works.
We can advance to the next state if:
- there is a matching child
- its path isn't blocked
A path is blocked when all constraints that are descendants of
that node have already been generated, in the current state.
If we are not able to advance from the current state, we "fall
off the graph" and return to the root state. There, we again
try to advance, checking the same criteria.
In any case, when falling off the graph, we need to do some
bookkeeping. We:
- check whether any constraints were met (all prefixes of
current state)
- if one is found, mark it as completed
- adjust visited nodes accordingly
"""
token = int(token)
# print(f"{self} ADVANCE({token}) {self.sequence} -> ", end="")
if self.finished:
# Accept anything
next_state = self.copy()
elif self.sequence[self.state + 1] == token:
# Advance to the next token
next_state = OrderedConstraintState(self.sequence, self.state + 1)
elif self.sequence.endpoints[self.state]:
# Accept anything between constraints (*)
next_state = self.copy()
elif token == self.sequence[0]:
# Start over having generated the first token
next_state = OrderedConstraintState(self.sequence, 0)
else:
# Start over from the root
next_state = OrderedConstraintState(self.sequence, -1)
return next_state
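# Walk-through of the ordered state for the same example constraints:
#
#     state = OrderedConstraintState(ConstraintSequence([[3, 4], [5]]))
#     state = state.advance(3)   # state 0, bank 1
#     state = state.advance(4)   # state 1, bank 2, num_completed 1
#     state = state.advance(7)   # at an endpoint, so any token is accepted (*)
#     state = state.advance(5)   # state 2, finished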
| 16,536 | Python | .py | 402 | 32.532338 | 96 | 0.629844 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,215 | search_utils.py | dptech-corp_NAG2G/NAG2G/search_strategies/search_utils.py | from itertools import accumulate
def collate_tokens(
values,
pad_idx,
left_pad=False,
pad_to_length=None,
pad_to_multiple=1,
):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
size = size if pad_to_length is None else max(size, pad_to_length)
if pad_to_multiple != 1 and size % pad_to_multiple != 0:
size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple)
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)])
return res
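# Padding sketch (values are torch tensors; pad_idx is assumed to be 0 here):
#
#     collate_tokens([torch.tensor([7, 8, 9]), torch.tensor([7, 8])], pad_idx=0)
#     # -> tensor([[7, 8, 9],
#     #            [7, 8, 0]])
#     collate_tokens([torch.tensor([7, 8, 9]), torch.tensor([7, 8])], pad_idx=0, left_pad=True)
#     # -> tensor([[7, 8, 9],
#     #            [0, 7, 8]])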
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = (
((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_invalid = (
((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float("-inf")
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append(
(
src_token_to_word[src_idx.item()] - 1,
tgt_token_to_word[tgt_idx.item()] - 1,
)
)
return alignment
def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = ((tgt_sent != pad)).nonzero(as_tuple=False)
src_valid = ((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1)
alignment = []
if len(tgt_valid) != 0 and len(src_valid) != 0:
attn_valid = attn[tgt_valid, src_valid]
alignment = [
["{:.6f}".format(p) for p in src_probs.tolist()] for src_probs in attn_valid
]
return alignment
| 2,483 | Python | .py | 60 | 34.45 | 88 | 0.601825 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,216 | __init__.py | dptech-corp_NAG2G/NAG2G/search_strategies/__init__.py | # from .beam_search_generator import SequenceGeneratorBeamSearch, EnsembleModel, EnsembleModelWithAlignment, SequenceGeneratorWithAlignment
from .search import Search, BeamSearch, PrefixConstrainedBeamSearch, LexicallyConstrainedBeamSearch, LengthConstrainedBeamSearch, DiverseBeamSearch, Sampling, DiverseSiblingsSearch
from .greedy_generator import GreedyGenerator
from .sample_generator import SampleGenerator
from .token_generation_constraints import ConstraintState, OrderedConstraintState, UnorderedConstraintState
from .simple_sequence_generator import SimpleGenerator
from .parse import add_search_strategies_args | 621 | Python | .py | 7 | 87.857143 | 180 | 0.894309 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,217 | ngram_repeat_block.py | dptech-corp_NAG2G/NAG2G/search_strategies/ngram_repeat_block.py | # Originally from Microsoft Corporation.
# Licensed under the MIT License.
""" Wrapper for ngram_repeat_block cuda extension """
import math
import warnings
from typing import List
import torch
from torch import nn
try:
from fairseq import ngram_repeat_block_cuda
EXTENSION_BUILT = True
except ImportError:
EXTENSION_BUILT = False
def is_cuda_extension_usable() -> bool:
"""Check whether ngram_repeat_block_cuda is built properly"""
if not EXTENSION_BUILT or not torch.cuda.is_available():
return False
bsz = 2
tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda")
lprobs = torch.rand((8, 12), device="cuda")
try:
outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3)
outputs = outputs + 4 # This line breaks if the extension is built incorrectly.
return True
except RuntimeError:
warnings.warn(
"NGramRepeatBlock extension must be rebuilt."
'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace'
)
return False
class NGramRepeatBlock(nn.Module):
"""Wrapper class for calling ngram_repeat_block cuda extension"""
def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True):
super().__init__()
self.use_extension = is_cuda_extension_usable() if use_extension else False
self.no_repeat_ngram_size = no_repeat_ngram_size
def reset_parameters(self):
pass
@torch.jit.unused
def call_cuda_extension(
self,
tokens,
lprobs,
bsz: int,
beam_size: int,
step: int,
):
return ngram_repeat_block_cuda.forward(
tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size
)
def forward(
self,
tokens,
lprobs,
bsz: int,
beam_size: int,
step: int,
):
"""
Args:
tokens(Tensor): Input tokens(Bsz*beam, seq_len)
lprobs(Tensor): likelihood probability,
                Expected to be updated in place. (Bsz*beam, vocab_size)
bsz(int): batch size
step(int): current step
beam_size(int): beam size
no_repeat_ngram_size(int): Ngram size
"""
msg = f"expected {bsz *beam_size} got"
assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}"
assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}"
if self.use_extension:
return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step)
else:
return self._no_repeat_ngram(
tokens,
lprobs,
bsz,
beam_size,
step,
)
def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int):
"""For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf"""
banned_tokens = [
torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size)
]
if step + 2 - self.no_repeat_ngram_size >= 0:
cpu_tokens: List[List[int]] = tokens.cpu().tolist()
check_start_pos = step + 2 - self.no_repeat_ngram_size
for bbsz_idx in range(bsz * beam_size):
ngram_to_check = cpu_tokens[bbsz_idx][
-(self.no_repeat_ngram_size - 1) :
]
for i in range(check_start_pos):
if (
ngram_to_check
== cpu_tokens[bbsz_idx][i : i + self.no_repeat_ngram_size - 1]
):
banned_tokens[bbsz_idx].append(
cpu_tokens[bbsz_idx][i + self.no_repeat_ngram_size - 1]
)
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx][
torch.tensor(banned_tokens[bbsz_idx], dtype=torch.int64)
] = torch.tensor(-math.inf).to(lprobs)
return lprobs | 4,105 | Python | .py | 106 | 28.698113 | 102 | 0.571249 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
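# Behaviour sketch of the pure-Python fallback, assuming no_repeat_ngram_size=2
# and a single hypothesis whose tokens so far are [5, 6, 5]: the bigram (5, 6)
# has already been produced, so generating 6 right after the trailing 5 is
# banned at this step.
#
#     tokens = torch.tensor([[5, 6, 5]])   # (bsz * beam, seq_len)
#     lprobs = torch.zeros(1, 10)          # (bsz * beam, vocab)
#     blocker = NGramRepeatBlock(no_repeat_ngram_size=2, use_extension=False)
#     lprobs = blocker(tokens, lprobs, bsz=1, beam_size=1, step=2)
#     # lprobs[0, 6] is now -inf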
2,287,218 | parse.py | dptech-corp_NAG2G/NAG2G/search_strategies/parse.py | def add_search_strategies_args(parser, train=False, gen=False):
group = parser.add_argument_group("beam search")
group.add_argument(
"--beam-size", default=10, type=int, metavar="N", help="beam size for inference"
)
group.add_argument(
"--search_strategies",
type=str,
default="SequenceGeneratorBeamSearch",
help="beam size for inference",
)
group.add_argument(
"--len-penalty",
default=1.0,
type=float,
metavar="N",
help="Length penalty in beam search for inference",
)
group.add_argument(
"--temperature",
default=1.0,
type=float,
metavar="N",
help="Temperature in beam search for inference",
)
# for two stage
group.add_argument(
"--beam-size-second", default=5, type=int, metavar="N", help="beam size for second stage inference"
)
group.add_argument(
"--beam-head-second", default=3, type=int, metavar="N", help="beam head for second stage inference"
)
return group
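# Typical wiring (a minimal sketch; the surrounding scripts are assumed to
# build their own argparse parser):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     add_search_strategies_args(parser)
#     args = parser.parse_args(["--beam-size", "20"])
#     args.beam_size            # 20
#     args.search_strategies    # "SequenceGeneratorBeamSearch" (default)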
| 1,067 | Python | .py | 33 | 25.363636 | 107 | 0.618587 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,219 | unimol_encoder.py | dptech-corp_NAG2G/NAG2G/models/unimol_encoder.py | import torch
try:
from unimol.models import UniMolModel
class CustomizedUniMolModel(UniMolModel):
def forward(
self,
src_tokens,
src_distance,
src_coord,
src_edge_type,
encoder_masked_tokens=None,
features_only=False,
classification_head_name=None,
**kwargs
):
if classification_head_name is not None:
features_only = True
padding_mask = src_tokens.eq(self.padding_idx)
if not padding_mask.any():
padding_mask = None
x = self.embed_tokens(src_tokens)
def get_dist_features(dist, et):
n_node = dist.size(-1)
gbf_feature = self.gbf(dist, et)
gbf_result = self.gbf_proj(gbf_feature)
graph_attn_bias = gbf_result
graph_attn_bias = graph_attn_bias.permute(0, 3, 1, 2).contiguous()
graph_attn_bias = graph_attn_bias.view(-1, n_node, n_node)
return graph_attn_bias
graph_attn_bias = get_dist_features(src_distance, src_edge_type)
(
encoder_rep,
encoder_pair_rep,
delta_encoder_pair_rep,
x_norm,
delta_encoder_pair_rep_norm,
) = self.encoder(x, padding_mask=padding_mask, attn_mask=graph_attn_bias)
encoder_pair_rep[encoder_pair_rep == float("-inf")] = 0
encoder_distance = None
encoder_coord = None
logits = encoder_rep
if not features_only:
if self.args.masked_token_loss > 0:
logits = self.lm_head(encoder_rep, encoder_masked_tokens)
if self.args.masked_coord_loss > 0:
coords_emb = src_coord
if padding_mask is not None:
atom_num = (
torch.sum(1 - padding_mask.type_as(x), dim=1) - 1
).view(-1, 1, 1, 1)
else:
atom_num = src_coord.shape[1] - 1
delta_pos = coords_emb.unsqueeze(1) - coords_emb.unsqueeze(2)
attn_probs = self.pair2coord_proj(delta_encoder_pair_rep)
coord_update = delta_pos / atom_num * attn_probs
coord_update = torch.sum(coord_update, dim=2)
encoder_coord = coords_emb + coord_update
if self.args.masked_dist_loss > 0:
encoder_distance = self.dist_head(encoder_pair_rep)
if classification_head_name is not None:
logits = self.classification_heads[classification_head_name](
encoder_rep
)
return (
logits,
encoder_distance,
encoder_coord,
x_norm,
delta_encoder_pair_rep_norm,
)
except Exception:
print("Cannot import unimol")
| 3,072 | Python | .py | 72 | 27.166667 | 85 | 0.506024 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,220 | NAG2G.py | dptech-corp_NAG2G/NAG2G/models/NAG2G.py | import logging
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from unicore import utils
from unicore.models import BaseUnicoreModel, register_model, register_model_architecture
from typing import Callable, Optional, Dict, Tuple, Any, NamedTuple, List
import math
from NAG2G.modules import (
MaskLMHead,
ClassificationHead,
)
from unicore.modules import init_bert_params, TransformerEncoder, TransformerDecoder
from NAG2G.decoder import TransformerDecoder as NewTransformerDecoder
logger = logging.getLogger(__name__)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
class NAG2GFBaseModel(BaseUnicoreModel):
@staticmethod
def default_encoder_add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--encoder-layers", type=int, metavar="L", help="num encoder layers"
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="H",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="F",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="A",
help="num encoder attention heads",
)
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--pooler-activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use for pooler layer",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
@staticmethod
def default_decoder_add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--decoder-layers", type=int, metavar="L", help="num decoder layers"
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="H",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="H",
help="decoder ffn embedding dimension",
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="A",
help="num decoder attention heads",
)
parser.add_argument(
"--emb-dropout",
type=float,
metavar="D",
help="dropout probability for embeddings",
)
parser.add_argument(
"--activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN",
)
parser.add_argument(
"--position-type",
default="normal",
choices=["sinusoidal", "relative", "normal"],
help="noise type in coordinate noise",
)
# parser.add_argument(
# "--transformer-type",
# default="normal",
# choices=["simple", "normal"],
# help="noise type in coordinate noise",
# )
parser.add_argument(
"--pooler-dropout",
type=float,
metavar="D",
help="dropout probability in the masked_lm pooler layers",
)
parser.add_argument(
"--max-seq-len", type=int, help="number of positional embeddings to learn"
)
parser.add_argument(
"--post-ln", type=bool, help="use post layernorm or pre layernorm"
)
parser.add_argument(
"--contrastive-global-negative",
action="store_true",
help="use contrastive learning or not",
)
parser.add_argument(
"--auto-regressive",
action="store_true",
help="use auto regressive generative or not",
)
parser.add_argument(
"--class-embedding", action="store_true", help="use class embedding or not"
)
parser.add_argument(
"--use-decoder", action="store_true", help="use decoder or not"
)
parser.add_argument(
"--smoothl1-beta",
default=1.0,
type=float,
help="beta in pair distance smoothl1 loss",
)
parser.add_argument(
"--rel_pos",
action="store_true",
help="rel_pos",
)
parser.add_argument(
"--flag_old",
action="store_true",
help="flag_old",
)
parser.add_argument(
"--decoder_type",
default="default",
choices=[
"default",
"new",
],
help="model chosen as decoder",
)
parser.add_argument(
"--reduced_head_dim", type=int, default=4, help="reduced_head_dim"
)
parser.add_argument(
"--q_reduced_before",
action="store_true",
help="q_reduced_before",
)
parser.add_argument(
"--want_emb_k_dynamic_proj",
action="store_true",
help="want_emb_k_dynamic_proj",
)
parser.add_argument(
"--want_emb_k_dynamic_dropout",
action="store_true",
help="want_emb_k_dynamic_dropout",
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
NAG2GFBaseModel.default_encoder_add_args(parser)
NAG2GFBaseModel.default_decoder_add_args(parser)
def __init__(self, args, dictionary, **kwargs):
super().__init__()
flag_use_base_architecture = (
kwargs.pop("flag_use_base_architecture")
if "flag_use_base_architecture" in kwargs
else True
)
if flag_use_base_architecture:
base_architecture(args)
self.init(args, dictionary, **kwargs)
def init(self, args, dictionary, **kwargs):
self.args = args
self.padding_idx = dictionary.pad()
if self.args.bpe_tokenizer_path == "none":
len_dict = len(dictionary)
else:
with open(os.path.join(self.args.bpe_tokenizer_path, "vocab.json"), "r") as f:
len_dict = len(list(json.load(f).keys()))
self.embed_tokens = nn.Embedding(
len_dict, args.encoder_embed_dim, self.padding_idx
)
self.encoder = self.get_encoder(kwargs)
self.lm_head = MaskLMHead(
embed_dim=args.encoder_embed_dim,
output_dim=len_dict,
activation_fn=args.activation_fn,
weight=None,
)
self.auto_regressive = args.auto_regressive
self.use_decoder = args.use_decoder
# self.embed_positions = self.get_position_embedding('test', args.max_seq_len, args.encoder_embed_dim)
self.embed_positions = self.get_position_embedding(
args.position_type, args.max_seq_len, args.encoder_embed_dim
)
# self.embed_positions = nn.Embedding(args.max_seq_len, args.encoder_embed_dim)
self.use_class_embedding = args.class_embedding
if self.use_class_embedding:
self.class_embedding = nn.Embedding(100, args.encoder_embed_dim)
if args.auto_regressive:
self.use_decoder = True
if self.use_decoder:
# self.decoder_embed_positions = self.get_position_embedding('test', args.max_seq_len, args.decoder_embed_dim)
# self.decoder_embed_positions = nn.Embedding(args.max_seq_len, args.decoder_embed_dim)
self.decoder_embed_positions = self.get_position_embedding(
args.position_type, args.max_seq_len, args.decoder_embed_dim
)
            self.decoder_embed_tokens = self.embed_tokens  # share token embeddings with the encoder
# self.decoder_embed_tokens = nn.Embedding(len(dictionary), args.decoder_embed_dim, self.padding_idx)
self.decoder = self.get_decoder()
self.decoder_lm_head = MaskLMHead(
embed_dim=args.decoder_embed_dim,
output_dim=len_dict,
activation_fn=args.activation_fn,
weight=None,
)
# self.decoder_lm_head = nn.Linear(args.decoder_embed_dim, len(dictionary))
self.classification_heads = nn.ModuleDict()
self.apply(init_bert_params)
print("flag_old", self.args.flag_old)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if hasattr(task, "encoder_dictionary"):
return cls(
args, task.dictionary, encoder_dictionary=task.encoder_dictionary
)
return cls(args, task.dictionary)
def get_default_encoder(self):
encoder = TransformerEncoder(
encoder_layers=self.args.encoder_layers,
embed_dim=self.args.encoder_embed_dim,
ffn_embed_dim=self.args.encoder_ffn_embed_dim,
attention_heads=self.args.encoder_attention_heads,
emb_dropout=self.args.emb_dropout,
dropout=self.args.dropout,
attention_dropout=self.args.attention_dropout,
activation_dropout=self.args.activation_dropout,
max_seq_len=self.args.max_seq_len,
activation_fn=self.args.activation_fn,
)
return encoder
def get_encoder(self, kwargs):
encoder = self.get_default_encoder()
return encoder
def get_decoder(self):
if self.args.decoder_type == "default":
decoder = TransformerDecoder(
decoder_layers=self.args.decoder_layers,
embed_dim=self.args.decoder_embed_dim,
ffn_embed_dim=self.args.decoder_ffn_embed_dim,
attention_heads=self.args.decoder_attention_heads,
emb_dropout=self.args.emb_dropout,
dropout=self.args.dropout,
attention_dropout=self.args.attention_dropout,
activation_dropout=self.args.activation_dropout,
max_seq_len=self.args.max_seq_len,
activation_fn=self.args.activation_fn,
auto_regressive=self.args.auto_regressive,
post_ln=self.args.post_ln,
rel_pos=self.args.rel_pos,
)
elif self.args.decoder_type == "new":
decoder = NewTransformerDecoder(
decoder_layers=self.args.decoder_layers,
embed_dim=self.args.decoder_embed_dim,
ffn_embed_dim=self.args.decoder_ffn_embed_dim,
attention_heads=self.args.decoder_attention_heads,
emb_dropout=self.args.emb_dropout,
dropout=self.args.dropout,
attention_dropout=self.args.attention_dropout,
activation_dropout=self.args.activation_dropout,
max_seq_len=self.args.max_seq_len,
activation_fn=self.args.activation_fn,
auto_regressive=self.args.auto_regressive,
post_ln=self.args.post_ln,
rel_pos=self.args.rel_pos,
                reduced_head_dim=self.args.reduced_head_dim,
                q_reduced_before=self.args.q_reduced_before,
                want_emb_k_dynamic_proj=self.args.want_emb_k_dynamic_proj,
                want_emb_k_dynamic_dropout=self.args.want_emb_k_dynamic_dropout,
)
else:
            raise ValueError(f"unknown decoder_type: {self.args.decoder_type}")
return decoder
def get_position_embedding(self, position_type, max_seq_len, embed_dim):
if position_type == "sinusoidal":
pe = torch.zeros(max_seq_len, embed_dim)
position = torch.arange(0, max_seq_len).unsqueeze(1)
div_term = torch.exp(
(
torch.arange(0, embed_dim, 2, dtype=torch.float)
* -(math.log(10000.0) / embed_dim)
)
)
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe1 = nn.Embedding(max_seq_len, embed_dim)
pe1.weight = nn.Parameter(pe, requires_grad=False)
return pe1
elif position_type == "relative":
# relative_pe = nn.Embedding(max_seq_len * 2 + 2, embed_dim)
pe = torch.zeros(max_seq_len, embed_dim // 2)
position = torch.arange(0, max_seq_len).unsqueeze(1)
div_term = torch.exp(
(
torch.arange(0, (embed_dim // 2), 2, dtype=torch.float)
* -(math.log(10000.0) / (embed_dim // 2))
)
)
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe1 = nn.Embedding(max_seq_len, embed_dim // 2)
pe1.weight = nn.Parameter(pe, requires_grad=False)
            relative = nn.Embedding(max_seq_len, embed_dim // 2)
            relative_pe = nn.Embedding(max_seq_len, embed_dim)
            # concatenate the learned and sinusoidal halves along the feature dim
            relative_pe.weight = nn.Parameter(torch.cat((relative.weight, pe1.weight), dim=-1))
            return relative_pe
else:
return nn.Embedding(max_seq_len, embed_dim)
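    # The "sinusoidal" table above is the fixed encoding from
    # "Attention Is All You Need":
    #     PE[pos, 2i]   = sin(pos / 10000^(2i / d))
    #     PE[pos, 2i+1] = cos(pos / 10000^(2i / d))
    # "normal" is a fully learned embedding, while "relative" concatenates a
    # learned half with a sinusoidal half along the feature dimension.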
def forward(
self,
# src_tokens,
# decoder_src_tokens,
# relation_type,
# masked_tokens=None,
features_only=False,
classification_head_name=None,
**kwargs
):
if classification_head_name is not None:
features_only = True
decoder_kwargs = {}
encoder_kwargs = {}
for k, v in kwargs.items():
if "decoder" in k:
decoder_kwargs[k] = v
else:
encoder_kwargs[k] = v
if self.args.use_class_encoder:
assert self.args.N_vnode == 2
encoder_kwargs["cls_embedding"] = self.decoder_embed_tokens(decoder_kwargs["decoder_src_tokens"][:, 1])
encoder_result = self.forward_encoder(
# src_tokens=src_tokens,
**encoder_kwargs
)
masked_tokens = encoder_result.pop("masked_tokens")
encoder_rep = encoder_result.pop("encoder_rep")
padding_mask = encoder_result.pop("padding_mask")
decoder_outprob, vae_kl_loss = self.forward_decoder(
encoder_cls=encoder_rep,
temperature=None,
encoder_padding_mask=padding_mask,
want_probs=False,
**decoder_kwargs,
)
contrast_out = None
if not features_only:
logits = self.lm_head(encoder_rep, masked_tokens)
else:
logits = encoder_rep
return logits, decoder_outprob, contrast_out, vae_kl_loss
def forward_default_encoder(self, encoder, src_tokens, **kwargs):
padding_mask = src_tokens.eq(self.padding_idx)
masked_tokens = ~padding_mask
tmp_padding_mask = padding_mask
if not padding_mask.any():
padding_mask = None
x = self.embed_tokens(src_tokens)
seq_len = src_tokens.size(1)
        x = x * math.sqrt(x.shape[-1])  # scale token embeddings by sqrt(embed_dim)
x += self.embed_positions.weight[:seq_len, :]
# if self.use_class_embedding:
# x[:,0,:] += self.class_embedding(relation_type)
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
encoder_rep = encoder(x, padding_mask=padding_mask, attn_mask=None)
return encoder_rep, padding_mask, tmp_padding_mask, masked_tokens
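    # Shape sketch for the default encoder path (B = batch, T = source length,
    # C = encoder_embed_dim): src_tokens (B, T) -> token + positional
    # embeddings (B, T, C) -> encoder_rep (B, T, C). padding_mask is (B, T)
    # with True at padded positions, or None when nothing is padded.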
def forward_encoder(
self,
src_tokens,
# relation_type,
# masked_tokens=None,
**kwargs
):
encoder_rep, padding_mask, _, masked_tokens = self.forward_default_encoder(
self.encoder, src_tokens
)
return {
"encoder_rep": encoder_rep,
"padding_mask": padding_mask,
"masked_tokens": padding_mask,
}
def forward_decoder(
self,
decoder_src_tokens,
encoder_cls,
temperature,
encoder_padding_mask,
want_probs=True,
**kwargs
):
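        # Shape sketch (B = batch, T = target length, V = vocab size):
        # decoder_src_tokens (B, T) -> decoder_rep (B, T, C) -> logits (B, T, V).
        # With want_probs=True only the last position's log-probabilities,
        # shape (B, V), are returned for step-wise decoding.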
decoder_outprob = None
vae_kl_loss = None
if self.use_decoder:
decoder_padding_mask = decoder_src_tokens.eq(self.padding_idx)
if not decoder_padding_mask.any():
decoder_padding_mask = None
x_decoder = self.decoder_embed_tokens(decoder_src_tokens)
if self.args.flag_old:
                x_decoder = x_decoder * math.sqrt(x_decoder.shape[-1])  # scale token embeddings by sqrt(embed_dim)
seq_len = decoder_src_tokens.size(1)
x_decoder += self.decoder_embed_positions.weight[:seq_len, :]
if self.args.flag_old and decoder_padding_mask is not None:
x_decoder = x_decoder * (
1 - decoder_padding_mask.unsqueeze(-1).type_as(x_decoder)
)
attn_mask = (
kwargs["decoder_attn_mask"]
if "decoder_attn_mask" in kwargs.keys()
else None
)
if self.args.decoder_type == "default":
new_dict = {"attn_mask": attn_mask}
elif self.args.decoder_type == "new":
new_dict = {"emb_k_dynamic": attn_mask}
decoder_rep = self.decoder(
x_decoder,
padding_mask=decoder_padding_mask,
encoder_padding_mask=encoder_padding_mask,
encoder_out=encoder_cls,
**new_dict,
)
decoder_outprob = self.decoder_lm_head(decoder_rep)
if want_probs:
probs = self.get_normalized_probs(
decoder_outprob, temperature, log_probs=True, sample=None
)
probs = probs[:, -1, :]
return probs, None
return decoder_outprob, vae_kl_loss
def get_normalized_probs(self, net_output, temperature, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output # [0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return torch.log(F.softmax(logits / temperature, dim=-1))
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = ClassificationHead(
input_dim=self.args.encoder_embed_dim,
inner_dim=inner_dim or self.args.encoder_embed_dim,
num_classes=num_classes,
activation_fn=self.args.pooler_activation_fn,
pooler_dropout=self.args.pooler_dropout,
)
@register_model("NAG2GF")
class NAG2GFModel(NAG2GFBaseModel):
@staticmethod
def add_args(parser):
NAG2GFBaseModel.add_args(parser)
parser.add_argument(
"--encoder-type",
default="default",
choices=["default", "unimol", "default_and_unimol"],
help="model chosen as encoder",
)
def get_unimol_encoder(self, kwargs):
encoder_dictionary = (
kwargs["encoder_dictionary"]
if "encoder_dictionary" in kwargs.keys()
else None
)
assert encoder_dictionary is not None
from .unimol_encoder import CustomizedUniMolModel
encoder = CustomizedUniMolModel(self.args, encoder_dictionary)
return encoder
def get_encoder(self, kwargs):
if self.args.encoder_type == "default":
encoder = self.get_default_encoder()
elif self.args.encoder_type == "unimol":
encoder = self.get_unimol_encoder(kwargs)
elif self.args.encoder_type == "default_and_unimol":
encoder = nn.ModuleList(
[self.get_default_encoder(), self.get_unimol_encoder(kwargs)]
)
return encoder
def forward_unimol_encoder(self, encoder, src_tokens, **kwargs):
padding_mask = src_tokens.eq(self.padding_idx)
masked_tokens = ~padding_mask
if not padding_mask.any():
padding_mask = None
tmp_padding_mask = padding_mask
encoder_input_dict = {
"src_tokens": src_tokens,
"encoder_masked_tokens": masked_tokens,
"src_distance": kwargs["src_distance"],
"src_coord": kwargs["src_coord"],
"src_edge_type": kwargs["src_edge_type"],
"features_only": True,
}
        encoder_rep, _, _, _, _ = encoder(
**encoder_input_dict,
)
return encoder_rep, padding_mask, tmp_padding_mask, masked_tokens
def forward_encoder(
self,
src_tokens,
# relation_type,
# masked_tokens=None,
**kwargs
):
if self.args.encoder_type == "default":
encoder_rep, padding_mask, _, masked_tokens = self.forward_default_encoder(
self.encoder, src_tokens
)
elif self.args.encoder_type == "unimol":
encoder_rep, padding_mask, _, masked_tokens = self.forward_unimol_encoder(
self.encoder, src_tokens, **kwargs
)
elif self.args.encoder_type == "default_and_unimol":
(
default_encoder_rep,
default_padding_mask,
default_padding_mask_tmp,
default_masked_tokens,
) = self.forward_default_encoder(
self.encoder[0], src_tokens=kwargs["smiles_src_tokens"]
)
(
unimol_encoder_rep,
unimol_padding_mask,
unimol_padding_mask_tmp,
unimol_masked_tokens,
) = self.forward_unimol_encoder(self.encoder[1], src_tokens, **kwargs)
encoder_rep = torch.cat([default_encoder_rep, unimol_encoder_rep], 1)
masked_tokens = torch.cat([default_masked_tokens, unimol_masked_tokens], 1)
if default_padding_mask is None and unimol_padding_mask is None:
padding_mask = None
else:
padding_mask = torch.cat(
[default_padding_mask_tmp, unimol_padding_mask_tmp], 1
)
return {
"encoder_rep": encoder_rep,
"padding_mask": padding_mask,
"masked_tokens": masked_tokens,
}
def get_src_tokens(self, sample):
src_tokens = sample["net_input"]["src_tokens"] # B x T
return src_tokens
@register_model_architecture("NAG2GF", "NAG2GF")
def base_architecture(args):
encoder_base_architecture(args)
decoder_base_architecture(args)
def encoder_base_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 15)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 64)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
def decoder_base_architecture(args):
args.emb_dropout = getattr(args, "emb_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.max_seq_len = getattr(args, "max_seq_len", 512)
args.post_ln = getattr(args, "post_ln", False)
args.contrastive_global_negative = getattr(
args, "contrastive_global_negative", False
)
args.auto_regressive = getattr(args, "auto_regressive", False)
args.use_decoder = getattr(args, "use_decoder", False)
args.class_embedding = getattr(args, "class_embedding", False)
args.decoder_layers = getattr(args, "decoder_layers", 15)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 64)
args.decoder_loss = getattr(args, "decoder_loss", 1)
args.rel_pos = getattr(args, "rel_pos", False)
args.flag_old = getattr(args, "flag_old", False)
args.decoder_type = getattr(args, "decoder_type", "default")
args.reduced_head_dim = getattr(args, "reduced_head_dim", 4)
args.q_reduced_before = getattr(args, "q_reduced_before", False)
args.want_emb_k_dynamic_proj = getattr(args, "want_emb_k_dynamic_proj", False)
args.want_emb_k_dynamic_dropout = getattr(args, "want_emb_k_dynamic_dropout", True)
# args.encoder_type = getattr(args, "encoder_type", "default")
@register_model_architecture("NAG2GF", "NAG2GF_base")
def NAG2G_base_architecture(args):
base_architecture(args)
@register_model_architecture("NAG2GF", "NAG2GF_unimol")
def NAG2G_unimol_architecture(args):
args.encoder_type = "unimol"
base_architecture(args)
@register_model_architecture("NAG2GF", "NAG2GF_DnU")
def NAG2G_default_and_unimol_architecture(args):
args.encoder_type = "default_and_unimol"
base_architecture(args)
| 26,512 | Python | .py | 640 | 30.490625 | 122 | 0.582552 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,221 | __init__.py | dptech-corp_NAG2G/NAG2G/models/__init__.py | try:
from .unimol_encoder import CustomizedUniMolModel
except Exception:
print("Cannot import unimol")
from .NAG2G import NAG2GFModel
from .G2G import G2GModel | 157 | Python | .py | 6 | 24 | 53 | 0.809211 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,222 | G2G.py | dptech-corp_NAG2G/NAG2G/models/G2G.py | import logging
import torch
import torch.nn as nn
from unicore.models import register_model, register_model_architecture
from unimol import __version__
if __version__ == "1.5.0":
from unimol.models.transformer_m import TransformerMModel
from unimol.models.transformer_m import (
bert_base_architecture as encoder_base_architecture,
)
if __version__ == "2.0.0":
from unimol.models.unimolv2 import Unimolv2Model
from unimol.models.unimolv2 import base_architecture as encoder_base_architecture
from .NAG2G import NAG2GFModel, NAG2GFBaseModel, decoder_base_architecture
from unicore import utils
from NAG2G.modules import seq2attn
logger = logging.getLogger(__name__)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
@register_model("G2G")
class G2GModel(NAG2GFModel):
@staticmethod
def add_args(parser):
if __version__ == "1.5.0":
TransformerMModel.add_args(parser)
parser.add_argument(
"--N_vnode",
type=int,
default=1,
metavar="N",
help="number of vnode",
)
elif __version__ == "2.0.0":
Unimolv2Model.add_args(parser)
NAG2GFBaseModel.default_decoder_add_args(parser)
parser.add_argument(
"--encoder-type",
default="transformerm",
choices=[
"default",
"transformerm",
"unimol",
"default_and_unimol",
"unimolv2",
],
help="model chosen as encoder",
)
def __init__(self, args, dictionary, **kwargs):
NAG2G_G2G_architecture(args)
super().__init__(args, dictionary, flag_use_base_architecture=False, **kwargs)
# self.degree_pe = nn.Embedding(12 + 1, self.args.decoder_attention_heads)
if self.args.decoder_type == "default":
self.degree_pe = nn.Embedding(100, self.args.decoder_attention_heads)
elif self.args.decoder_type == "new":
self.degree_pe = nn.Embedding(100, self.args.decoder_attention_heads * self.args.reduced_head_dim)
else:
            raise ValueError(f"unknown decoder_type: {self.args.decoder_type}")
if self.args.want_decoder_attn and self.args.laplacian_pe_dim > 0:
assert self.args.decoder_type == "default"
if self.args.not_sumto2:
self.laplacian_linear = nn.Sequential(
nn.Linear(2 * self.args.laplacian_pe_dim, self.args.laplacian_pe_dim),
nn.ReLU(),
nn.Linear(
self.args.laplacian_pe_dim, self.args.decoder_attention_heads
),
)
else:
self.laplacian_linear = nn.Linear(2, self.args.decoder_attention_heads)
if self.args.want_decoder_attn:
self.seq2attn = seq2attn(
self.args.laplacian_pe_dim,
not self.args.not_sumto2,
dictionary=dictionary,
want_h_degree=self.args.want_h_degree,
idx_type=self.args.idx_type,
use_class=self.args.use_class,
)
def half(self):
super().half()
self.encoder = self.encoder.half()
return self
def bfloat16(self):
super().bfloat16()
self.encoder = self.encoder.bfloat16()
return self
def float(self):
super().float()
self.encoder = self.encoder.float()
return self
def get_transformerm_encoder(self, kwargs):
encoder = TransformerMModel.build_model(self.args, None)
return encoder
def get_unimolv2_encoder(self, kwargs):
encoder = Unimolv2Model.build_model(self.args, None)
return encoder
def get_laplacian_attn_mask(self, laplacian_attn_mask):
laplacian_attn_mask = self.laplacian_linear(laplacian_attn_mask)
return laplacian_attn_mask
def get_encoder(self, kwargs):
if self.args.encoder_type == "transformerm":
encoder = self.get_transformerm_encoder(kwargs)
elif self.args.encoder_type == "unimolv2":
encoder = self.get_unimolv2_encoder(kwargs)
else:
encoder = super().get_encoder(kwargs)
return encoder
def get_attn_mask(self, **kwargs):
degree_attn_mask = (
kwargs.pop("decoder_degree_attn_mask")
if "decoder_degree_attn_mask" in kwargs
else None
)
laplacian_attn_mask = (
kwargs.pop("decoder_laplacian_attn_mask")
if "decoder_laplacian_attn_mask" in kwargs
else None
)
attn_mask = None
if degree_attn_mask is not None:
added_degree_attn_mask = self.degree_pe(degree_attn_mask)
added_degree_attn_mask[degree_attn_mask == 0] = 0
added_degree_attn_mask = added_degree_attn_mask.permute(0, 3, 1, 2)
added_degree_attn_mask = added_degree_attn_mask.reshape(
-1, degree_attn_mask.shape[1], degree_attn_mask.shape[2]
)
if attn_mask is None:
attn_mask = added_degree_attn_mask
else:
attn_mask = attn_mask + added_degree_attn_mask
if laplacian_attn_mask is not None:
laplacian_attn_mask = laplacian_attn_mask.to(attn_mask.dtype)
added_laplacian_attn_mask = self.get_laplacian_attn_mask(
laplacian_attn_mask
)
added_laplacian_attn_mask = added_laplacian_attn_mask.permute(
0, 3, 1, 2
).reshape(-1, laplacian_attn_mask.shape[1], laplacian_attn_mask.shape[2])
if attn_mask is None:
attn_mask = added_laplacian_attn_mask
else:
attn_mask = attn_mask + added_laplacian_attn_mask
return attn_mask
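    # Bias shape sketch (B = batch, H = decoder attention heads, T = decoder
    # length): the degree mask arrives as (B, T, T) and the Laplacian mask as
    # (B, T, T, F); both are mapped to (B, T, T, H), permuted to (B, H, T, T)
    # and flattened to (B * H, T, T) so they can be added directly to the
    # decoder self-attention logits.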
def get_degree_attn_mask(self, **kwargs):
assert "decoder_degree_attn_mask" in kwargs
degree_attn_mask = kwargs["decoder_degree_attn_mask"]
added_degree_attn_mask = self.degree_pe(degree_attn_mask)
added_degree_attn_mask[degree_attn_mask == 0] = 0
return added_degree_attn_mask
def get_pad_mask(self, kwargs):
if self.args.encoder_type == "unimolv2":
padding_mask = kwargs["batched_data"]["atom_mask"] == 0
n_mol = padding_mask.shape[0]
elif self.args.encoder_type == "transformerm":
data_x = kwargs["batched_data"]["x"]
n_mol, n_atom = data_x.size()[:2]
padding_mask = (data_x[:, :, 0]).eq(0) # B x T x 1
else:
            raise ValueError(f"unknown encoder_type: {self.args.encoder_type}")
padding_mask_cls = torch.zeros(
n_mol,
self.args.N_vnode,
device=padding_mask.device,
dtype=padding_mask.dtype,
)
padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)
return padding_mask
def forward_encoder(self, **kwargs):
if (
self.args.encoder_type == "transformerm"
or self.args.encoder_type == "unimolv2"
):
if self.args.add_len == 0:
padding_mask = self.get_pad_mask(kwargs)
masked_tokens = ~padding_mask
else:
masked_tokens = None
padding_mask = None
if self.args.use_reorder:
if self.args.encoder_type == "unimolv2":
kwargs["perturb"] = self.embed_positions.weight[
: kwargs["batched_data"]["atom_mask"].shape[1], :
]
elif self.args.encoder_type == "transformerm":
kwargs["perturb"] = self.embed_positions.weight[
: kwargs["batched_data"]["x"].shape[1], :
]
output = self.encoder(**kwargs)
if self.args.encoder_type == "transformerm":
encoder_rep = output[2]["inner_states"][-1].transpose(0, 1)
else:
_, _, _, _, encoder_rep = output
return {
"encoder_rep": encoder_rep,
"padding_mask": padding_mask,
"masked_tokens": masked_tokens,
}
else:
return super().forward_encoder(**kwargs)
def reorder_encoder_out(self, encoder_out, new_order):
"""Dummy re-order function for beamable enc-dec attention"""
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_rep"]) == 0:
new_encoder_rep = None
else:
new_encoder_rep = encoder_out["encoder_rep"].index_select(0, new_order)
if len(encoder_out["padding_mask"]) == 0:
new_padding_mask = None
else:
new_padding_mask = encoder_out["padding_mask"].index_select(0, new_order)
if len(encoder_out["masked_tokens"]) == 0:
new_masked_tokens = None
else:
new_masked_tokens = encoder_out["masked_tokens"].index_select(0, new_order)
return {
"encoder_rep": new_encoder_rep, # T x B x C
"padding_mask": new_padding_mask, # B x T x 1
"masked_tokens": new_masked_tokens, # B x T x 1
}
def forward_decoder(
self,
decoder_src_tokens,
encoder_cls,
temperature,
encoder_padding_mask,
want_probs=True,
**kwargs
):
decoder_attn_mask = None
if self.args.want_decoder_attn:
if self.args.decoder_attn_from_loader:
attn_mask_kwargs = kwargs
elif self.training or not self.args.infer_step:
attn_mask_kwargs = self.seq2attn.forward_train(decoder_src_tokens)
else:
attn_mask_kwargs = self.seq2attn.forward(decoder_src_tokens)
if self.args.decoder_type == "default":
decoder_attn_mask = self.get_attn_mask(**attn_mask_kwargs)
elif self.args.decoder_type == "new":
decoder_attn_mask = self.get_degree_attn_mask(**attn_mask_kwargs)
return super().forward_decoder(
decoder_src_tokens,
encoder_cls,
temperature,
encoder_padding_mask,
want_probs=want_probs,
decoder_attn_mask=decoder_attn_mask,
**kwargs
)
def get_src_tokens(self, sample):
if self.args.encoder_type == "transformerm":
src_tokens = sample["net_input"]["batched_data"]["x"]
elif self.args.encoder_type == "unimolv2":
src_tokens = sample["net_input"]["batched_data"]["atom_feat"]
return src_tokens
@register_model_architecture("G2G", "NAG2G_G2G")
def NAG2G_G2G_architecture(args):
if __version__ == "1.5.0":
assert args.encoder_type == "transformerm"
elif __version__ == "2.0.0":
assert args.encoder_type == "unimolv2"
encoder_base_architecture(args)
decoder_base_architecture(args)
if __version__ == "2.0.0":
assert args.add_len == 0
| 11,513 | Python | .py | 278 | 30.266187 | 110 | 0.57421 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,223 | decay_t2t_schedule.py | dptech-corp_NAG2G/NAG2G/optim/lr_scheduler/decay_t2t_schedule.py | # Copyright (c) DP Techonology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, List
import math
from unicore.optim.lr_scheduler import UnicoreLRScheduler, register_lr_scheduler
@register_lr_scheduler("tensor2tensor_decay")
class Tensor2tensorDecayLRSchedule(UnicoreLRScheduler):
"""Decay the LR on the tensor2tensor schedule."""
def __init__(self, args, optimizer, total_train_steps):
super().__init__(args, optimizer, total_train_steps)
if self.args.warmup_ratio > 0:
# if warmup_ratio > 0, use external train steps
assert total_train_steps is not None
self.warmup_updates = int(
self.args.warmup_ratio * total_train_steps)
self.total_num_update = total_train_steps
else:
assert args.total_num_update > 0
self.warmup_updates = args.warmup_updates
self.total_num_update = args.total_num_update
self.lr = args.lr[0]
if self.warmup_updates > 0:
self.warmup_factor = 1.0 / self.warmup_updates
else:
self.warmup_factor = 1
self.total_num_update = total_train_steps
self.end_learning_rate = args.end_learning_rate
self.power = args.power
self.optimizer.set_lr(self.warmup_factor * self.lr)
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
help='force annealing at specified epoch')
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-ratio', default=-1.0, type=float, metavar='N',
help='warmup the learning rate linearly for the first N-percent updates')
parser.add_argument('--start-learning-rate', default=2.0, type=float)
parser.add_argument('--end-learning-rate', default=0.0, type=float)
parser.add_argument('--power', default=1.0, type=float)
parser.add_argument('--total-num-update', default=1000000, type=int)
def get_next_lr(self, epoch):
lrs = self.args.lr
if self.args.force_anneal is None or epoch < self.args.force_anneal:
# use fixed LR schedule
next_lr = lrs[min(epoch, len(lrs) - 1)]
else:
            # anneal based on lr_shrink
next_lr = self.optimizer.get_lr()
return next_lr
def step_begin_epoch(self, epoch):
"""Update the learning rate at the beginning of the given epoch."""
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates == 0:
t_num_updates = math.pow(1, -0.5)
else:
t_num_updates = math.pow(num_updates, -0.5)
if t_num_updates < num_updates * math.pow(self.warmup_updates, -1.5):
lr = self.args.start_learning_rate * \
(math.pow(self.args.encoder_embed_dim, -0.5) * t_num_updates)
else:
lr = self.args.start_learning_rate * \
(math.pow(self.args.encoder_embed_dim, -0.5) *
num_updates * math.pow(self.warmup_updates, -1.5))
self.optimizer.set_lr(lr)
return self.optimizer.get_lr()
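    # step_update implements the tensor2tensor ("Noam") schedule, roughly
    #     lr(step) = start_lr * d_model^(-0.5)
    #                * min(step^(-0.5), step * warmup_updates^(-1.5)),
    # i.e. linear warmup for the first warmup_updates steps followed by an
    # inverse-square-root decay, with d_model = args.encoder_embed_dim.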
| 3,679 | Python | .py | 73 | 40.438356 | 101 | 0.624409 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,224 | __init__.py | dptech-corp_NAG2G/NAG2G/optim/lr_scheduler/__init__.py | from pathlib import Path
import importlib
import os
# automatically import any Python files in the optim/lr_scheduler/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("NAG2G.optim.lr_scheduler." + file_name)
| 357 | Python | .py | 8 | 41 | 76 | 0.718391 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,225 | unimolv2.py | dptech-corp_NAG2G/NAG2G/tasks/unimolv2.py | # Copyright (c) DP Techonology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unimol import __version__
if __version__ == "2.0.0":
import logging
import os
import numpy as np
from unicore.data import (
LMDBDataset,
RightPadDataset,
TokenizeDataset,
RightPadDataset2D,
NestedDictionaryDataset,
EpochShuffleDataset,
)
from unimol.data import (
KeyDataset,
ConformerPCQSampleDataset,
Unimolv2Features,
)
from NAG2G.data import (
CsvGraphormerDataset,
SmilesDataset,
GraphormerDataset,
ShuffleGraphormerDataset,
SeqGraphormerDataset,
RightPadDataset3D,
ReorderGraphormerDataset,
RandomSmilesDataset,
ReorderSmilesDataset,
ReorderCoordDataset,
BpeTokenizeDataset,
)
from unicore.tasks import UnicoreTask, register_task
from .transformer_m import G2GMTask
logger = logging.getLogger(__name__)
@register_task("G2G_unimolv2")
class G2GUnimolv2Task(G2GMTask):
"""Task for training transformer auto-encoder models."""
def load_dataset(self, split, **kwargs):
"""Load a given dataset split.
Args:
                split (str): name of the data source (e.g., bppp)
"""
split_path = os.path.join(self.args.data, split + ".csv")
if os.path.exists(split_path):
raw_dataset = CsvGraphormerDataset(split_path)
else:
split_path = os.path.join(self.args.data, split + ".lmdb")
raw_dataset = LMDBDataset(split_path)
is_train = "train" in split
flag_aftsep2 = "aftspe2" in self.args.bpe_tokenizer_path
sample_dataset = ConformerPCQSampleDataset(
raw_dataset,
self.seed,
"target_coordinates",
None,
)
raw_coord_dataset = KeyDataset(sample_dataset, "coordinates")
map_coord_dataset = KeyDataset(sample_dataset, "target_map")
dataset = KeyDataset(raw_dataset, "rxn_smiles")
dataset = SmilesDataset(dataset)
reactant_dataset = KeyDataset(dataset, "reactant_smiles")
product_dataset = KeyDataset(dataset, "product_smiles")
if is_train and self.args.shufflegraph == "randomsmiles":
product_dataset = RandomSmilesDataset(product_dataset)
if self.args.use_reorder:
reactant_dataset = ReorderSmilesDataset(product_dataset, reactant_dataset)
else:
reactant_dataset = RandomSmilesDataset(reactant_dataset)
reactant_smiles_dataset = reactant_dataset
product_smiles_dataset = product_dataset
if not self.args.no_reactant:
reactant_dataset = GraphormerDataset(reactant_dataset)
product_dataset = GraphormerDataset(product_dataset)
if (not self.args.no_reactant) and self.args.use_reorder:
reorder_dataset = ReorderGraphormerDataset(
product_dataset,
reactant_dataset,
align_base="product",
)
product_dataset = KeyDataset(reorder_dataset, "product")
reactant_dataset = KeyDataset(reorder_dataset, "reactant")
raw_coord_dataset = ReorderCoordDataset(
raw_coord_dataset, map_coord_dataset, product_dataset
)
if self.args.use_class:
class_dataset = KeyDataset(raw_dataset, "class")
else:
class_dataset = None
if not self.args.no_reactant:
reactant_dataset = SeqGraphormerDataset(
reactant_dataset,
class_dataset,
min_node=self.args.laplacian_pe_dim,
want_attn=self.args.decoder_attn_from_loader,
want_charge_h=self.args.want_charge_h,
# max_seq_len=self.args.max_seq_len,
sumto2=not self.args.not_sumto2,
use_sep2=self.args.use_sep2 or flag_aftsep2,
want_h_degree=self.args.want_h_degree,
idx_type=self.args.idx_type,
charge_h_last=self.args.charge_h_last,
)
seq_reactant_dataset = KeyDataset(reactant_dataset, "seq")
seq_reactant_dataset = TokenizeDataset(
seq_reactant_dataset,
self.dictionary,
max_seq_len=self.args.max_seq_len + 1,
)
if self.args.bpe_tokenizer_path != "none":
seq_reactant_dataset = BpeTokenizeDataset(
seq_reactant_dataset,
self.args.bpe_tokenizer_path,
flag_aftsep2=flag_aftsep2,
)
product_dataset = Unimolv2Features(
product_dataset,
raw_coord_dataset,
None,
is_train=is_train,
label_prob=1,
mid_prob=self.args.mid_prob,
mid_lower=self.args.mid_lower,
mid_upper=self.args.mid_upper,
noise=self.args.noise_scale,
seed=self.seed + 2,
N_vnode=self.args.N_vnode,
)
net_input = {
"batched_data": product_dataset,
}
if not self.args.no_reactant:
net_input["decoder_src_tokens"] = RightPadDataset(
seq_reactant_dataset,
pad_idx=self.dictionary.pad(),
)
if self.args.decoder_attn_from_loader:
reactant_degree_attn_mask_dataset = KeyDataset(
reactant_dataset, "degree_attn_mask"
)
if self.args.laplacian_pe_dim > 0:
reactant_laplacian_attn_mask_dataset = KeyDataset(
reactant_dataset, "laplacian_attn_mask"
)
net_input["decoder_laplacian_attn_mask"] = RightPadDataset3D(
reactant_laplacian_attn_mask_dataset,
pad_idx=0,
)
net_input["decoder_degree_attn_mask"] = RightPadDataset2D(
reactant_degree_attn_mask_dataset,
pad_idx=0,
)
nest_dataset = NestedDictionaryDataset(
{
"net_input": net_input,
"target": {
"reactant_smiles": reactant_smiles_dataset,
"product_smiles": product_smiles_dataset,
},
},
)
if split in ["train", "train.small"]:
nest_dataset = EpochShuffleDataset(
nest_dataset, len(nest_dataset), self.seed
)
self.datasets[split] = nest_dataset
| 7,261 | Python | .py | 169 | 28.142012 | 94 | 0.542704 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,226 | transformer_m.py | dptech-corp_NAG2G/NAG2G/tasks/transformer_m.py | # Copyright (c) DP Techonology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unimol import __version__
from unicore import distributed_utils
from NAG2G.utils import save_config
if __version__ == "1.5.0" or __version__ == "2.0.0":
import logging
import os
import torch
import numpy as np
from unicore.data import (
Dictionary,
LMDBDataset,
RightPadDataset,
TokenizeDataset,
RightPadDataset2D,
NestedDictionaryDataset,
EpochShuffleDataset,
)
from unimol.data import (
KeyDataset,
)
from NAG2G.data import (
CsvGraphormerDataset,
SmilesDataset,
GraphormerDataset,
ShuffleGraphormerDataset,
SeqGraphormerDataset,
RightPadDataset3D,
ReorderGraphormerDataset,
GraphFeatures,
RandomSmilesDataset,
ReorderSmilesDataset,
EMPTY_SMILES_Dataset_G2G,
SmilesDataset_2,
BpeTokenizeDataset,
)
from NAG2G.utils.chemutils import add_chirality
from NAG2G.utils.G2G_cal import get_smiles, gen_map
from unicore.tasks import UnicoreTask, register_task
from unicore import checkpoint_utils
logger = logging.getLogger(__name__)
@register_task("G2G")
class G2GMTask(UnicoreTask):
"""Task for training transformer auto-encoder models."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="downstream data path")
parser.add_argument(
"--dict-name",
default="dict.txt",
help="dict name",
)
parser.add_argument(
"--laplacian_pe_dim",
default=30,
type=int,
help="laplacian_pe_dim",
)
parser.add_argument(
"--use_reorder", action="store_true", help="use-reorder"
)
parser.add_argument(
"--decoder_attn_from_loader",
action="store_true",
help="decoder_attn_from_loader",
)
parser.add_argument(
"--not_want_edge_input",
action="store_true",
help="not_want_edge_input",
)
parser.add_argument(
"--want_charge_h",
action="store_true",
help="want_charge_h",
)
parser.add_argument(
"--shufflegraph",
default="none",
help="shufflegraph",
)
parser.add_argument(
"--not_sumto2",
action="store_true",
help="not_sumto2",
)
parser.add_argument(
"--add_len",
default=0,
type=int,
help="GraphFeatures add_len",
)
parser.add_argument(
"--N_left",
default=0,
type=int,
help="N_left",
)
parser.add_argument(
"--infer_save_name",
default="smi.txt",
help="infer_save_name",
)
parser.add_argument(
"--use_sep2",
action="store_true",
help="use_sep2",
)
parser.add_argument(
"--want_h_degree",
action="store_true",
help="want_h_degree",
)
parser.add_argument(
"--use_class",
action="store_true",
help="use_class",
)
parser.add_argument(
"--dataset_uspto_full",
action="store_true",
help="dataset_uspto_full",
)
parser.add_argument(
"--infer_step",
action="store_true",
help="infer_step",
)
parser.add_argument(
"--idx_type",
default=0,
type=int,
help="idx_type",
)
parser.add_argument(
"--want_decoder_attn",
action="store_true",
help="want_decoder_attn",
)
parser.add_argument(
"--init_train_path",
default="none",
help="init_train_path",
)
parser.add_argument(
"--bpe_tokenizer_path",
default="none",
help="bpe_tokenizer_path",
)
parser.add_argument(
"--charge_h_last",
action="store_true",
help="charge_h_last",
)
parser.add_argument(
"--use_class_encoder",
action="store_true",
help="use_class_encoder",
)
parser.add_argument(
"--no_reactant",
action="store_true",
help="no_reactant",
)
save_config.add_config_save_args(parser)
def __init__(self, args, dictionary):
if (distributed_utils.get_data_parallel_rank() == 0) and (
args.infer_step is False
):
save_config.save_config(args)
super().__init__(args)
self.seed = args.seed
self.dictionary = dictionary
if self.args.bpe_tokenizer_path != "none":
self.infer_dictionary = Dictionary.load(
os.path.join(args.bpe_tokenizer_path, "dict.txt")
)
@classmethod
def setup_task(cls, args, **kwargs):
dictionary = Dictionary.load(os.path.join(args.data, args.dict_name))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def load_empty_dataset(self, **kwargs):
split = "test"
flag_label_wanted = False
dataset = EMPTY_SMILES_Dataset_G2G(name="product_smiles", **kwargs)
self.load_dataset_detail(
dataset=dataset,
split=split,
class_dataset=None,
flag_label_wanted=flag_label_wanted,
**kwargs
)
return dataset
def load_dataset(self, split, **kwargs):
flag_label_wanted = True
split_path = os.path.join(self.args.data, split + ".csv")
if os.path.exists(split_path):
raw_dataset = CsvGraphormerDataset(split_path)
else:
split_path = os.path.join(self.args.data, split + ".lmdb")
raw_dataset = LMDBDataset(split_path)
if self.args.use_class:
class_dataset = KeyDataset(raw_dataset, "class")
else:
class_dataset = None
if not self.args.dataset_uspto_full:
dataset = KeyDataset(raw_dataset, "rxn_smiles")
dataset = SmilesDataset(dataset)
else:
dataset = SmilesDataset_2(raw_dataset)
self.load_dataset_detail(
dataset=dataset,
split=split,
class_dataset=class_dataset,
flag_label_wanted=flag_label_wanted,
**kwargs
)
def load_dataset_detail(
self, dataset, split, class_dataset=None, flag_label_wanted=True, **kwargs
):
"""Load a given dataset split.
Args:
                split (str): name of the data split (e.g., train, valid, test)
"""
is_train = "train" in split
flag_aftsep2 = "aftsep2" in self.args.bpe_tokenizer_path
if flag_label_wanted:
reactant_dataset = KeyDataset(dataset, "reactant_smiles")
product_dataset = KeyDataset(dataset, "product_smiles")
if is_train and self.args.shufflegraph == "randomsmiles":
product_dataset = RandomSmilesDataset(product_dataset)
if flag_label_wanted:
if self.args.use_reorder:
reactant_dataset = ReorderSmilesDataset(product_dataset, reactant_dataset)
else:
reactant_dataset = RandomSmilesDataset(reactant_dataset)
if flag_label_wanted:
reactant_smiles_dataset = reactant_dataset
reactant_dataset = GraphormerDataset(reactant_dataset)
product_smiles_dataset = product_dataset
product_dataset = GraphormerDataset(product_dataset)
if flag_label_wanted and self.args.use_reorder:
reorder_dataset = ReorderGraphormerDataset(
product_dataset,
reactant_dataset,
align_base="product",
)
product_dataset = KeyDataset(reorder_dataset, "product")
reactant_dataset = KeyDataset(reorder_dataset, "reactant")
if flag_label_wanted:
reactant_dataset = SeqGraphormerDataset(
reactant_dataset,
class_dataset,
min_node=self.args.laplacian_pe_dim,
want_attn=self.args.decoder_attn_from_loader,
want_charge_h=self.args.want_charge_h,
# max_seq_len=self.args.max_seq_len,
sumto2=not self.args.not_sumto2,
use_sep2=self.args.use_sep2 or flag_aftsep2,
want_h_degree=self.args.want_h_degree,
idx_type=self.args.idx_type,
charge_h_last=self.args.charge_h_last,
)
seq_reactant_dataset = KeyDataset(reactant_dataset, "seq")
seq_reactant_dataset = TokenizeDataset(
seq_reactant_dataset,
self.dictionary,
max_seq_len=self.args.max_seq_len + 1,
)
if self.args.bpe_tokenizer_path != "none":
seq_reactant_dataset = BpeTokenizeDataset(
seq_reactant_dataset,
self.args.bpe_tokenizer_path,
flag_aftsep2=flag_aftsep2,
)
product_dataset = GraphFeatures(
product_dataset,
pos_dataset=None,
want_edge_input=not self.args.not_want_edge_input,
add_len=self.args.add_len,
)
net_input = {"batched_data": product_dataset}
if flag_label_wanted:
net_input["decoder_src_tokens"] = RightPadDataset(
seq_reactant_dataset,
pad_idx=self.dictionary.pad(),
)
if flag_label_wanted and self.args.decoder_attn_from_loader:
reactant_degree_attn_mask_dataset = KeyDataset(
reactant_dataset, "degree_attn_mask"
)
net_input["decoder_degree_attn_mask"] = RightPadDataset2D(
reactant_degree_attn_mask_dataset,
pad_idx=0,
)
if self.args.laplacian_pe_dim > 0:
reactant_laplacian_attn_mask_dataset = KeyDataset(
reactant_dataset, "laplacian_attn_mask"
)
net_input["decoder_laplacian_attn_mask"] = RightPadDataset3D(
reactant_laplacian_attn_mask_dataset,
pad_idx=0,
)
target = {"product_smiles": product_smiles_dataset}
if flag_label_wanted:
target["reactant_smiles"] = reactant_smiles_dataset
nest_dataset = NestedDictionaryDataset(
{"net_input": net_input, "target": target},
)
if split in ["train", "train.small"]:
nest_dataset = EpochShuffleDataset(
nest_dataset, len(nest_dataset), self.seed
)
self.datasets[split] = nest_dataset
def build_model(self, args):
from unicore import models
model = models.build_model(args, self)
if args.init_train_path != "none":
state = checkpoint_utils.load_checkpoint_to_cpu(args.init_train_path)
model.load_state_dict(state["model"], strict=False)
return model
def get_str(self, tokens):
if self.args.bpe_tokenizer_path == "none":
dictionary = self.dictionary
else:
dictionary = self.infer_dictionary
pad_idx = dictionary.pad()
tokens = tokens.tolist()
            if pad_idx in tokens:
                # cut at the first padding token and drop the leading BOS token
                first_pad = tokens.index(pad_idx)
                token = tokens[1:first_pad]
else:
token = tokens[1:]
if not hasattr(dictionary, "id2word"):
dictionary.id2word = {v: k for k, v in dictionary.indices.items()}
strs = " ".join([dictionary.id2word[i] for i in token])
if self.args.bpe_tokenizer_path != "none":
strs = strs.replace("?", " ")
return strs
def fill_first_step_tokens(self, bsz, beam_size, j, pred_dict):
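            # pack the j-th first-stage hypothesis of every sample into padded token and
            # score tensors so they can be fed as prefixes to a second-stage generator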
max_pred_shape = max(
[pred_dict[i][j]["tokens"].shape[0] for i in range(bsz)]
)
scores = torch.zeros(bsz * beam_size, max_pred_shape).float()
tokens = (
torch.zeros(bsz * beam_size, max_pred_shape + 1)
.long()
.fill_(self.dictionary.pad())
)
for i in range(bsz):
for k in range(beam_size):
pred_shape = pred_dict[i][j]["tokens"].shape[0] + 1
tokens[i * beam_size + k, 1:pred_shape] = pred_dict[i][j]["tokens"]
scores[i * beam_size + k, : pred_shape - 1] = pred_dict[i][j][
"score"
]
return tokens, scores
def write_file_res(
self,
gt_product_smiles,
gt_reactant_smiles,
tgt_tokens,
pred,
beam_size,
file_check,
):
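            # append the ground-truth product/reactant and every beam hypothesis (with its
            # score) to the per-rank result file that is later parsed by the scoring script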
for i in range(len(gt_reactant_smiles)):
with open(file_check, "a") as w:
w.write(gt_product_smiles[i] + " " + "gt_product" + "\n")
w.write(gt_reactant_smiles[i] + " " + "target" + "\n")
if tgt_tokens is not None:
w.write(self.get_str(tgt_tokens[i]) + " " + "target" + "\n")
else:
w.write(" " + "target" + "\n")
for j in range(beam_size):
if (
self.args.search_strategies
== "SequenceGeneratorBeamSearch_test"
) or (
self.args.search_strategies == "SequenceGeneratorBeamSearch"
):
pred_piece = pred[i][j]["tokens"].cpu().numpy()
score = pred[i][j]["score"].cpu().detach().numpy()
pred_piece = np.insert(pred_piece, 0, 1)
pred_piece = pred_piece[:-1]
elif self.args.search_strategies == "SimpleGenerator":
pred_piece = pred[j + i * beam_size].cpu().numpy()
score = 0
pred_piece = self.get_str(pred_piece)
w.write(
pred_piece
+ " "
+ "predicted"
+ str(j)
+ " "
+ str(score)
+ "\n"
)
def get_one_sample(self, sample, i):
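            # slice the i-th example out of a batched sample while keeping the batch dim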
new_dict = {}
for k, v in sample["net_input"]["batched_data"].items():
new_dict[k] = v[i : i + 1]
dict_ = {"net_input": {"batched_data": new_dict}}
return dict_
def test_step(
self,
args,
sample,
model,
loss,
step,
seed,
second_beam_size=0,
second_token_size=0,
model2=None,
):
gt_reactant_smiles = sample["target"]["reactant_smiles"]
gt_product_smiles = sample["target"]["product_smiles"]
beam_size = model.beam_size
file_path = args.results_path
if self.args.search_strategies == "SequenceGeneratorBeamSearch_test":
if self.args.use_class:
prefix_tokens = sample["net_input"]["decoder_src_tokens"][
:, 1
].unsqueeze(1)
else:
prefix_tokens = None
pred = model(sample=sample, prefix_tokens=prefix_tokens)
bsz = self.args.batch_size
                # model2 holds optional second-stage generators that re-score each
                # first-stage hypothesis; skip this step when none are provided
                if model2 is not None and len(model2) > 0:
list_bs = []
for i in range(bsz):
list_beam = []
for j in range(len(model2)):
sample_tmp = self.get_one_sample(sample, i)
pred_piece = pred[i][j]["tokens"]
score = pred[i][j]["score"]
pred_k = model2[j](
sample_tmp, prefix_tokens=pred_piece.unsqueeze(0)
)
for k in range(len(pred_k[0])):
pred_k[0][k]["score"] = score + pred_k[0][k]["score"]
list_beam.append(pred_k[0][k])
list_bs.append(list_beam)
pred = list_bs
vae_kl_loss = None
elif self.args.search_strategies == "SequenceGeneratorBeamSearch":
if self.args.use_class:
prefix_tokens = sample["net_input"]["decoder_src_tokens"][
:, 1
].unsqueeze(1)
else:
prefix_tokens = None
pred = model(sample=sample, prefix_tokens=prefix_tokens)
vae_kl_loss = None
elif self.args.search_strategies == "SimpleGenerator":
# if self.args.use_class:
# prefix_tokens = sample["net_input"]["decoder_src_tokens"][
# :, 1
# ].unsqueeze(1)
# else:
prefix_tokens = None
pred, vae_kl_loss = model._generate(sample, prefix_tokens)
            else:
                raise NotImplementedError(
                    "unknown search strategy: {}".format(self.args.search_strategies)
                )
if not os.path.exists(file_path):
os.makedirs(file_path)
rank = distributed_utils.get_data_parallel_rank()
file_check = os.path.join(
file_path,
self.args.infer_save_name.replace(".txt", "_" + str(rank) + ".txt"),
)
            # ground-truth tokens are absent when running pure inference without labels
            try:
                tgt_tokens = sample["net_input"]["decoder_src_tokens"].cpu().numpy()
            except KeyError:
                tgt_tokens = None
self.write_file_res(
gt_product_smiles,
gt_reactant_smiles,
tgt_tokens,
pred,
beam_size,
file_check,
)
if "decoder_src_tokens" in sample["net_input"]:
return pred, {
"vae_kl_loss": vae_kl_loss,
"sample_size": 1,
"bsz": sample["net_input"]["batched_data"]["atom_feat"].size(0),
"seq_len": sample["net_input"]["decoder_src_tokens"].size(1)
* sample["net_input"]["decoder_src_tokens"].size(0),
}
else:
return pred, {
"vae_kl_loss": vae_kl_loss,
"sample_size": 1,
"bsz": 1,
"seq_len": 1,
}
def infer_step(self, sample, model, **kwargs):
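            # decode beam hypotheses, map them back to atom-mapped SMILES of the product,
            # and copy the product chirality onto each predicted reactant before returning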
gt_product_smiles = sample["target"]["product_smiles"]
if self.args.search_strategies == "SequenceGeneratorBeamSearch":
pred = model(sample)
vae_kl_loss = None
elif self.args.search_strategies == "SimpleGenerator":
pred, vae_kl_loss = model._generate(sample)
            else:
                raise NotImplementedError(
                    "unknown search strategy: {}".format(self.args.search_strategies)
                )
beam_size = model.beam_size
result_list = []
for i in range(len(gt_product_smiles)):
list_ = []
for j in range(beam_size):
if self.args.search_strategies == "SequenceGeneratorBeamSearch":
pred_piece = pred[i][j]["tokens"].cpu().numpy()
score = pred[i][j]["score"].cpu().detach().numpy()
pred_piece = np.insert(pred_piece, 0, 1)
pred_piece = pred_piece[:-1]
elif self.args.search_strategies == "SimpleGenerator":
pred_piece = pred[j + i * beam_size].cpu().numpy()
score = 0
pred_piece = self.get_str(pred_piece)
pred_piece = get_smiles(
pred_piece, atom_map=gen_map(gt_product_smiles[i])
)
try:
pred_piece = add_chirality(gt_product_smiles[i], pred_piece)
except:
pass
list_.append(pred_piece)
result_list.append(list_)
return result_list
| 22,317 | Python | .py | 538 | 25.249071 | 98 | 0.47254 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,227 | __init__.py | dptech-corp_NAG2G/NAG2G/tasks/__init__.py | from pathlib import Path
import importlib
# automatically import any Python files in the criterions/ directory
for file in sorted(Path(__file__).parent.glob("*.py")):
if not file.name.startswith("_"):
importlib.import_module("NAG2G.tasks." + file.name[:-3])
| 271 | Python | .py | 6 | 42 | 68 | 0.723485 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,228 | G2G_cal.py | dptech-corp_NAG2G/NAG2G/utils/G2G_cal.py | import sys
import numpy as np
from tqdm import tqdm
from rdkit import Chem
from .draw_img import draw_mol
from .chemutils import add_chirality
from .graph_process import seq2graph
from .mol_graph_basic import graph2mol, error, get_InchiKey, judge_InchiKey, same_smi
import os
from multiprocessing import Pool
import time
import glob
import json
def seq2smiles(seq, atom_map=None):
tmp = seq2graph(seq)
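    # shift bond codes from the allowable_features indices to bond_type_list indices:
    # +1 aligns SINGLE/DOUBLE/TRIPLE, 4 becomes 12 (AROMATIC) and 23 ("misc") becomes 0 (no bond)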
adjacency_matrix = tmp["adj_matrix"] + 1
adjacency_matrix[adjacency_matrix == 4] = 12
adjacency_matrix[adjacency_matrix == 23] = 0
atoms = tmp["atoms"]
atoms_charge = tmp["atoms_charge"].tolist()
atom_h_number = tmp["atoms_h"].tolist()
if atom_map is not None:
if len(atom_map) > len(atoms):
atom_map = atom_map[: len(atoms)]
elif len(atom_map) < len(atoms):
atom_map = atom_map + [0 for _ in range(len(atoms) - len(atom_map))]
smiles = graph2mol(
adjacency_matrix=adjacency_matrix,
atoms=atoms,
atoms_charge=atoms_charge,
atom_h_number=atom_h_number,
atoms_map=atom_map,
)
return smiles
def get_smiles(seq, atom_map=None):
seq = [i for i in seq.split(" ") if i != ""]
if "[SEP]" in seq:
seq = seq[: seq.index("[SEP]")]
smiles = seq2smiles(seq, atom_map)
return smiles
def gen_map(smiles):
mol = Chem.MolFromSmiles(smiles)
atoms_map = [atom.GetAtomMapNum() for atom in mol.GetAtoms()]
return atoms_map
def worker(args):
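    # evaluate one prediction block from the result file: compare every beam hypothesis
    # with the ground-truth reactants (loose match without atom maps, strict match with
    # chirality copied from the product) and track top-k hits after InChIKey deduplication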
i, context, iter_N, N_beam_search = args
gt_product = context[iter_N * i].split("gt_product")[0]
target1 = context[iter_N * i + 1].split("target")[0]
target2 = context[iter_N * i + 2].replace("[SEP]", "").split("target")[0]
target2 = get_smiles(target2)
dup_key_list = set([None])
gt_reactant_key = get_InchiKey(target1)
pred_list = []
unstrict_list = []
strict_list = []
nodup_list = []
pred_nodup_list = []
flag_strict = False
flag_unstrict = False
for j in range(N_beam_search):
assert "predicted" in context[iter_N * i + 3 + j]
str2 = context[iter_N * i + 3 + j].split("predicted")[0]
pred = get_smiles(str2)
if flag_unstrict is False and same_smi(target2, pred):
unstrict_list.append(1)
flag_unstrict = True
else:
unstrict_list.append(0)
pred = get_smiles(str2, atom_map=gen_map(gt_product))
try:
pred = add_chirality(gt_product, pred)
except:
pass
pred_key = get_InchiKey(pred)
if pred_key not in dup_key_list:
if judge_InchiKey(pred_key, gt_reactant_key):
nodup_list.append(1)
else:
nodup_list.append(0)
dup_key_list.add(pred_key)
pred_nodup_list.append(pred)
if flag_strict is False and same_smi(target1, pred):
strict_list.append(1)
flag_strict = True
else:
strict_list.append(0)
pred_list.append(pred)
nodup_list = nodup_list + [0 for _ in range(N_beam_search - len(nodup_list))]
# print(i, "*"*10)
# print(target1, error(target1))
# print(i, "*"*10)
# for ii in pred_nodup_list:
# print(ii, error(ii))
# if np.array(strict_list).sum() == 0:
# draw_mol(
# [gt_product, target1, None, None, None] + pred_nodup_list,
# "img/{}_{}.png".format(i, j),
# mols_per_row=5,
# img_size=(400, 400),
# )
return {"product": gt_product,
"target": [target1, target2],
"unstrict_list": unstrict_list,
"strict_list": strict_list,
"pred_list": pred_list,
"nodup_list": nodup_list}
def get_context_by_one(smi_path):
print(smi_path.split("/")[-1])
if not os.path.exists(smi_path):
return []
with open(smi_path, "r") as f:
context = f.readlines()
print("single lines:", len(context))
return context
def run(smi_path, save_path, N_beam_search=10, if_full=False):
if "{}" in smi_path:
context = []
files = glob.glob(smi_path.replace("{}", "*"))
for i in files:
context += get_context_by_one(i)
else:
context = get_context_by_one(smi_path)
iter_N = N_beam_search + 3
N_mol = int(len(context) / iter_N)
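    # with --if_full, only score finished runs (5007 is presumably the USPTO-50k test-set size)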
if if_full and N_mol != 5007:
return
print(N_mol)
start = time.time()
with Pool() as pool:
results = pool.map(
worker,
[
(i, context, iter_N, N_beam_search)
for i in tqdm(range(len(context) // iter_N))
],
)
target_list_all = []
unstrict_list_all = []
strict_list_all = []
pred_list_all = []
nodup_list_all = []
if save_path is not None:
with open(save_path.replace(save_path.split("/")[-1], "smiles_infer.txt"), 'w') as f:
# json.dump(i, f)
f.write(json.dumps(results, indent=4))
for i in results:
target = i["target"]
unstrict_list = i["unstrict_list"]
strict_list = i["strict_list"]
pred_list = i["pred_list"]
nodup_list = i["nodup_list"]
target_list_all.append(target)
unstrict_list_all.append(unstrict_list)
strict_list_all.append(strict_list)
pred_list_all.append(pred_list)
nodup_list_all.append(nodup_list)
if save_path is not None:
f = open(save_path, "w")
else:
f = None
print(time.time() - start)
unstrict_list_all = np.array(unstrict_list_all)
strict_list_all = np.array(strict_list_all)
unstrict_list_all = unstrict_list_all.sum(0)
strict_list_all = strict_list_all.sum(0)
nodup_list_all = np.array(nodup_list_all)
nodup_list_all = nodup_list_all.sum(0)
print("total", N_mol)
print("unstrict", unstrict_list_all)
print("unstrict", unstrict_list_all / N_mol)
print("strict", strict_list_all)
print("strict", strict_list_all / N_mol)
print("nodup_list_all", nodup_list_all)
print("nodup_list_all", nodup_list_all / N_mol)
print("\n")
if f is not None:
f.write("total " + str(N_mol))
f.write("\nunstrict " + str(unstrict_list_all))
f.write("\nunstrict " + str(unstrict_list_all / N_mol))
f.write("\nstrict " + str(strict_list_all))
f.write("\nstrict " + str(strict_list_all / N_mol))
f.write("\nnodup_list_all " + str(nodup_list_all))
f.write("\nnodup_list_all " + str(nodup_list_all / N_mol))
f.write("\n")
try:
unstrict_list_tmp = [unstrict_list_all[0]]
for i in range(1, len(unstrict_list_all)):
unstrict_list_tmp.append(unstrict_list_tmp[i - 1] + unstrict_list_all[i])
unstrict_list_tmp = np.array(unstrict_list_tmp)
except:
unstrict_list_tmp = 0
strict_list_tmp = [strict_list_all[0]]
for i in range(1, len(strict_list_all)):
strict_list_tmp.append(strict_list_tmp[i - 1] + strict_list_all[i])
strict_list_tmp = np.array(strict_list_tmp)
nodup_list_tmp = [nodup_list_all[0]]
for i in range(1, len(nodup_list_all)):
nodup_list_tmp.append(nodup_list_tmp[i - 1] + nodup_list_all[i])
nodup_list_tmp = np.array(nodup_list_tmp)
print("unstrict", unstrict_list_tmp)
print("unstrict", unstrict_list_tmp / N_mol)
print("strict", strict_list_tmp)
print("strict", strict_list_tmp / N_mol)
print("nodup_list_all", nodup_list_tmp)
print("nodup_list_all", nodup_list_tmp / N_mol)
if f is not None:
f.write("unstrict " + str(unstrict_list_tmp))
f.write("\nunstrict " + str(unstrict_list_tmp / N_mol))
f.write("\nstrict " + str(strict_list_tmp))
f.write("\nstrict " + str(strict_list_tmp / N_mol))
f.write("\nnodup_list_all " + str(nodup_list_tmp))
f.write("\nnodup_list_all " + str(nodup_list_tmp / N_mol))
f.close()
if __name__ == "__main__":
    if len(sys.argv) < 2:
        raise ValueError("missing required argument: smi_path")
smi_path = sys.argv[1]
N_beam_search = 10
if len(sys.argv) >= 3:
N_beam_search = int(sys.argv[2])
if "--if_full" in sys.argv:
if_full = True
else:
if_full = False
if len(sys.argv) >= 5:
score_name = sys.argv[4]
else:
score_name = "score"
save_path = smi_path.replace(smi_path.split("/")[-1], score_name)
run(smi_path, save_path, N_beam_search=N_beam_search, if_full=if_full)
| 8,522 | Python | .py | 230 | 29.904348 | 93 | 0.590799 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,229 | chemutils.py | dptech-corp_NAG2G/NAG2G/utils/chemutils.py | # Modified from https://github.com/wengong-jin/iclr19-graph2graph
import rdkit.Chem as Chem
from rdchiral.chiral import copy_chirality
from rdkit.Chem import SanitizeMol, SanitizeFlags
from rdkit.Chem.AllChem import AssignStereochemistry
def canonicalize(smiles, add_atom_num=False):
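    # canonicalize a SMILES string, clearing existing atom-map numbers; optionally
    # re-number the atoms 1..N as atom-map labels on the canonical ordering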
try:
tmp = Chem.MolFromSmiles(smiles)
except Exception as e:
print(e)
return smiles
if tmp is None:
print("wrong smiles: %s" % (smiles))
return smiles
tmp = Chem.RemoveHs(tmp)
[a.ClearProp("molAtomMapNumber") for a in tmp.GetAtoms()]
smiles = Chem.MolToSmiles(tmp)
if add_atom_num:
mol = Chem.MolFromSmiles(smiles)
for atom in mol.GetAtoms():
atom.SetAtomMapNum(atom.GetIdx() + 1)
smiles = Chem.MolToSmiles(mol)
return smiles
else:
return smiles
def get_mol(smiles, sanitize=True):
mol = Chem.MolFromSmiles(smiles, sanitize=sanitize)
if mol is None:
return None
Chem.Kekulize(mol)
return mol
def get_smiles(mol):
return Chem.MolToSmiles(mol, kekuleSmiles=True)
def sanitize(mol):
try:
smiles = get_smiles(mol)
mol = get_mol(smiles)
except Exception:
return None
return mol
def atom_equal(a1, a2):
return (
a1.GetSymbol() == a2.GetSymbol()
and a1.GetFormalCharge() == a2.GetFormalCharge()
)
def copy_bond_dir(product, pre_react):
"""copy the direction of bonds from the product molecule to the predicted reactant molecules"""
bond_dir_map = {}
bond_stereo_map = {}
for bond in product.GetBonds():
begin_atom = bond.GetBeginAtom().GetAtomMapNum()
end_atom = bond.GetEndAtom().GetAtomMapNum()
if bond.GetBondDir() != Chem.rdchem.BondDir.NONE:
bond_dir_map[(begin_atom, end_atom)] = bond.GetBondDir()
if bond.GetStereo() != Chem.rdchem.BondStereo.STEREONONE:
bond_stereo_map[(begin_atom, end_atom)] = bond.GetStereo()
change_mol = Chem.RWMol(pre_react)
for bond in change_mol.GetBonds():
begin_atom = bond.GetBeginAtom()
end_atom = bond.GetEndAtom()
begin_atom_mapnum = begin_atom.GetAtomMapNum()
end_atom_mapnum = end_atom.GetAtomMapNum()
if begin_atom_mapnum == 0 or end_atom_mapnum == 0:
continue
if (end_atom_mapnum, begin_atom_mapnum) in bond_stereo_map:
begin_atom_mapnum, end_atom_mapnum = end_atom_mapnum, begin_atom_mapnum
if (begin_atom_mapnum, end_atom_mapnum) in bond_stereo_map:
bond.SetStereo(bond_stereo_map[(begin_atom_mapnum, end_atom_mapnum)])
if (end_atom_mapnum, begin_atom_mapnum) in bond_dir_map:
begin_atom_mapnum, end_atom_mapnum = end_atom_mapnum, begin_atom_mapnum
if (begin_atom_mapnum, end_atom_mapnum) in bond_dir_map:
bond.SetBondDir(bond_dir_map[(begin_atom_mapnum, end_atom_mapnum)])
return change_mol
def add_chirality(product, pred_react):
"""copy the atom chirality and bond direction from the product molecule to the predicted reactant molecule"""
prod_mol = Chem.MolFromSmiles(product)
react_mol = Chem.MolFromSmiles(pred_react)
react_atom_map = {}
for atom in react_mol.GetAtoms():
mapnum = atom.GetAtomMapNum()
react_atom_map[mapnum] = atom
for atom in prod_mol.GetAtoms():
mapnum = atom.GetAtomMapNum()
ratom = react_atom_map[mapnum]
copy_chirality(atom, ratom)
chiral_react_smiles = Chem.MolToSmiles(react_mol, isomericSmiles=True)
react_mol = Chem.MolFromSmiles(chiral_react_smiles)
change_react_mol = copy_bond_dir(prod_mol, react_mol)
SanitizeMol(
change_react_mol, sanitizeOps=SanitizeFlags.SANITIZE_ALL, catchErrors=False
)
AssignStereochemistry(
change_react_mol, cleanIt=True, force=True, flagPossibleStereoCenters=True
)
chiral_react_smiles = Chem.MolToSmiles(change_react_mol, isomericSmiles=True)
return chiral_react_smiles
def is_sim(smile1, smile2):
try:
smile1 = canonicalize(smile1)
smile2 = canonicalize(smile2)
except:
return False
if smile1 == smile2:
return True
else:
return False
if __name__ == "__main__":
gt_reactant = "O=C1CCC(=O)N1[Br:11].[CH3:1][Si:2]([CH3:3])([CH3:4])[O:5][C:6](=[O:7])/[CH:8]=[CH:9]/[CH3:10]"
gt_product = "[CH3:1][Si:2]([CH3:3])([CH3:4])[O:5][C:6](=[O:7])/[CH:8]=[CH:9]/[CH2:10][Br:11]"
from mol_graph_basic import get_canonical_smile, same_smi
fake_reactant = get_canonical_smile(gt_reactant, isomericSmiles=False)
pred = add_chirality(gt_product, fake_reactant)
print(same_smi(gt_reactant, pred))
print(same_smi(gt_reactant, fake_reactant))
| 4,778 | Python | .py | 114 | 35.04386 | 113 | 0.669262 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,230 | utils.py | dptech-corp_NAG2G/NAG2G/utils/utils.py | import torch
import torch.nn.functional as F
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
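    # convert a log-base-`base` loss into perplexity, rounded for logging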
from unicore.logging.meters import safe_round
if loss is None:
return 0.0
try:
return safe_round(base ** loss, round)
except OverflowError:
return float("inf") | 673 | Python | .py | 20 | 28 | 61 | 0.660494 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,231 | mol_graph_basic.py | dptech-corp_NAG2G/NAG2G/utils/mol_graph_basic.py | from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
try:
from rdkit.Chem import Draw
except:
print("can not import chem draw")
from itertools import product
from copy import deepcopy
from collections import OrderedDict
np.set_printoptions(threshold=np.inf)
flag_kekulize = False
flag_atoms_chiraltag = "new"
flag_use_list = False
# 22 RDKit bond types
bond_type_list = [
Chem.rdchem.BondType.UNSPECIFIED,
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.QUADRUPLE,
Chem.rdchem.BondType.QUINTUPLE,
Chem.rdchem.BondType.HEXTUPLE,
Chem.rdchem.BondType.ONEANDAHALF,
Chem.rdchem.BondType.TWOANDAHALF,
Chem.rdchem.BondType.THREEANDAHALF,
Chem.rdchem.BondType.FOURANDAHALF,
Chem.rdchem.BondType.FIVEANDAHALF,
Chem.rdchem.BondType.AROMATIC,
Chem.rdchem.BondType.IONIC,
Chem.rdchem.BondType.HYDROGEN,
Chem.rdchem.BondType.THREECENTER,
Chem.rdchem.BondType.DATIVEONE,
Chem.rdchem.BondType.DATIVE,
Chem.rdchem.BondType.DATIVEL,
Chem.rdchem.BondType.DATIVER,
Chem.rdchem.BondType.OTHER,
Chem.rdchem.BondType.ZERO,
]
chiral_type_list_1 = [
Chem.rdchem.ChiralType.CHI_UNSPECIFIED, # chirality that hasn't been specified
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW, # tetrahedral: clockwise rotation (SMILES @@)
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW, # tetrahedral: counter-clockwise rotation (SMILES @)
Chem.rdchem.ChiralType.CHI_OTHER, # some unrecognized type of chirality
# Chem.rdchem.ChiralType.CHI_TETRAHEDRAL, # tetrahedral, use permutation flag
# Chem.rdchem.ChiralType.CHI_ALLENE, # allene, use permutation flag
# Chem.rdchem.ChiralType.CHI_SQUAREPLANAR, # square planar, use permutation flag
# Chem.rdchem.ChiralType.CHI_TRIGONALBIPYRAMIDAL, # trigonal bipyramidal, use permutation flag
# Chem.rdchem.ChiralType.CHI_OCTAHEDRAL, # octahedral, use permutation flag
]
chiral_type_list = ["", "S", "R"]
bond_stereo_list = [ # stereochemistry of double bonds
Chem.rdchem.BondStereo.STEREONONE, # no special style
Chem.rdchem.BondStereo.STEREOANY, # intentionally unspecified
# -- Put any true specifications about this point so
# that we can do comparisons like if(bond->getStereo()>Bond::STEREOANY)
Chem.rdchem.BondStereo.STEREOZ, # Z double bond
Chem.rdchem.BondStereo.STEREOE, # E double bond
Chem.rdchem.BondStereo.STEREOCIS, # cis double bond
Chem.rdchem.BondStereo.STEREOTRANS, # trans double bond
]
def set_h_number(mol, atom_h_number):
for i in range(len(atom_h_number)):
for _ in range(atom_h_number[i]):
atom_tmp = Chem.Atom("H")
molecular_index = mol.AddAtom(atom_tmp)
try:
mol.AddBond(i, molecular_index, Chem.rdchem.BondType.SINGLE)
except:
mol.RemoveAtom(molecular_index)
def get_adjacency_matrix(smiles, add_h=None):
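    # decompose a SMILES string into graph-level arrays: a bond-typed adjacency matrix,
    # per-atom symbols/maps/charges/chirality/H counts, and bond stereo information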
mol = Chem.MolFromSmiles(smiles)
if flag_kekulize:
Chem.Kekulize(mol, clearAromaticFlags=True)
if add_h is True:
mol = AllChem.AddHs(mol)
elif add_h is False:
mol = AllChem.RemoveHs(mol)
adjacency_matrix = Chem.rdmolops.GetAdjacencyMatrix(mol)
bond_stereo = np.zeros_like(adjacency_matrix)
bond_stereo_dict = dict()
for bond in mol.GetBonds():
begin_idx, end_idx = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
bond_type = bond_type_list.index(bond.GetBondType())
adjacency_matrix[begin_idx, end_idx] = bond_type
adjacency_matrix[end_idx, begin_idx] = bond_type
bond_stereo_value = bond.GetStereo()
bond_stereo[begin_idx, end_idx] = bond_stereo_value
bond_stereo[end_idx, begin_idx] = bond_stereo_value
stereo_atoms = list(bond.GetStereoAtoms())
if len(stereo_atoms) >= 2:
bond_stereo_dict[(begin_idx, end_idx)] = stereo_atoms
bond_stereo_dict[(end_idx, begin_idx)] = stereo_atoms
atoms_map = [atom.GetAtomMapNum() for atom in mol.GetAtoms()]
atoms_charge = [atom.GetFormalCharge() for atom in mol.GetAtoms()]
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
atom_h_number = [atom.GetTotalNumHs() for atom in mol.GetAtoms()]
if flag_atoms_chiraltag == "old":
atoms_chiraltag = [
chiral_type_list_1.index(atom.GetChiralTag()) for atom in mol.GetAtoms()
]
else:
atoms_chiraltag = [0 for _ in range(mol.GetNumAtoms())]
for i, c in Chem.FindMolChiralCenters(mol):
atoms_chiraltag[i] = chiral_type_list.index(c)
return {
"adjacency_matrix": adjacency_matrix,
"atoms": atoms,
"atoms_map": atoms_map,
"atoms_chiraltag": atoms_chiraltag,
"atoms_charge": atoms_charge,
"bond_stereo": bond_stereo,
"bond_stereo_dict": bond_stereo_dict,
"atom_h_number": atom_h_number,
}
def create_molecule_with_atoms(atoms, atoms_map, atoms_charge, atoms_chiraltag):
molecule = Chem.RWMol()
atom_index = []
for atom_number, atom_symbol in enumerate(atoms):
atom_tmp = Chem.Atom(atom_symbol)
if atoms_map is not None:
atom_tmp.SetAtomMapNum(atoms_map[atom_number])
if atoms_charge is not None:
atom_tmp.SetFormalCharge(atoms_charge[atom_number])
if atoms_chiraltag is not None and flag_atoms_chiraltag == "old":
atom_tmp.SetChiralTag(chiral_type_list_1[atoms_chiraltag[atom_number]])
molecular_index = molecule.AddAtom(atom_tmp)
atom_index.append(molecular_index)
return molecule, atom_index
def add_bonds_to_molecule(molecule, atom_index, adjacency_matrix, bond_type_list):
for index_x, row_vector in enumerate(adjacency_matrix):
for index_y, bond in enumerate(row_vector[index_x + 1 :], start=index_x + 1):
if bond != 0:
molecule.AddBond(
atom_index[index_x], atom_index[index_y], bond_type_list[bond]
)
def set_bond_stereo(molecule, bond_stereo, bond_stereo_list, bond_stereo_dict):
for bond in molecule.GetBonds():
begin_idx, end_idx = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
stereo_tmp = bond_stereo[begin_idx, end_idx]
bond.SetStereo(bond_stereo_list[stereo_tmp])
if (begin_idx, end_idx) in bond_stereo_dict.keys():
stereo_atoms = bond_stereo_dict[(begin_idx, end_idx)]
bond.SetStereoAtoms(stereo_atoms[0], stereo_atoms[1])
def update_molecule_property_cache(molecule):
try:
molecule.UpdatePropertyCache()
except:
pass
def assign_chiral_tags(
molecule, atoms_chiraltag, atom_index, chiral_type_list, flag_use_list
):
trials = [
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
]
chis = OrderedDict()
for i, c in enumerate(atoms_chiraltag):
if c > 0:
chis.update({atom_index[i]: chiral_type_list[c]})
if chis:
if flag_use_list:
molecule_list = []
for prod in product(trials, repeat=len(chis)):
m = deepcopy(molecule)
for atIdx, chiral_tag in zip(chis.keys(), prod):
m.GetAtomWithIdx(atIdx).SetChiralTag(chiral_tag)
Chem.AssignStereochemistry(m)
matches = [chis[atIdx] == c for atIdx, c in Chem.FindMolChiralCenters(m)]
if all(matches):
if flag_use_list:
molecule_list.append(m)
else:
molecule = m
break
if flag_use_list:
molecule = molecule_list
else:
Chem.AssignStereochemistry(molecule)
if flag_use_list:
molecule = [molecule]
return molecule
def get_molecule_smiles(molecule, flag_kekulize, flag_use_list, add_h):
# Chem.AssignAtomChiralTagsFromStructure(molecule)
# molecule = AllChem.RemoveHs(molecule)
# return molecule
if flag_kekulize:
smiles = Chem.MolToSmiles(molecule, kekuleSmiles=True)
else:
# molecule = AllChem.RemoveHs(molecule)
if flag_use_list:
# smiles = [Chem.MolToSmiles(AllChem.RemoveHs(i)) for i in molecule]
            # make sure we always iterate over a list of molecules below
            if not isinstance(molecule, list):
                molecule = [molecule]
if add_h:
molecule = [AllChem.AddHs(i) for i in molecule]
smiles = [Chem.MolToSmiles(i) for i in molecule]
else:
# molecule = AllChem.RemoveHs(molecule)
if add_h:
molecule = AllChem.AddHs(molecule)
smiles = Chem.MolToSmiles(molecule)
return smiles
def graph2mol(
adjacency_matrix,
atoms,
atoms_map=None,
atoms_chiraltag=None,
atoms_charge=None,
bond_stereo=None,
bond_stereo_dict=None,
atom_h_number=None,
add_h=False,
):
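    # rebuild an RDKit molecule from the graph arrays produced by get_adjacency_matrix
    # (or by seq2graph) and return its SMILES; chirality is re-assigned by enumeration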
molecule, atom_index = create_molecule_with_atoms(
atoms, atoms_map, atoms_charge, atoms_chiraltag
)
add_bonds_to_molecule(molecule, atom_index, adjacency_matrix, bond_type_list)
if bond_stereo is not None:
set_bond_stereo(molecule, bond_stereo, bond_stereo_list, bond_stereo_dict)
if atom_h_number is not None:
set_h_number(molecule, atom_h_number)
molecule = molecule.GetMol()
update_molecule_property_cache(molecule)
if atoms_chiraltag is not None and flag_atoms_chiraltag == "new":
molecule = assign_chiral_tags(
molecule, atoms_chiraltag, atom_index, chiral_type_list, flag_use_list
)
smiles = get_molecule_smiles(molecule, flag_kekulize, flag_use_list, add_h)
return smiles
def get_InchiKey(smi):
if not smi:
return None
try:
mol = Chem.MolFromSmiles(smi)
except:
return None
if mol is None:
return None
try:
key = Chem.MolToInchiKey(mol)
return key
except:
return None
def judge_InchiKey(key1, key2):
if key1 is None or key2 is None:
return False
return key1 == key2
def same_smi(smi1, smi2):
key1 = get_InchiKey(smi1)
if key1 is None:
return False
key2 = get_InchiKey(smi2)
if key2 is None:
return False
return judge_InchiKey(key1, key2)
def get_charge_dict(smiles):
mol = Chem.MolFromSmiles(smiles)
charge_dict = {
atom.GetAtomMapNum(): atom.GetFormalCharge()
for atom in mol.GetAtoms()
if atom.GetAtomMapNum() != 0
}
return dict(sorted(charge_dict.items()))
def get_dict(path):
with open(path, "r") as f:
a = [i.strip() for i in f.readlines()]
return a
def get_canonical_smile(testsmi, isomericSmiles=True):
try:
mol = Chem.MolFromSmiles(testsmi)
return Chem.MolToSmiles(mol, isomericSmiles=isomericSmiles)
    except Exception:
        print("Cannot convert {} to canonical smiles".format(testsmi))
        return testsmi
def error(testsmi):
try:
mol = Chem.MolFromSmiles(testsmi)
_ = Chem.MolToSmiles(mol)
return True
except:
return False
def drop_map(smiles):
mol = Chem.MolFromSmiles(smiles)
for atom in mol.GetAtoms():
atom.SetAtomMapNum(0)
return Chem.MolToSmiles(mol)
def setmap2smiles(smiles):
mol = Chem.MolFromSmiles(smiles)
mol = AllChem.RemoveHs(mol)
[atom.SetAtomMapNum(idx + 1) for idx, atom in enumerate(mol.GetAtoms())]
return Chem.MolToSmiles(mol)
def test(smiles):
mol = Chem.MolFromSmiles(smiles)
# mol = Chem.RemoveHs(mol)
# chis1 = list(Chem.FindMolChiralCenters(mol))
# print([bo.GetStereo() for bo in mol.GetBonds()])
# Draw.MolToFile(mol, "test1.png", (1000, 1000))
if flag_kekulize:
Chem.Kekulize(mol, clearAromaticFlags=True)
smiles_refined = Chem.MolToSmiles(mol, kekuleSmiles=True)
else:
if False:
[a.SetAtomMapNum(0) for a in mol.GetAtoms()]
smiles_refined = Chem.MolToSmiles(mol)
else:
smiles_refined = smiles
kwargs = get_adjacency_matrix(smiles_refined, add_h=None)
smiles2 = graph2mol(**kwargs)
# chis2 = list(Chem.FindMolChiralCenters(mol))
# print([tuple(bo.GetStereoAtoms()) for bo in mol.GetBonds()])
# Draw.MolToFile(mol, "test2.png", (1000, 1000))
if flag_use_list:
same = [same_smi(smiles_refined, i) for i in smiles2]
same = True if True in same else False
else:
same = same_smi(smiles_refined, smiles2)
if not same:
print("*" * 10)
print(smiles)
print(smiles_refined)
print(smiles2)
# print(chis1, chis2)
# draw_mol([smiles, smiles_refined, smiles2], "2.png")
return same
def test2(smiles, lis):
mol = Chem.MolFromSmiles(smiles)
if flag_kekulize:
Chem.Kekulize(mol, clearAromaticFlags=True)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
for i in atoms:
if i not in lis:
print(i)
return False
return True | 13,048 | Python | .py | 335 | 31.731343 | 101 | 0.661209 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,232 | get_curve_plot.py | dptech-corp_NAG2G/NAG2G/utils/get_curve_plot.py | import numpy as np
import matplotlib.pyplot as plt
import os
import sys
def get_result(lines):
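    # collect the block printed after the 10th "strict"/"nodup_list_all" marker line
    # (the cumulative top-k accuracies) and parse it into a list of floats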
count = 0
want = ""
for i in lines:
if "strict" in i or "nodup_list_all" in i:
count += 1
if count == 10:
want = want + i
if count == 11:
break
want = want.replace("\n", " ").replace("\t", " ").replace("[", "").replace("]", "")
want = [float(i) for i in want.split() if i != "" and i != "strict"]
return want
def get(path, save_name="a.png"):
dirs = [i for i in os.listdir(path) if "checkpoint_" in i and ".pt" not in i]
idx = list(range(1, 11))
x = {}
for i in dirs:
iters = i.split("_")[-1]
if iters == "best" or iters == "last":
continue
path_new = os.path.join(path, i, "score")
if not os.path.exists(path_new):
path_new = os.path.join(path, i, "score.txt")
if os.path.exists(path_new):
with open(path_new, "r") as f:
lines = f.readlines()
x[int(iters)] = get_result(lines)
    x = sorted(x.items(), key=lambda kv: (kv[1], kv[0]))
plt.title("retro")
[print(i) for i in x]
all = [i[1] for i in x]
x = [i[0] for i in x]
for i in range(0, len(x)):
if x[i] % 10000 == 0:
plt.plot(idx, all[i], label=str(x[i]))
plt.xlabel("top K")
plt.ylim(0, 1)
plt.ylabel("hit percentage")
plt.grid()
plt.yticks(np.arange(0, 1.0, 0.05))
plt.legend()
plt.savefig(save_name)
if __name__ == "__main__":
    if len(sys.argv) < 2:
        raise ValueError("missing required argument: path to the checkpoint directory")
path = sys.argv[1]
get(path, save_name=path + "/a.png")
| 1,674 | Python | .py | 52 | 25.307692 | 87 | 0.522896 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,233 | draw_img.py | dptech-corp_NAG2G/NAG2G/utils/draw_img.py | from rdkit import Chem
try:
from rdkit.Chem import Draw
except:
print("can not import chem draw")
def draw_mol(smis, save_path, mols_per_row=4, img_size=(400, 400)):
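    # render a list of SMILES strings into a single grid image; entries that fail to
    # parse are kept as None placeholders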
mols = []
for smi in smis:
try:
mol = Chem.MolFromSmiles(smi)
except:
mol = None
mols.append(mol)
img = Draw.MolsToGridImage(
mols, molsPerRow=mols_per_row, subImgSize=img_size, legends=["" for x in mols]
)
img.save(save_path)
| 479 | Python | .py | 17 | 22.117647 | 86 | 0.619565 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,234 | graph_process.py | dptech-corp_NAG2G/NAG2G/utils/graph_process.py | from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
import pandas as pd
import random
import torch
import time
from tqdm import tqdm
from functools import lru_cache, wraps
# from scipy.sparse.csgraph import laplacian
import scipy.sparse as sparse
# allowable multiple choice node and edge features
allowable_features = {
"possible_atomic_num_list": list(range(1, 119)) + ["misc"],
"possible_chirality_list": [
"CHI_UNSPECIFIED",
"CHI_TETRAHEDRAL_CW",
"CHI_TETRAHEDRAL_CCW",
"CHI_TRIGONALBIPYRAMIDAL",
"CHI_OCTAHEDRAL",
"CHI_SQUAREPLANAR",
"CHI_OTHER",
"CHI_TETRAHEDRAL",
"CHI_ALLENE",
],
"possible_degree_list": list(range(99)) + ["misc"],
"possible_formal_charge_list": [i - 8 for i in range(17)] + ["misc"],
"possible_numH_list": [0, 1, 2, 3, 4, 5, 6, 7, 8, "misc"],
"possible_number_radical_e_list": [0, 1, 2, 3, 4, "misc"],
"possible_hybridization_list": [
"SP",
"SP2",
"SP3",
"SP3D",
"SP3D2",
"S",
"SP2D",
"OTHER",
"UNSPECIFIED",
"misc",
],
"possible_is_aromatic_list": [False, True],
"possible_is_in_ring_list": [False, True],
"possible_bond_type_list": [
"SINGLE",
"DOUBLE",
"TRIPLE",
"AROMATIC",
"UNSPECIFIED",
"QUADRUPLE",
"QUINTUPLE",
"HEXTUPLE",
"ONEANDAHALF",
"TWOANDAHALF",
"THREEANDAHALF",
"FOURANDAHALF",
"FIVEANDAHALF",
"IONIC",
"HYDROGEN",
"THREECENTER",
"DATIVEONE",
"DATIVE",
"DATIVEL",
"DATIVER",
"OTHER",
"ZERO",
"misc",
],
"possible_bond_stereo_list": [
"STEREONONE",
"STEREOZ",
"STEREOE",
"STEREOCIS",
"STEREOTRANS",
"STEREOANY",
],
"possible_is_conjugated_list": [False, True],
}
def fill_adj_matrix(result):
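    # densify the COO edge list into a symmetric atom-by-atom matrix of bond-type
    # indices, with -1 marking "no bond"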
num_atoms = result["atoms"].shape[0]
adj_matrix = np.full((num_atoms, num_atoms), -1, dtype=result["edge_index"].dtype)
adj_matrix[result["edge_index"][0, :], result["edge_index"][1, :]] = result[
"edge_attr"
][:, 0]
assert np.array_equal(adj_matrix, adj_matrix.T)
return adj_matrix
def laplacian_pe_2(A, k, idx_type):
    # the Laplacian-eigenvector PE is not computed in this release (k must be <= 0);
    # only the node degrees are returned, with None as a placeholder for the PE features
    n = A.shape[0]
    assert n > k and k <= 0
    degree = A.sum(axis=-1)
    return None, degree
def list_add(
degree_attn_list,
seq_list,
laplacian_attn_list,
token,
attn_adj_matrix,
min_node,
update_dict,
max_seq_len,
h_list,
degree_h_attn_list,
flag_atom,
flag_atom_list,
idx_type,
):
len_seq_list = len(seq_list)
if max_seq_len is not None and len_seq_list >= max_seq_len:
        if len_seq_list > max_seq_len:
            raise ValueError(
                "sequence length {} exceeds max_seq_len {}".format(len_seq_list, max_seq_len)
            )
return
seq_list.append(token)
if (
attn_adj_matrix is not None
and degree_attn_list is not None
and laplacian_attn_list is not None
):
list_add_pe(
degree_attn_list,
laplacian_attn_list,
attn_adj_matrix,
min_node,
update_dict,
idx_type=idx_type,
)
if h_list is not None and degree_h_attn_list is not None:
degree_h_attn_list.append(h_list.copy())
if flag_atom is not None and flag_atom_list is not None:
flag_atom_list.append(flag_atom.copy())
def list_add_pe(
degree_attn_list,
laplacian_attn_list,
attn_adj_matrix,
min_node,
update_dict,
idx_type,
):
if update_dict["is_A_updated"]:
result, degree = laplacian_pe_2(attn_adj_matrix, min_node, idx_type=idx_type)
update_dict["last_result"], update_dict["last_degree"] = result, degree
update_dict["is_A_updated"] = False
else:
result, degree = update_dict["last_result"], update_dict["last_degree"]
# degree_attn_list.append(attn_adj_matrix.sum(axis=-1))
degree_attn_list.append(degree)
if min_node > 0:
laplacian_attn_list.append(result)
def graph2seq_process(
result,
class_idx,
min_node,
want_attn=False,
want_charge_h=True,
max_seq_len=None,
sumto2=True,
use_sep2=False,
want_h_degree=False,
idx_type=0,
charge_h_last=False
):
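    # serialize a product graph into the decoder token sequence: [CLS] (plus an optional
    # class token), then for every atom its symbol, charge/H tokens and bond tokens back
    # to earlier atoms (with [gapN] placeholders), optionally building the degree and
    # Laplacian attention masks on the fly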
    if charge_h_last:
        raise NotImplementedError("charge_h_last is not supported")
update_dict = {
"is_A_updated": True,
"last_result": None,
"last_degree": None,
}
len_atoms = result["atoms"].shape[0]
seq_list = []
attn_adj_matrix = None
degree_attn_list = None
laplacian_attn_list = None
h_list = None
degree_h_attn_list = None
flag_atom = None
flag_atom_list = None
if want_attn:
N_node = max(len_atoms, min_node + 2)
# N_node = max(len_atoms, 250)
attn_adj_matrix = np.zeros([N_node, N_node], dtype=np.float32)
degree_attn_list = []
laplacian_attn_list = []
flag_atom = np.array([0] * N_node, dtype=int)
flag_atom_list = []
if want_h_degree:
h_list = np.array([0] * N_node, dtype=int)
degree_h_attn_list = []
dict_ = {
"degree_attn_list": degree_attn_list,
"seq_list": seq_list,
"laplacian_attn_list": laplacian_attn_list,
"attn_adj_matrix": attn_adj_matrix,
"min_node": min_node,
"update_dict": update_dict,
"max_seq_len": max_seq_len,
"h_list": h_list,
"degree_h_attn_list": degree_h_attn_list,
"flag_atom": flag_atom,
"flag_atom_list": flag_atom_list,
"idx_type": idx_type,
}
list_add(token="[CLS]", **dict_)
if class_idx is not None:
list_add(token="[class{}]".format(class_idx), **dict_)
adj_matrix = fill_adj_matrix(result)
map_flag = True
for i in range(len_atoms):
if use_sep2 and map_flag and result["atoms_map"][i] == 0:
map_flag = False
list_add(token="[SEP2]", **dict_)
if flag_atom is not None:
flag_atom[i] = 1
list_add(token=result["atoms"][i], **dict_)
if want_charge_h:
key = allowable_features["possible_formal_charge_list"][
result["node_attr"][i][3]
]
assert key != "misc"
if key != 0:
key = "(charge{})".format(key)
list_add(token=key, **dict_)
key = allowable_features["possible_numH_list"][result["node_attr"][i][4]]
assert key != "misc"
if key != 0:
if h_list is not None and h_list[i] == 0:
h_list[i] = key
key = "(H{})".format(key)
list_add(token=key, **dict_)
if i == 0:
continue
gap_count = 0
for j in range(i - 1, -1, -1):
if adj_matrix[i, j] == -1:
gap_count += 1
else:
if gap_count != 0:
list_add(token="[gap{}]".format(gap_count), **dict_)
gap_count = 0
key = allowable_features["possible_bond_type_list"][adj_matrix[i, j]]
assert key != "misc"
key = "[{}]".format(key)
if want_attn:
attn_adj_matrix[i, j] = 1
attn_adj_matrix[j, i] = 1
update_dict["is_A_updated"] = True
list_add(token=key, **dict_)
list_add(token="[SEP]", **dict_)
if want_attn:
node_in_list = ["[" not in i and "(" not in i for i in seq_list]
flag_atom_list = np.stack(flag_atom_list)
degree_attn_list = np.stack(degree_attn_list) + flag_atom_list
if want_h_degree:
degree_h_attn_list = np.stack(degree_h_attn_list)
degree_attn_list = degree_attn_list + degree_h_attn_list
degree_attn_list = degree_attn_list[:, : sum(node_in_list)]
degree_attn_mask = np.zeros([len(seq_list), len(seq_list)])
degree_attn_mask[:, node_in_list] = degree_attn_list
if min_node > 0:
laplacian_attn_list = np.stack(laplacian_attn_list)
laplacian_attn_list[flag_atom_list == 0] = 0
laplacian_attn_list = laplacian_attn_list[:, : sum(node_in_list)]
laplacian_attn_mask = np.zeros([len(seq_list), len(seq_list), min_node * 2])
laplacian_attn_mask[:, node_in_list, :] = laplacian_attn_list
laplacian_attn_mask = torch.tensor(laplacian_attn_mask, dtype=torch.float)
if sumto2:
laplacian_attn_mask = laplacian_attn_mask.reshape(
len(seq_list), len(seq_list), 2, min_node
).sum(dim=-1)
else:
laplacian_attn_mask = None
return {
"seq": seq_list,
"degree_attn_mask": torch.tensor(degree_attn_mask).long(),
"laplacian_attn_mask": laplacian_attn_mask,
}
else:
return {
"seq": seq_list,
}
def seq2graph(
seq_list,
min_node=None,
result=None,
want_attn=False,
):
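    # inverse of graph2seq_process: walk the token sequence and recover the atoms, bond
    # adjacency, formal charges and explicit-H counts; `result`, when given, is only
    # used to sanity-check the reconstruction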
    if want_attn:
        raise NotImplementedError("attention-mask reconstruction is not supported here")
atoms = [i for i in seq_list if "[" not in i and "(" not in i]
atoms_charge = np.array([0 for _ in atoms])
atoms_h = np.array([0 for _ in atoms])
atoms = np.array(atoms)
len_atoms = atoms.shape[0]
adj_matrix = (np.ones([len_atoms, len_atoms]) * (-1)).astype(np.int32)
i = -1
j = -1
for k in seq_list:
if (k in ["[CLS]", "[PAD]", "[UNK]", "[SEP2]"]) or ("class" in k):
pass
elif k in ["[SEP]"]:
break
elif "[" not in k and "(" not in k:
i += 1
j = i - 1
elif "(charge" in k:
key = int(k[7:-1])
if i >= 0 and atoms_charge[i] == 0:
atoms_charge[i] = key
elif "(H" in k:
key = int(k[2:-1])
if i >= 0 and atoms_h[i] == 0:
atoms_h[i] = key
elif "gap" in k:
j -= int(k[4:-1])
else:
bond_type_value = allowable_features["possible_bond_type_list"].index(
k[1:-1]
)
if i >= 0 and j >= 0:
adj_matrix[i, j] = bond_type_value
adj_matrix[j, i] = bond_type_value
j -= 1
if result is not None:
assert (result["atoms"] == atoms).all()
assert (adj_matrix == fill_adj_matrix(result)).all()
atoms_charge_tmp = np.array(
[
allowable_features["possible_formal_charge_list"].index(i)
for i in atoms_charge
]
)
atoms_h_tmp = np.array(
[allowable_features["possible_numH_list"].index(i) for i in atoms_h]
)
assert (atoms_charge_tmp == result["node_attr"][:, 3]).all()
assert (atoms_h_tmp == result["node_attr"][:, 4]).all()
return_dict = {
"atoms": atoms,
"adj_matrix": adj_matrix,
"atoms_charge": atoms_charge,
"atoms_h": atoms_h,
}
return return_dict
def process_one(smiles):
mol = Chem.MolFromSmiles(smiles)
atoms = np.array([x.GetSymbol() for x in mol.GetAtoms()])
atoms_map = np.array([x.GetAtomMapNum() for x in mol.GetAtoms()])
node_attr, edge_index, edge_attr = get_graph(mol)
return {
"atoms": atoms,
"atoms_map": atoms_map,
"smi": smiles,
"node_attr": node_attr,
"edge_index": edge_index,
"edge_attr": edge_attr,
}
def safe_index(l, e):
"""
Return index of element e in list l. If e is not present, return the last index
"""
try:
return l.index(e)
    except ValueError:
print(l, e)
# raise
return len(l) - 1
def atom_to_feature_vector(atom):
"""
Converts rdkit atom object to feature list of indices
:param mol: rdkit atom object
:return: list
"""
atom_feature = [
safe_index(allowable_features["possible_atomic_num_list"], atom.GetAtomicNum()),
allowable_features["possible_chirality_list"].index(str(atom.GetChiralTag())),
safe_index(allowable_features["possible_degree_list"], atom.GetTotalDegree()),
safe_index(
allowable_features["possible_formal_charge_list"], atom.GetFormalCharge()
),
safe_index(allowable_features["possible_numH_list"], atom.GetTotalNumHs()),
safe_index(
allowable_features["possible_number_radical_e_list"],
atom.GetNumRadicalElectrons(),
),
safe_index(
allowable_features["possible_hybridization_list"],
str(atom.GetHybridization()),
),
allowable_features["possible_is_aromatic_list"].index(atom.GetIsAromatic()),
allowable_features["possible_is_in_ring_list"].index(atom.IsInRing()),
]
return atom_feature
def bond_to_feature_vector(bond):
"""
Converts rdkit bond object to feature list of indices
:param mol: rdkit bond object
:return: list
"""
bond_feature = [
safe_index(
allowable_features["possible_bond_type_list"], str(bond.GetBondType())
),
allowable_features["possible_bond_stereo_list"].index(str(bond.GetStereo())),
allowable_features["possible_is_conjugated_list"].index(bond.GetIsConjugated()),
]
return bond_feature
def get_graph(mol):
"""
Converts SMILES string to graph Data object
:input: SMILES string (str)
:return: graph object
"""
atom_features_list = []
for atom in mol.GetAtoms():
atom_features_list.append(atom_to_feature_vector(atom))
x = np.array(atom_features_list, dtype=np.int32)
# bonds
num_bond_features = 3 # bond type, bond stereo, is_conjugated
if len(mol.GetBonds()) > 0: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edge_feature = bond_to_feature_vector(bond)
# add edges in both directions
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format with shape [2, num_edges]
edge_index = np.array(edges_list, dtype=np.int32).T
# data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]
edge_attr = np.array(edge_features_list, dtype=np.int32)
else: # mol has no bonds
edge_index = np.empty((2, 0), dtype=np.int32)
edge_attr = np.empty((0, num_bond_features), dtype=np.int32)
return x, edge_index, edge_attr
def shuffle_graph_process(result, list_=None):
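    # permute the atom ordering of a graph dict in place (atoms, maps, node features)
    # and remap edge_index accordingly; a fixed permutation can be supplied via list_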
if list_ is None:
list_ = [i for i in range(result["atoms"].shape[0])]
random.shuffle(list_)
result["atoms"] = result["atoms"][list_]
result["atoms_map"] = result["atoms_map"][list_]
result["node_attr"] = result["node_attr"][list_]
list_reverse = {i: idx for idx, i in enumerate(list_)}
for i in range(result["edge_index"].shape[0]):
for j in range(result["edge_index"].shape[1]):
result["edge_index"][i, j] = list_reverse[result["edge_index"][i, j]]
return result
| 15,340 | Python | .py | 449 | 25.824053 | 88 | 0.56205 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,235 | save_config.py | dptech-corp_NAG2G/NAG2G/utils/save_config.py | import configparser
list_ = [
"batch_size",
"batch_size_valid",
"data",
"tensorboard_logdir",
"bf16",
"num_workers",
"required_batch_size_multiple",
"valid_subset",
"label_prob",
"mid_prob",
"mid_upper",
"mid_lower",
"plddt_loss_weight",
"pos_loss_weight",
"shufflegraph",
"infer_save_name",
"decoder_attn_from_loader",
"infer_step",
"config_file",
"path",
"results_path",
"beam_size",
"search_strategies",
"len_penalty",
"temperature",
"beam_size_second",
"beam_head_second",
"nprocs_per_node",
"data_buffer_size",
"distributed_rank",
"distributed_port",
"distributed_world_size",
"distributed_backend",
"distributed_init_method",
"distributed_no_spawn",
"lr_shrink"
]
def add_config_save_args(parser):
parser.add_argument(
"--config_file",
type=str,
default="",
help="Path to configuration file",
)
def save_config(args):
save_path = args.config_file
if save_path == "":
return
args_dict = vars(args)
args_dict = {k: str(v) for k, v in args_dict.items()}
config = configparser.ConfigParser()
config.read_dict({"DEFAULT": args_dict})
with open(save_path, "w") as f:
config.write(f)
def read_config(args):
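    # restore argument values from the saved config file, skipping the runtime-specific
    # arguments in list_ and casting each value back to the type of the current default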
if args.config_file != "":
config = configparser.ConfigParser()
config.read(args.config_file)
for arg in vars(args):
if arg in list_:
continue
value = config["DEFAULT"].get(arg)
if arg == "noise_scale":
value = 0.0
type_arg = type(getattr(args, arg))
if type_arg == type(None):
if arg in [
"log_format",
"distributed_init_method",
"path",
"results_path",
]:
type_arg = type("abc")
elif arg in [
"fp16_scale_window",
"batch_size",
"fixed_validation_seed",
"batch_size_valid",
"max_valid_steps",
"force_anneal",
]:
type_arg = type(123)
elif arg in ["threshold_loss_scale"]:
type_arg = type(1.2)
                else:
                    raise ValueError(
                        "cannot infer the type of argument '{}' from the saved config".format(arg)
                    )
if value is not None and value != "None":
# print(arg, type_arg, value)
if type_arg == type(True):
if value == "True":
setattr(args, arg, True)
elif value == "False":
setattr(args, arg, False)
else:
setattr(args, arg, type_arg(value))
return args
| 2,862 | Python | .py | 98 | 18.979592 | 57 | 0.492017 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,236 | new_multihead_attention.py | dptech-corp_NAG2G/NAG2G/decoder/new_multihead_attention.py | from typing import Dict, Optional
import torch
from torch import Tensor, nn
from unicore.modules import softmax_dropout
class NewSelfMultiheadAttention(nn.Module):
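    # standard self-attention extended with an optional low-rank "dynamic" key term:
    # queries are projected down to reduced_head_dim and multiplied with the per-step
    # key transform k_dynamic_T, and the resulting logits are added to the QK^T scores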
def __init__(
self,
embed_dim,
num_heads,
dropout=0.1,
bias=True,
scaling_factor=1,
reduced_head_dim=4,
q_reduced_before=False,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = (self.head_dim * scaling_factor) ** -0.5
self.reduced_head_dim = reduced_head_dim
self.q_reduced_before = q_reduced_before
if self.q_reduced_before:
self.reduced_head_dim_proj = nn.Linear(embed_dim, self.num_heads * self.reduced_head_dim, bias=bias)
else:
self.reduced_head_dim_proj = nn.Linear(self.head_dim, self.reduced_head_dim, bias=bias)
self.in_proj = nn.Linear(embed_dim, embed_dim * 3, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
query,
k_dynamic_T: Optional[Tensor] = None,
key_padding_mask: Optional[Tensor] = None,
attn_bias: Optional[Tensor] = None,
return_attn: bool = False,
) -> Tensor:
bsz, tgt_len, embed_dim = query.size()
assert embed_dim == self.embed_dim
q, k, v = self.in_proj(query).chunk(3, dim=-1)
if k_dynamic_T is not None and self.q_reduced_before:
# q_reduced = self.reduced_head_dim_proj(q)
q_reduced = self.reduced_head_dim_proj(query)
q_reduced = (
q_reduced.view(bsz, tgt_len, self.num_heads, self.reduced_head_dim)
.transpose(1, 2)
.contiguous()
.view(-1, self.reduced_head_dim).unsqueeze(1)
)
q = (
q.view(bsz, tgt_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
.view(bsz * self.num_heads, -1, self.head_dim)
)
if k_dynamic_T is not None:
# [batchsize * h, tgt_len, head_dim] -> [batchsize * h * tgt_len, 1, reduced_head_dim]
if not self.q_reduced_before:
q_reduced = self.reduced_head_dim_proj(q).view(-1, self.reduced_head_dim).unsqueeze(1)
attn_weights_dynamic = torch.bmm(q_reduced, k_dynamic_T).view(-1, tgt_len, tgt_len)
q = q * self.scaling
if k is not None:
k = (
k.view(bsz, -1, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
.view(bsz * self.num_heads, -1, self.head_dim)
)
if v is not None:
v = (
v.view(bsz, -1, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
.view(bsz * self.num_heads, -1, self.head_dim)
)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
attn_weights = torch.bmm(q, k.transpose(1, 2))
if k_dynamic_T is not None:
attn_weights = attn_weights + attn_weights_dynamic
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights.masked_fill_(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if not return_attn:
attn = softmax_dropout(
attn_weights, self.dropout, self.training, bias=attn_bias,
)
        else:
            # add the bias before softmax so the returned attn_weights include it
            if attn_bias is not None:
                attn_weights = attn_weights + attn_bias
            attn = softmax_dropout(
                attn_weights, self.dropout, self.training, inplace=False,
            )
o = torch.bmm(attn, v)
assert list(o.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
o = (
o.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.contiguous()
.view(bsz, tgt_len, embed_dim)
)
o = self.out_proj(o)
if not return_attn:
return o
else:
return o, attn_weights, attn | 5,017 | Python | .py | 120 | 30.5 | 112 | 0.552184 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,237 | transformer_decoder.py | dptech-corp_NAG2G/NAG2G/decoder/transformer_decoder.py | # Copyright (c) DP Technology.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from unicore.modules import LayerNorm
from unicore.modules.transformer_encoder import relative_position_bucket
from . import TransformerDecoderLayer
def fill_with_neg_inf(t):
return t.fill_(float("-inf"))
def build_future_mask(seq_len):
    return torch.triu(
        fill_with_neg_inf(torch.zeros([seq_len, seq_len])), 1
    )
# keep the old, misspelled name as an alias in case it is imported elsewhere
bulid_future_mask = build_future_mask
class TransformerDecoder(nn.Module):
def __init__(
self,
decoder_layers: int = 6,
embed_dim: int = 768,
ffn_embed_dim: int = 3072,
attention_heads: int = 8,
emb_dropout: float = 0.1,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.0,
max_seq_len: int = 256,
activation_fn: str = "gelu",
rel_pos: bool = True,
rel_pos_bins: int = 32,
max_rel_pos: int = 128,
post_ln: bool = False,
auto_regressive: bool = True,
reduced_head_dim = 4,
q_reduced_before = False,
want_emb_k_dynamic_proj = False,
want_emb_k_dynamic_dropout = True,
) -> None:
super().__init__()
self.emb_dropout = emb_dropout
self.max_seq_len = max_seq_len
self.embed_dim = embed_dim
self.attention_heads = attention_heads
self.emb_layer_norm = LayerNorm(self.embed_dim)
self.auto_regressive = auto_regressive
if self.auto_regressive:
            self._future_mask = build_future_mask(self.max_seq_len)
else:
self._future_mask = None
if not post_ln:
self.final_layer_norm = LayerNorm(self.embed_dim)
else:
self.final_layer_norm = None
self.reduced_head_dim = reduced_head_dim
self.k_dynamic_scaling = self.reduced_head_dim ** -0.5
self.emb_k_dynamic_layer_norm = LayerNorm(self.attention_heads * self.reduced_head_dim)
self.want_emb_k_dynamic_dropout = want_emb_k_dynamic_dropout
self.want_emb_k_dynamic_proj = want_emb_k_dynamic_proj
if self.want_emb_k_dynamic_proj:
self.emb_k_dynamic_proj = nn.Linear(self.attention_heads * reduced_head_dim, self.attention_heads * reduced_head_dim, bias=True)
self.layers = nn.ModuleList(
[
TransformerDecoderLayer(
embed_dim=self.embed_dim,
ffn_embed_dim=ffn_embed_dim,
attention_heads=attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
post_ln=post_ln,
reduced_head_dim=self.reduced_head_dim,
q_reduced_before=q_reduced_before
)
for _ in range(decoder_layers)
]
)
self.rel_pos = rel_pos
if self.rel_pos:
assert rel_pos_bins % 2 == 0
self.rel_pos_bins = rel_pos_bins
self.max_rel_pos = max_rel_pos
self.relative_attention_bias = nn.Embedding(
self.rel_pos_bins, self.attention_heads)
seq_len = self.max_seq_len
context_position = torch.arange(seq_len, dtype=torch.long)[:, None]
memory_position = torch.arange(seq_len, dtype=torch.long)[None, :]
relative_position = memory_position - context_position
self.rp_bucket = relative_position_bucket(
relative_position,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos
)
self.rp_bucket -= self.rp_bucket.min()
def get_rel_pos_bias(self, x):
# Assume the input is ordered. If your input token is permuted, you may need to update this accordingly
if self.rp_bucket.device != x.device:
self.rp_bucket = self.rp_bucket.to(x.device)
seq_len = x.size(1)
rp_bucket = self.rp_bucket[:seq_len, :seq_len]
values = F.embedding(rp_bucket, self.relative_attention_bias.weight)
values = values.permute([2, 0, 1])
return values.contiguous()
def get_future_mask(self, x, attn_mask):
if not self.auto_regressive:
return attn_mask
if self._future_mask.device != x.device:
self._future_mask = self._future_mask.to(x.device)
if self._future_mask.dtype != x.dtype:
self._future_mask = self._future_mask.type_as(x)
if attn_mask is None:
ret = self._future_mask[:x.size(1), :x.size(1)]
ret = ret.contiguous().unsqueeze(0).repeat(
x.size(0)*self.attention_heads, 1, 1)
return ret
else:
assert list(attn_mask.size()) == [x.size(
0) * self.attention_heads, x.size(1), x.size(1)]
return attn_mask + self._future_mask[:x.size(1), :x.size(1)]
def forward(
self,
emb,
emb_k_dynamic: Optional[torch.Tensor] = None,
encoder_out: Optional[torch.Tensor] = None,
padding_mask: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
encoder_attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
seq_len = emb.size(1)
x = self.emb_layer_norm(emb)
x = F.dropout(x, p=self.emb_dropout, training=self.training)
# account for padding while computing the representation
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
if emb_k_dynamic is not None:
bsz = emb_k_dynamic.shape[0]
emb_k_dynamic = self.emb_k_dynamic_layer_norm(emb_k_dynamic)
if self.want_emb_k_dynamic_dropout:
emb_k_dynamic = F.dropout(emb_k_dynamic, p=self.emb_dropout, training=self.training)
if self.want_emb_k_dynamic_proj:
emb_k_dynamic = self.emb_k_dynamic_proj(emb_k_dynamic)
            # [bsz, seq_len, seq_len, attention_heads * reduced_head_dim]
            #   -> [bsz * attention_heads * seq_len, reduced_head_dim, seq_len]
emb_k_dynamic = (
emb_k_dynamic.view(bsz, seq_len, seq_len, self.attention_heads, self.reduced_head_dim)
.permute(0,3,1,2,4)
.contiguous()
.view(bsz * self.attention_heads * seq_len, seq_len, self.reduced_head_dim)
.transpose(1,2)
) * self.k_dynamic_scaling
rel_pos_bias = self.get_rel_pos_bias(x).repeat(
x.size(0), 1, 1) if self.rel_pos else None
if attn_mask is None:
attn_mask = rel_pos_bias
elif rel_pos_bias is not None:
attn_mask += rel_pos_bias
if self.auto_regressive:
attn_mask = self.get_future_mask(x, attn_mask)
if attn_mask is not None and padding_mask is not None:
# merge key_padding_mask and attn_mask
attn_mask = attn_mask.view(x.size(0), -1, seq_len, seq_len)
attn_mask.masked_fill_(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf")
)
attn_mask = attn_mask.view(-1, seq_len, seq_len)
padding_mask = None
for layer in self.layers:
x = layer(x, k_dynamic_T=emb_k_dynamic, encoder_out=encoder_out, padding_mask=padding_mask, attn_bias=attn_mask,
encoder_padding_mask=encoder_padding_mask, encoder_attn_bias=encoder_attn_mask)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
return x
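# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# Shapes follow forward() above: `emb` is [bsz, seq_len, embed_dim] and
# `emb_k_dynamic` is [bsz, seq_len, seq_len, attention_heads * reduced_head_dim].
# The concrete sizes are arbitrary assumptions; because this file uses
# package-relative imports, run it e.g. as `python -m NAG2G.decoder.transformer_decoder`.
if __name__ == "__main__":
    decoder = TransformerDecoder(
        decoder_layers=2,
        embed_dim=64,
        ffn_embed_dim=128,
        attention_heads=8,
        max_seq_len=16,
        reduced_head_dim=4,
    )
    bsz, seq_len = 2, 10
    emb = torch.randn(bsz, seq_len, 64)
    emb_k_dynamic = torch.randn(bsz, seq_len, seq_len, 8 * 4)
    out = decoder(emb, emb_k_dynamic=emb_k_dynamic)
    print(out.shape)  # expected: torch.Size([2, 10, 64])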
| 7,986 | Python | .py | 176 | 34.454545 | 140 | 0.590366 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,238 | transformer_decoder_layer.py | dptech-corp_NAG2G/NAG2G/decoder/transformer_decoder_layer.py | # Copyright (c) DP Technology.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
import torch.nn.functional as F
from unicore import utils
from torch import nn
from unicore.modules import LayerNorm, SelfMultiheadAttention, CrossMultiheadAttention
from . import NewSelfMultiheadAttention
class TransformerDecoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embed_dim: int = 768,
ffn_embed_dim: int = 3072,
attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.0,
activation_fn: str = "gelu",
post_ln = False,
reduced_head_dim=4,
q_reduced_before=False,
) -> None:
super().__init__()
# Initialize parameters
self.embed_dim = embed_dim
self.attention_heads = attention_heads
self.attention_dropout = attention_dropout
self.dropout = dropout
self.activation_dropout = activation_dropout
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = NewSelfMultiheadAttention(
self.embed_dim,
attention_heads,
dropout=attention_dropout,
reduced_head_dim=reduced_head_dim,
q_reduced_before=q_reduced_before
)
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = CrossMultiheadAttention(
self.embed_dim,
attention_heads,
dropout=attention_dropout,
)
# layer norm associated with the self attention layer
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, ffn_embed_dim)
self.fc2 = nn.Linear(ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.post_ln = post_ln
def forward(
self,
x: torch.Tensor,
k_dynamic_T: Optional[torch.Tensor] = None,
        encoder_out: Optional[torch.Tensor] = None,
attn_bias: Optional[torch.Tensor] = None,
padding_mask: Optional[torch.Tensor] = None,
encoder_attn_bias: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer implementation.
"""
residual = x
if not self.post_ln:
x = self.self_attn_layer_norm(x)
        # self-attention, with the optional dynamic pairwise term k_dynamic_T
x = self.self_attn(
query=x,
k_dynamic_T=k_dynamic_T,
key_padding_mask=padding_mask,
attn_bias=attn_bias,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if self.post_ln:
x = self.self_attn_layer_norm(x)
if encoder_out is not None:
residual = x
if not self.post_ln:
x = self.encoder_attn_layer_norm(x)
x = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
attn_bias=encoder_attn_bias,
)
#x = self.dropout_module(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if self.post_ln:
x = self.encoder_attn_layer_norm(x)
residual = x
if not self.post_ln:
x = self.final_layer_norm(x)
x = self.fc1(x)
x = self.activation_fn(x)
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if self.post_ln:
x = self.final_layer_norm(x)
return x
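# ---------------------------------------------------------------------------
# Shape sketch (illustrative only): the dynamic term expects k_dynamic_T of shape
# [bsz * attention_heads * tgt_len, reduced_head_dim, tgt_len]; TransformerDecoder
# builds it from a pairwise feature map before calling this layer. Sizes below are
# arbitrary assumptions; run e.g. as `python -m NAG2G.decoder.transformer_decoder_layer`.
if __name__ == "__main__":
    layer = TransformerDecoderLayer(embed_dim=64, ffn_embed_dim=128, attention_heads=8)
    bsz, tgt_len, reduced_head_dim = 2, 5, 4
    x = torch.randn(bsz, tgt_len, 64)
    k_dynamic_T = torch.randn(bsz * 8 * tgt_len, reduced_head_dim, tgt_len)
    print(layer(x, k_dynamic_T=k_dynamic_T).shape)  # torch.Size([2, 5, 64])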
| 4,261 | Python | .py | 114 | 28.026316 | 86 | 0.605627 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,239 | __init__.py | dptech-corp_NAG2G/NAG2G/decoder/__init__.py | from .new_multihead_attention import NewSelfMultiheadAttention
from .transformer_decoder_layer import TransformerDecoderLayer
from .transformer_decoder import TransformerDecoder
| 178 | Python | .py | 3 | 58.333333 | 62 | 0.902857 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,240 | init_method.py | dptech-corp_NAG2G/NAG2G/modules/init_method.py | from torch.nn.init import xavier_uniform_
def init_xavier_params(module):
    # apply Xavier/Glorot uniform initialization to every weight matrix
    # (dim > 1); biases and other 1-D parameters are left untouched
    for p in module.parameters():
        if p.dim() > 1:
            xavier_uniform_(p)
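# Quick illustration (not part of the original file): re-initialize a small
# module; only 2-D weight matrices are touched by the loop above.
if __name__ == "__main__":
    import torch.nn as nn
    mlp = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
    init_xavier_params(mlp)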
| 254 | Python | .py | 8 | 24.5 | 41 | 0.594262 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,241 | heads.py | dptech-corp_NAG2G/NAG2G/modules/heads.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from unicore import utils
from unicore.modules import LayerNorm
import logging
logger = logging.getLogger(__name__)
class MaskLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the masked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight) + self.bias
return x
class ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class NonLinearHead(nn.Module):
"""Head for simple classification tasks."""
def __init__(
self,
input_dim,
out_dim,
activation_fn,
hidden=None,
):
super().__init__()
hidden = input_dim if not hidden else hidden
self.linear1 = nn.Linear(input_dim, hidden)
self.linear2 = nn.Linear(hidden, out_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
def forward(self, x):
x = self.linear1(x)
x = self.activation_fn(x)
x = self.linear2(x)
return x
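# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only); sizes and vocabulary are arbitrary
# assumptions.
if __name__ == "__main__":
    head = NonLinearHead(input_dim=64, out_dim=10, activation_fn="gelu")
    print(head(torch.randn(2, 5, 64)).shape)  # torch.Size([2, 5, 10])
    lm_head = MaskLMHead(embed_dim=64, output_dim=100, activation_fn="gelu")
    print(lm_head(torch.randn(2, 5, 64)).shape)  # torch.Size([2, 5, 100])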
| 2,550 | Python | .py | 71 | 28.323944 | 74 | 0.617946 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,242 | attn_bias_layer.py | dptech-corp_NAG2G/NAG2G/modules/attn_bias_layer.py | import torch
import torch.nn as nn
from functools import lru_cache
import numpy as np
import time
@lru_cache(maxsize=2)
def laplacian_pe_batch(A, k, idx_type=0):
    # Only node degrees are computed here; the Laplacian positional encoding
    # itself is not implemented (the first return value is always None), so
    # this path effectively supports min_node <= 0 only.
    assert len(A.shape) == 3
    B, n, _ = A.shape
    assert n > k and k <= 0
    degree = A.sum(axis=-1)
    return None, degree
class seq2attn: # (nn.Module):
def __init__(self, min_node, sumto2, dictionary=None, want_h_degree=False, idx_type=0, use_class=False):
self.min_node = min_node
self.sumto2 = sumto2
self.dictionary = dictionary
assert dictionary is not None
self.get_token_indexs(dictionary)
self.want_h_degree = want_h_degree
self.idx_type = idx_type
self.use_class = use_class
def get_token_indexs(self, dictionary):
token_categories = {
"special": ["[CLS]", "[PAD]", "[UNK]", "[SEP]"],
"charge": ["(charge"],
"hydrogen": ["(H"],
"sep2": ["[SEP2]"],
"gap": ["[gap"],
"react_class": ["[class"],
"bond": ["["],
"atom": [""],
}
for category in token_categories.keys():
setattr(self, category, [])
for k, v in dictionary.indices.items():
for category, tokens in token_categories.items():
if any(token in k for token in tokens):
getattr(self, category).append(v)
break
for category in token_categories.keys():
category_list = getattr(self, category)
if category_list:
assert max(category_list) - min(category_list) + 1 == len(category_list)
setattr(self, category, [min(category_list), max(category_list)])
else:
                raise ValueError(f"no dictionary tokens found for category '{category}'")
def new_status_get(self, seq, i_j, attn_adj_matrix, h_degree):
k = seq[:, -1].detach().cpu().numpy()
flag_atom = np.logical_and(self.atom[0] <= k, k <= self.atom[1])
i_j[flag_atom, 1] = i_j[flag_atom, 0]
i_j[flag_atom, 0] = i_j[flag_atom, 0] + 1
flag_gap = np.logical_and(self.gap[0] <= k, k <= self.gap[1])
i_j[flag_gap, 1] -= k[flag_gap] - self.gap[0] + 1
flag_bond = np.logical_and(self.bond[0] <= k, k <= self.bond[1])
flag_bond2 = np.logical_and(i_j[:, 0] >= 0, i_j[:, 1] >= 0)
flag_bond = np.logical_and(flag_bond, flag_bond2)
attn_adj_matrix[flag_bond, i_j[flag_bond, 0], i_j[flag_bond, 1]] = 1
attn_adj_matrix[flag_bond, i_j[flag_bond, 1], i_j[flag_bond, 0]] = 1
i_j[flag_bond, 1] -= 1
flag_h = np.logical_and(self.hydrogen[0] <= k, k <= self.hydrogen[1])
flag_h2 = np.logical_and(
i_j[:, 0] >= 0, h_degree[np.arange(k.shape[0]), i_j[:, 0]] == 0
)
flag_h = np.logical_and(flag_h, flag_h2)
h_degree[flag_h, i_j[flag_h, 0]] = k[flag_h] - self.hydrogen[0]
def update_status(self, seq, idx, N_node):
device = seq.device
seq_shape_0 = seq.shape[0]
if seq.shape[1] == 1 or (seq.shape[1] == 2 and self.use_class is True):
i_j = np.full((seq_shape_0, 2), -1, dtype=np.int32)
attn_adj_matrix = np.zeros([seq_shape_0, N_node, N_node])
h_degree = np.zeros([seq_shape_0, N_node])
else:
i_j = self.i_j[idx]
attn_adj_matrix = self.attn_adj_matrix[idx]
h_degree = self.h_degree[idx]
if N_node > attn_adj_matrix.shape[1]:
attn_adj_matrix = np.pad(
attn_adj_matrix,
(
(0, 0),
(0, N_node - attn_adj_matrix.shape[1]),
(0, N_node - attn_adj_matrix.shape[2]),
),
mode="constant",
)
h_degree = np.pad(
h_degree, ((0, 0), (0, N_node - h_degree.shape[1])), mode="constant"
)
self.new_status_get(seq, i_j, attn_adj_matrix, h_degree)
self.i_j = i_j
self.attn_adj_matrix = attn_adj_matrix
self.h_degree = h_degree
return torch.tensor(
attn_adj_matrix, device=device, dtype=torch.float
), torch.tensor(h_degree, device=device, dtype=torch.float)
def set_attn_bias(self, seq, idx_list):
seq_shape = seq.shape
device = seq.device
degree_attn_mask = torch.zeros(
[seq_shape[0], seq_shape[1], seq_shape[1]], device=device
)
laplacian_attn_mask = None
if self.min_node > 0:
if self.sumto2:
laplacian_attn_mask = torch.zeros(
[seq_shape[0], seq_shape[1], seq_shape[1], 2], device=device
)
else:
laplacian_attn_mask = torch.zeros(
[seq_shape[0], seq_shape[1], seq_shape[1], 2 * self.min_node],
device=device,
)
if idx_list is not None:
degree_attn_mask[:, :-1, :-1] = self.current_degree_attn_mask[idx_list]
if laplacian_attn_mask is not None:
laplacian_attn_mask[:, :-1, :-1] = self.current_laplacian_attn_mask[
idx_list
]
return degree_attn_mask, laplacian_attn_mask
def forward(self, seq):
# start = time.time()
seq_tmp = seq.cpu().detach().numpy().tolist()
idx_list = None
if seq.shape[1] > 2 or (seq.shape[1] > 1 and self.use_class is False):
idx_list = [
self.current_seq.index(hash(str(seq_tmp[i][:-1])))
for i in range(seq.shape[0])
]
self.current_seq = [hash(str(seq_tmp[i])) for i in range(seq.shape[0])]
node_in_list = (seq >= self.atom[0]) & (seq <= self.atom[1])
len_atoms = node_in_list.sum(-1)
N_node = max(len_atoms.max().item(), self.min_node + 2)
# N_node = max(len_atoms.max().item(), 250)
batch_attn_adj_matrix, h_degree = self.update_status(seq, idx_list, N_node)
laplacian_attn_list, degree_attn_list = laplacian_pe_batch(
batch_attn_adj_matrix, self.min_node, idx_type=self.idx_type
)
degree_attn_list = degree_attn_list + 1
if self.want_h_degree:
degree_attn_list = degree_attn_list + h_degree
if self.min_node > 0 and self.sumto2:
laplacian_attn_list = laplacian_attn_list.reshape(
laplacian_attn_list.shape[0],
laplacian_attn_list.shape[1],
2,
self.min_node,
).sum(dim=-1)
degree_attn_mask, laplacian_attn_mask = self.set_attn_bias(seq, idx_list)
degree_tmp = torch.cat(
[degree_attn_list[i, : len_atoms[i]] for i in range(seq.shape[0])], 0
)
degree_attn_mask[:, -1][node_in_list] = degree_tmp
flag_pad = seq[:, -1] == self.dictionary.pad()
degree_attn_mask[flag_pad, -1] = 0
self.current_degree_attn_mask = degree_attn_mask
if self.min_node > 0:
laplacian_tmp = torch.cat(
[laplacian_attn_list[i, : len_atoms[i]] for i in range(seq.shape[0])], 0
)
laplacian_attn_mask[:, -1][node_in_list] = laplacian_tmp
laplacian_attn_mask[flag_pad, -1] = 0
self.current_laplacian_attn_mask = laplacian_attn_mask
return {
"decoder_degree_attn_mask": degree_attn_mask.long(),
"decoder_laplacian_attn_mask": laplacian_attn_mask,
}
def forward_train(self, seq):
for i in range(1, seq.shape[1]):
_ = self.forward(seq[:, :i])
result = self.forward(seq)
return result
| 7,731 | Python | .py | 173 | 33.219653 | 108 | 0.530591 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,243 | __init__.py | dptech-corp_NAG2G/NAG2G/modules/__init__.py | from .init_method import init_xavier_params
from .heads import MaskLMHead, ClassificationHead, NonLinearHead
from .freeze_network import freeze_network
from .attn_bias_layer import seq2attn
| 190 | Python | .py | 4 | 46.5 | 64 | 0.854839 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,244 | NAG2G.py | dptech-corp_NAG2G/NAG2G/losses/NAG2G.py | import math
import torch
import torch.nn.functional as F
from unicore import metrics, utils
from unicore.losses import UnicoreLoss, register_loss
import torch.nn as nn
import torch.distributed as dist
def get_loss(logits_decoder, decoder_target, padding_idx):
decoder_target = decoder_target[:, 1:]
logits_decoder = logits_decoder[:, :-1]
decode_tokens = decoder_target.ne(padding_idx)
decoder_sample_size = decode_tokens.long().sum()
decoder_loss = F.nll_loss(
F.log_softmax(
logits_decoder[decode_tokens], dim=-1, dtype=torch.float32),
decoder_target[decode_tokens].view(-1),
ignore_index=padding_idx,
reduction='mean',
)
decoder_pred = torch.argmax(
logits_decoder[decode_tokens], dim=-1)
decoder_hit = (decoder_pred == decoder_target[decode_tokens]).long().sum()
decoder_cnt = decoder_sample_size
acc_sentence_count = []
for i in range(decoder_target.shape[0]):
decoder_cnt_per_sen = decode_tokens[i].long().sum()
decoder_pred_per_sen = torch.argmax(
logits_decoder[i][decode_tokens[i]], dim=-1)
decoder_hit_per_sen = (decoder_pred_per_sen ==
decoder_target[i][decode_tokens[i]]).long().sum()
acc_sentence_count.append(decoder_hit_per_sen == decoder_cnt_per_sen)
acc_sentence_count = (sum(acc_sentence_count), len(acc_sentence_count))
return decoder_loss, decoder_hit, decoder_cnt, acc_sentence_count
@register_loss("NAG2GF")
class NAG2GFLoss(UnicoreLoss):
def __init__(self, task):
super().__init__(task)
self.padding_idx = task.dictionary.pad()
def forward(self, model, sample, reduce=True):
def inner_forward(input_key='net_input', target_key='target'):
logits_encoder, logits_decoder, cl_out, vae_kl_loss = model(
**sample[input_key], features_only=True)
loss = torch.tensor(0.0)
logging_output = {
"sample_size": 1,
"bsz": sample[input_key]['src_tokens'].size(0),
"seq_len": sample[input_key]['src_tokens'].size(1) * sample[input_key]['src_tokens'].size(0),
}
if logits_decoder is not None:
decoder_target = sample[input_key]['decoder_src_tokens']
decoder_loss, decoder_hit, decoder_cnt, acc_sentence_count = get_loss(
logits_decoder, decoder_target, self.padding_idx)
loss = decoder_loss * self.args.decoder_loss
logging_output = {
"sample_size": 1,
"bsz": sample[input_key]['src_tokens'].size(0),
"seq_len": sample[input_key]['src_tokens'].size(1) * sample[input_key]['src_tokens'].size(0),
"decoder_loss": decoder_loss.data,
"decoder_hit": decoder_hit.data,
"decoder_cnt": decoder_cnt.data,
"acc_sentence_hit": acc_sentence_count[0],
"acc_sentence_cnt": acc_sentence_count[1],
}
logging_output['loss'] = loss.data
return loss, 1, logging_output, cl_out
loss, sample_size, logging_output, cls_repr = inner_forward()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs, split='valid') -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
bsz = sum(log.get("bsz", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
seq_len = sum(log.get("seq_len", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size, sample_size, round=5
)
metrics.log_scalar(
"seq_len", seq_len / bsz, 1, round=3
)
decoder_loss = sum(log.get('decoder_loss', 0)
for log in logging_outputs)
if decoder_loss > 0:
metrics.log_scalar('decoder_loss', decoder_loss /
sample_size, sample_size, round=5)
decoder_acc = sum(log.get('decoder_hit', 0) for log in logging_outputs) / \
sum(log.get('decoder_cnt', 1) for log in logging_outputs)
if decoder_acc > 0:
metrics.log_scalar(
'decoder_acc', decoder_acc, sample_size, round=5)
decoder_cnt_t = sum(log.get('decoder_cnt', 1)
for log in logging_outputs)
decoder_ppl = math.exp(min(decoder_loss / decoder_cnt_t, 100))
if decoder_ppl > 0:
metrics.log_scalar(
'decoder_ppl', decoder_ppl, sample_size, round=5)
acc_sentence_count = sum(log.get('acc_sentence_hit', 0) for log in logging_outputs)
acc_sentence_count = acc_sentence_count / \
sum(log.get('acc_sentence_cnt', 0) for log in logging_outputs)
metrics.log_scalar('acc_sentence_percentage',
acc_sentence_count, sample_size, round=5)
@staticmethod
def logging_outputs_can_be_summed(is_train) -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
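# ---------------------------------------------------------------------------
# Smoke test for get_loss (illustrative only): the token ids are arbitrary and
# index 0 stands in for the padding symbol.
if __name__ == "__main__":
    vocab_size, pad_idx = 10, 0
    logits = torch.randn(2, 6, vocab_size)
    targets = torch.randint(1, vocab_size, (2, 6))
    targets[:, -2:] = pad_idx  # pretend the last two positions are padding
    loss, hit, cnt, acc_sentence = get_loss(logits, targets, pad_idx)
    print(loss.item(), hit.item(), cnt.item(), acc_sentence)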
| 5,524 | Python | .py | 109 | 39.045872 | 113 | 0.591481 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,245 | __init__.py | dptech-corp_NAG2G/NAG2G/losses/__init__.py | from pathlib import Path
import importlib
# automatically import any Python files in the criterions/ directory
for file in sorted(Path(__file__).parent.glob("*.py")):
if not file.name.startswith("_"):
importlib.import_module("NAG2G.losses." + file.name[:-3])
| 272 | Python | .py | 6 | 42.166667 | 68 | 0.724528 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,246 | G2G.py | dptech-corp_NAG2G/NAG2G/losses/G2G.py | import math
import torch
import torch.nn.functional as F
from unicore import metrics, utils
from unicore.losses import UnicoreLoss, register_loss
import torch.nn as nn
import torch.distributed as dist
def get_loss(logits_decoder, decoder_target, padding_idx):
decoder_target = decoder_target[:, 1:]
logits_decoder = logits_decoder[:, :-1]
decode_tokens = decoder_target.ne(padding_idx)
decoder_sample_size = decode_tokens.long().sum()
decoder_loss = F.nll_loss(
F.log_softmax(
logits_decoder[decode_tokens], dim=-1, dtype=torch.float32),
decoder_target[decode_tokens].view(-1),
ignore_index=padding_idx,
reduction='mean',
)
decoder_pred = torch.argmax(
logits_decoder[decode_tokens], dim=-1)
# print('test decoder_pred: ', decoder_pred)
decoder_hit = (decoder_pred == decoder_target[decode_tokens]).long().sum()
decoder_cnt = decoder_sample_size
acc_sentence_count = []
for i in range(decoder_target.shape[0]):
decoder_cnt_per_sen = decode_tokens[i].long().sum()
decoder_pred_per_sen = torch.argmax(
logits_decoder[i][decode_tokens[i]], dim=-1)
decoder_hit_per_sen = (decoder_pred_per_sen ==
decoder_target[i][decode_tokens[i]]).long().sum()
acc_sentence_count.append(decoder_hit_per_sen == decoder_cnt_per_sen)
acc_sentence_count = (sum(acc_sentence_count), len(acc_sentence_count))
return decoder_loss, decoder_hit, decoder_cnt, acc_sentence_count
@register_loss("G2G")
class G2GLoss(UnicoreLoss):
def __init__(self, task):
super().__init__(task)
self.padding_idx = task.dictionary.pad()
def forward(self, model, sample, reduce=True):
def inner_forward(input_key='net_input', target_key='target'):
logits_encoder, logits_decoder, cl_out, vae_kl_loss = model(
**sample[input_key], features_only=True)
loss = torch.tensor(0.0)
logging_output = {
"sample_size": 1,
"bsz": sample[input_key]['decoder_src_tokens'].size(0),
"seq_len": sample[input_key]['decoder_src_tokens'].size(1) * sample[input_key]['decoder_src_tokens'].size(0),
}
if logits_decoder is not None:
decoder_target = sample[input_key]['decoder_src_tokens']
decoder_loss, decoder_hit, decoder_cnt, acc_sentence_count = get_loss(
logits_decoder, decoder_target, self.padding_idx)
loss = decoder_loss * self.args.decoder_loss
logging_output = {
"sample_size": 1,
"bsz": sample[input_key]['decoder_src_tokens'].size(0),
"seq_len": sample[input_key]['decoder_src_tokens'].size(1) * sample[input_key]['decoder_src_tokens'].size(0),
"decoder_loss": decoder_loss.data,
"decoder_hit": decoder_hit.data,
"decoder_cnt": decoder_cnt.data,
"acc_sentence_hit": acc_sentence_count[0],
"acc_sentence_cnt": acc_sentence_count[1],
}
logging_output['loss'] = loss.data
return loss, 1, logging_output, cl_out
loss, sample_size, logging_output, cls_repr = inner_forward()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs, split='valid') -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
bsz = sum(log.get("bsz", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
seq_len = sum(log.get("seq_len", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size, sample_size, round=5
)
metrics.log_scalar(
"seq_len", seq_len / bsz, 1, round=3
)
decoder_loss = sum(log.get('decoder_loss', 0)
for log in logging_outputs)
if decoder_loss > 0:
metrics.log_scalar('decoder_loss', decoder_loss /
sample_size, sample_size, round=5)
decoder_acc = sum(log.get('decoder_hit', 0) for log in logging_outputs) / \
sum(log.get('decoder_cnt', 1) for log in logging_outputs)
if decoder_acc > 0:
metrics.log_scalar(
'decoder_acc', decoder_acc, sample_size, round=5)
decoder_cnt_t = sum(log.get('decoder_cnt', 1)
for log in logging_outputs)
decoder_ppl = math.exp(min(decoder_loss / decoder_cnt_t, 100))
if decoder_ppl > 0:
metrics.log_scalar(
'decoder_ppl', decoder_ppl, sample_size, round=5)
acc_sentence_count = sum(log.get('acc_sentence_hit', 0) for log in logging_outputs)
acc_sentence_count = acc_sentence_count / \
sum(log.get('acc_sentence_cnt', 0) for log in logging_outputs)
metrics.log_scalar('acc_sentence_percentage',
acc_sentence_count, sample_size, round=5)
@staticmethod
def logging_outputs_can_be_summed(is_train) -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 5,615 | Python | .py | 110 | 39.472727 | 129 | 0.593989 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,247 | preprocess_smi_to_3d.py | dptech-corp_NAG2G/data_preprocess/preprocess_smi_to_3d.py | import numpy as np
import warnings
import contextlib
import timeout_decorator
from sklearn.mixture import BayesianGaussianMixture
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
from rdkit.Chem import rdMolTransforms
from rdkit import RDLogger
RDLogger.DisableLog("rdApp.*")
warnings.filterwarnings(action="ignore")
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
def get_torsions(m):
m = Chem.RemoveHs(m)
torsionList = []
torsionSmarts = "[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]"
torsionQuery = Chem.MolFromSmarts(torsionSmarts)
matches = m.GetSubstructMatches(torsionQuery)
for match in matches:
idx2 = match[0]
idx3 = match[1]
bond = m.GetBondBetweenAtoms(idx2, idx3)
jAtom = m.GetAtomWithIdx(idx2)
kAtom = m.GetAtomWithIdx(idx3)
for b1 in jAtom.GetBonds():
if b1.GetIdx() == bond.GetIdx():
continue
idx1 = b1.GetOtherAtomIdx(idx2)
for b2 in kAtom.GetBonds():
if (b2.GetIdx() == bond.GetIdx()) or (b2.GetIdx() == b1.GetIdx()):
continue
idx4 = b2.GetOtherAtomIdx(idx3)
# skip 3-membered rings
if idx4 == idx1:
continue
# skip torsions that include hydrogens
if (m.GetAtomWithIdx(idx1).GetAtomicNum() == 1) or (
m.GetAtomWithIdx(idx4).GetAtomicNum() == 1
):
continue
if m.GetAtomWithIdx(idx4).IsInRing():
torsionList.append((idx4, idx3, idx2, idx1))
break
else:
torsionList.append((idx1, idx2, idx3, idx4))
break
break
return torsionList
def SetDihedral(conf, atom_idx, new_vale):
rdMolTransforms.SetDihedralRad(
conf, atom_idx[0], atom_idx[1], atom_idx[2], atom_idx[3], new_vale
)
def GetDihedral(conf, atom_idx):
return rdMolTransforms.GetDihedralRad(
conf, atom_idx[0], atom_idx[1], atom_idx[2], atom_idx[3]
)
@timeout_decorator.timeout(30)
def inner_smi2coords(smi, num_confs=100, seed=42, cluster_size=10):
coordinate_list, rotable_bonds_list = [], []
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol, addCoords=True)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
wt = Descriptors.ExactMolWt(mol)
# skip for heavy molecules
if wt > 2000:
return None
# at least have two atoms
if len(atoms) < 2:
return None
# allconformers = AllChem.EmbedMultipleConfs(mol, numConfs=num_confs, randomSeed=seed, clearConfs=True, numThreads=1)
res = AllChem.EmbedMolecule(mol, randomSeed=seed)
if res == 0:
rotable_bonds = get_torsions(mol)
for i in range(num_confs):
np.random.seed(i)
values = 3.1415926 * 2 * np.random.rand(len(rotable_bonds))
for idx in range(len(rotable_bonds)):
SetDihedral(mol.GetConformer(), rotable_bonds[idx], values[idx])
Chem.rdMolTransforms.CanonicalizeConformer(mol.GetConformer())
try:
AllChem.MMFFOptimizeMolecule(mol)
coordinate_list.append(
mol.GetConformer().GetPositions().astype(np.float32)
)
rotable_bonds_value = [
GetDihedral(mol.GetConformer(), rotable_bonds[idx])
for idx in range(len(rotable_bonds))
]
rotable_bonds_list.append(rotable_bonds_value)
except:
continue
elif res == -1:
mol_tmp = Chem.MolFromSmiles(smi)
AllChem.EmbedMolecule(mol_tmp, maxAttempts=5000, randomSeed=seed)
mol_tmp = AllChem.AddHs(mol_tmp, addCoords=True)
rotable_bonds = get_torsions(mol_tmp)
for i in range(num_confs):
np.random.seed(i)
values = 3.1415926 * 2 * np.random.rand(len(rotable_bonds))
for idx in range(len(rotable_bonds)):
SetDihedral(mol_tmp.GetConformer(), rotable_bonds[idx], values[idx])
Chem.rdMolTransforms.CanonicalizeConformer(mol_tmp.GetConformer())
try:
AllChem.MMFFOptimizeMolecule(mol_tmp)
coordinate_list.append(
mol_tmp.GetConformer().GetPositions().astype(np.float32)
)
rotable_bonds_value = [
GetDihedral(mol_tmp.GetConformer(), rotable_bonds[idx])
for idx in range(len(rotable_bonds))
]
rotable_bonds_list.append(rotable_bonds_value)
except:
continue
if num_confs != cluster_size:
X = np.array(rotable_bonds_list)
clf = BayesianGaussianMixture(n_components=cluster_size, random_state=seed).fit(
X
)
probs = clf.predict_proba(X)
# filter redundant clusters
probs = probs[:, probs.mean(axis=0) != 0.0]
        ids = list(probs.argmax(axis=0))
        # padding to cluster_size (list concatenation, not element-wise addition)
        if len(ids) < cluster_size:
            ids = ids + [ids[0]] * (cluster_size - len(ids))
        cluster_coordinate_list = [coordinate_list[idx] for idx in ids]
else:
cluster_coordinate_list = coordinate_list
return {"atoms": atoms, "coordinates": cluster_coordinate_list, "smi": smi}
def smi2coords_3D(smi):
try:
return inner_smi2coords(smi, num_confs=10)
except:
return None
def smi2_2Dcoords(smi):
mol = Chem.MolFromSmiles(smi)
mol = AllChem.AddHs(mol)
AllChem.Compute2DCoords(mol)
coordinates = mol.GetConformer().GetPositions().astype(np.float32)
    assert len(mol.GetAtoms()) == len(
        coordinates
    ), "2D coordinates shape is not aligned with {}".format(smi)
return coordinates
def smi2coords_2D(smi):
try:
mol = Chem.MolFromSmiles(smi)
coordinate_list = [smi2_2Dcoords(smi).astype(np.float32)]
mol = AllChem.AddHs(mol)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()] # after add H
return {"atoms": atoms, "coordinates": coordinate_list, "smi": smi}
except:
return None
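# ---------------------------------------------------------------------------
# Minimal example of the 2D fallback path (illustrative only; the SMILES is an
# arbitrary choice). The 3D path is used the same way but is much slower.
if __name__ == "__main__":
    out = smi2coords_2D("CCO")
    if out is not None:
        print(out["atoms"], out["coordinates"][0].shape)  # 9 atoms incl. hydrogens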
| 6,673 | Python | .py | 169 | 29.828402 | 121 | 0.604536 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,248 | basic.py | dptech-corp_NAG2G/data_preprocess/basic.py | import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem
def get_canonical_smile(testsmi, isomericSmiles=True):
if testsmi == "":
return testsmi
try:
mol = Chem.MolFromSmiles(testsmi)
canonical_smi = Chem.MolToSmiles(mol, isomericSmiles=isomericSmiles)
except:
canonical_smi = testsmi
return canonical_smi
def get_target_order(smiles_target, check=False, add_h=True):
mol = Chem.MolFromSmiles(smiles_target)
if add_h:
mol = AllChem.AddHs(mol)
atoms = [atom.GetAtomMapNum() for atom in mol.GetAtoms()]
assert (not check) or (0 not in atoms)
return atoms
def get_atoms(smi, add_h=True):
mol = Chem.MolFromSmiles(smi)
if add_h:
mol = AllChem.AddHs(mol)
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()] # after add H
if not add_h:
atoms = [i for i in atoms if i != "H"]
return atoms
def csv_file_read(path, usecols=None):
head_row = pd.read_csv(path, nrows=0)
print(list(head_row))
head_row_list = list(head_row)
if usecols is None:
usecols = head_row_list
csv_result = pd.read_csv(path, usecols=usecols)
row_list = csv_result.values.tolist()
return row_list
def rm_h_coordinates_map(target_atoms, target_coordinates, target_map):
assert (
len(target_atoms) == len(target_map)
and len(target_atoms) == target_coordinates.shape[1]
)
target_atoms_tmp = [i for i in target_atoms if i != "H"]
idx = [i != "H" for i in target_atoms]
target_coordinates_tmp = target_coordinates[:, idx]
target_map_tmp = [
target_map[i] for i in range(len(target_atoms)) if target_atoms[i] != "H"
]
assert len(target_atoms_tmp) == len(target_map_tmp) and len(target_atoms_tmp) == (
target_coordinates_tmp.shape[1]
)
return target_atoms_tmp, target_coordinates_tmp, target_map_tmp
def renumber_atom_maps(smi):
if smi == "":
return smi
mol = Chem.MolFromSmiles(smi)
if mol is None:
raise ValueError("Invalid SMILES string")
atom_map_nums = [atom.GetAtomMapNum() for atom in mol.GetAtoms()]
if any(
num == 0
for atom, num in zip(mol.GetAtoms(), atom_map_nums)
if atom.GetAtomicNum() > 1
):
current_map_num = 1
for atom in mol.GetAtoms():
if atom.GetAtomicNum() > 1: # Heavy atom
atom.SetAtomMapNum(current_map_num)
current_map_num += 1
else: # H atom
atom.SetAtomMapNum(0)
return Chem.MolToSmiles(mol)
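# ---------------------------------------------------------------------------
# Illustrative round trip (inputs are arbitrary examples): canonicalize an
# unmapped SMILES and assign sequential atom-map numbers to its heavy atoms.
if __name__ == "__main__":
    print(get_canonical_smile("OCC"))      # -> "CCO"
    print(renumber_atom_maps("CC(=O)O"))   # heavy atoms receive maps 1..4
    print(get_atoms("CCO", add_h=False))   # -> ['C', 'C', 'O']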
| 2,585 | Python | .py | 71 | 29.929577 | 86 | 0.640144 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,249 | lmdb_preprocess.py | dptech-corp_NAG2G/data_preprocess/lmdb_preprocess.py | # Copyright (c) DP Technology.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import lmdb
import os
import sys
import pickle
import logging
from tqdm import tqdm
import numpy as np
from preprocess_smi_to_3d import smi2coords_3D, smi2coords_2D
from basic import (
csv_file_read,
get_target_order,
rm_h_coordinates_map,
get_atoms,
get_canonical_smile,
renumber_atom_maps,
)
logger = logging.getLogger(__name__)
def make_lmdb(path_smi, outputfilename):
assert ".csv" in path_smi
try:
os.remove(outputfilename)
except:
pass
dataset_smi = csv_file_read(path_smi)
env_new = lmdb.open(
outputfilename,
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn_write = env_new.begin(write=True)
ii = 0
for i in tqdm(range(len(dataset_smi))):
result = dataset_smi[i]
raw_string = result[2]
target = raw_string.split(">")[-1]
target = get_canonical_smile(target)
reactant = raw_string.split(">")[0]
reactant = get_canonical_smile(reactant)
reactant = renumber_atom_maps(reactant)
target = renumber_atom_maps(target)
raw_string = f"{reactant}>>{target}"
result = {}
result["rxn_smiles"] = raw_string
result["target_map"] = get_target_order(target, check=False, add_h=True)
result["target_atoms"] = get_atoms(target, add_h=True)
tmp_result_3d = smi2coords_3D(target)
not_exist_3d = tmp_result_3d is None or len(tmp_result_3d["coordinates"]) == 0
if not not_exist_3d:
assert result["target_atoms"] == tmp_result_3d["atoms"]
result["target_coordinates"] = tmp_result_3d["coordinates"].copy()
tmp_result_2d = smi2coords_2D(target)
not_exist_2d = tmp_result_2d is None or len(tmp_result_2d["coordinates"]) == 0
if not_exist_2d and not_exist_3d:
print("No 2D or 3D coordinates")
continue
elif not not_exist_2d:
            if not_exist_3d:
                # only 2D coordinates are available: use them on their own
                assert tmp_result_2d["atoms"] == result["target_atoms"]
                result["target_coordinates"] = tmp_result_2d["coordinates"].copy()
            else:
                # both 2D and 3D exist: append the 2D conformer to the 3D list
                assert tmp_result_2d["atoms"] == result["target_atoms"]
                assert (
                    result["target_coordinates"][0].shape
                    == tmp_result_2d["coordinates"][0].shape
                )
                result["target_coordinates"] += tmp_result_2d["coordinates"].copy()
result["target_coordinates"] = np.array(result["target_coordinates"])
if result["target_coordinates"].shape[0] > 0:
target_atoms_tmp, target_coordinates_tmp, target_map_tmp = (
rm_h_coordinates_map(
result["target_atoms"],
result["target_coordinates"],
result["target_map"],
)
)
result["target_atoms"] = target_atoms_tmp
result["target_coordinates"] = target_coordinates_tmp
result["target_map"] = target_map_tmp
if len(result["target_coordinates"]) > 0:
inner_output = pickle.dumps(result, protocol=-1)
txn_write.put(f"{ii}".encode("ascii"), inner_output)
ii += 1
txn_write.commit()
env_new.close()
print("count", ii)
if __name__ == "__main__":
path_smi = sys.argv[1]
outputfilename = sys.argv[2]
make_lmdb(
path_smi=path_smi,
outputfilename=outputfilename,
)
| 3,728 | Python | .py | 100 | 28.54 | 86 | 0.595633 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,250 | inference.py | dptech-corp_NAG2G/unimol_plus/inference.py | #!/usr/bin/env python3 -u
# Copyright (c) DP Techonology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import pickle
import torch
import lmdb
import gzip
import numpy as np
from unicore import checkpoint_utils, distributed_utils, options, utils
from unicore.logging import progress_bar
from unicore import tasks
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("unimol.inference")
def predicted_lddt(plddt_logits: torch.Tensor) -> torch.Tensor:
"""Computes per-residue pLDDT from logits.
Args:
logits: [num_res, num_bins] output from the PredictedLDDTHead.
Returns:
plddt: [num_res] per-residue pLDDT.
"""
num_bins = plddt_logits.shape[-1]
bin_probs = torch.nn.functional.softmax(plddt_logits.float(), dim=-1)
bin_width = 1.0 / num_bins
bounds = torch.arange(
start=0.5 * bin_width, end=1.0, step=bin_width, device=plddt_logits.device
)
plddt = torch.sum(
bin_probs * bounds.view(*((1,) * len(bin_probs.shape[:-1])), *bounds.shape),
dim=-1,
)
return plddt
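# Example (illustrative): for plddt_logits of shape [batch, num_atoms, num_bins],
# e.g. predicted_lddt(torch.randn(2, 16, 50)), the result has shape
# [batch, num_atoms] with values strictly between 0 and 1.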
def masked_mean(mask, value, dim, eps=1e-10, keepdim=False):
mask = mask.expand(*value.shape)
return torch.sum(mask * value, dim=dim, keepdim=keepdim) / (
eps + torch.sum(mask, dim=dim, keepdim=keepdim)
)
def main(args):
assert (
args.batch_size is not None
), "Must specify batch size either with --batch-size"
use_fp16 = args.fp16
use_cuda = torch.cuda.is_available() and not args.cpu
if use_cuda:
torch.cuda.set_device(args.device_id)
if args.distributed_world_size > 1:
data_parallel_world_size = distributed_utils.get_data_parallel_world_size()
data_parallel_rank = distributed_utils.get_data_parallel_rank()
else:
data_parallel_world_size = 1
data_parallel_rank = 0
# Load model
logger.info("loading model(s) from {}".format(args.path))
state = checkpoint_utils.load_checkpoint_to_cpu(args.path)
task = tasks.setup_task(args)
model = task.build_model(args)
model.load_state_dict(state["ema"]["params"], strict=True)
# Move models to GPU
# if use_fp16:
# model.half()
if use_cuda:
model.cuda()
model.eval()
# Print args
logger.info(args)
# Build loss
loss = task.build_loss(args)
loss.eval()
if data_parallel_world_size > 1:
tmp = distributed_utils.all_gather_list(
[torch.tensor(0)],
max_size=10000,
group=distributed_utils.get_data_parallel_group(),
)
for subset in args.valid_subset.split(","):
try:
task.load_dataset(subset, combine=False, epoch=1, force_valid=True)
dataset = task.dataset(subset)
except KeyError:
raise Exception("Cannot find dataset: " + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
batch_size=args.batch_size,
ignore_invalid_inputs=True,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=data_parallel_world_size,
shard_id=data_parallel_rank,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
if len(sample) == 0:
continue
with torch.no_grad():
(_, pos_pred, _, plddt_logits) = model(**sample)[:4]
id = sample["batched_data"]["id"]
assert len(id) == len(pos_pred)
id = id.cpu().numpy()
pos_pred = pos_pred.cpu().numpy()
atom_mask = sample["batched_data"]["atom_mask"].float()
plddt = (
masked_mean(atom_mask, predicted_lddt(plddt_logits), dim=-1)
.cpu()
.numpy()
)
outputs.append((id, pos_pred, plddt))
progress.log({}, step=i)
pickle.dump(
outputs,
open(
os.path.join(
args.results_path, subset + "_{}.pkl".format(data_parallel_rank)
),
"wb",
),
)
print("Finished {} subset, rank {}".format(subset, data_parallel_rank))
if data_parallel_world_size > 1:
tmp = distributed_utils.all_gather_list(
[torch.tensor(0)],
max_size=10000,
group=distributed_utils.get_data_parallel_group(),
)
return None
def cli_main():
parser = options.get_validation_parser()
options.add_model_args(parser)
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| 5,540 | Python | .py | 151 | 28.490066 | 84 | 0.60492 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,251 | setup.py | dptech-corp_NAG2G/unimol_plus/setup.py | """Install script for setuptools."""
from setuptools import find_packages
from setuptools import setup
setup(
name="unimol",
version="2.0.0",
description="",
author="DP Technology",
author_email="[email protected]",
license="The MIT License",
url="https://github.com/dptech-corp/Uni-Mol",
packages=find_packages(
exclude=["scripts", "tests", "example_data", "docker", "figure"]
),
install_requires=[
"numpy",
"pandas",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| 995 | Python | .py | 30 | 27.166667 | 72 | 0.620976 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,252 | __init__.py | dptech-corp_NAG2G/unimol_plus/unimol/__init__.py | __version__ = "2.0.0"
import importlib
import unimol.tasks
import unimol.data
import unimol.models
import unimol.losses | 128 | Python | .py | 6 | 19 | 21 | 0.824561 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,253 | data_utils.py | dptech-corp_NAG2G/unimol_plus/unimol/data/data_utils.py | # Copyright (c) DP Technology.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import contextlib
def str_hash(text: str):
hash = 0
for ch in text:
hash = (hash * 281 ^ ord(ch) * 997) & 0xFFFFFFFF
return hash
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds, key=None):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
def check_seed(s):
assert type(s) == int or type(s) == np.int32 or type(s) == np.int64
check_seed(seed)
if len(addl_seeds) > 0:
for s in addl_seeds:
check_seed(s)
seed = int(hash((seed, *addl_seeds)) % 1e8)
if key is not None:
seed = int(hash((seed, str_hash(key))) % 1e8)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
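# ---------------------------------------------------------------------------
# Illustrative check (not part of the original file): the same
# (seed, addl_seeds, key) triple reproduces the same draws, and the global RNG
# state is restored afterwards.
if __name__ == "__main__":
    with numpy_seed(42, 0, key="demo"):
        a = np.random.rand(3)
    with numpy_seed(42, 0, key="demo"):
        b = np.random.rand(3)
    assert np.allclose(a, b)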
| 1,037 | Python | .py | 32 | 26.5625 | 77 | 0.636273 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,254 | conformer_sample_dataset.py | dptech-corp_NAG2G/unimol_plus/unimol/data/conformer_sample_dataset.py | # Copyright (c) DP Technology.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from functools import lru_cache
from unicore.data import BaseWrapperDataset, data_utils
from copy import deepcopy
from tqdm import tqdm
class ConformerPCQSampleDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
seed,
coordinates,
target_coordinates,
):
self.dataset = dataset
self.seed = seed
self.coordinates = coordinates
self.target_coordinates = target_coordinates
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
data = deepcopy(self.dataset[index])
size = len(data[self.coordinates])
with data_utils.numpy_seed(self.seed, epoch, index):
sample_idx = np.random.randint(size)
coordinates = data[self.coordinates][sample_idx]
if self.target_coordinates is not None:
if isinstance(data[self.target_coordinates], list):
target_coordinates = data[self.target_coordinates][-1]
else:
target_coordinates = data[self.target_coordinates]
del data[self.target_coordinates]
data["target_coordinates"] = target_coordinates
del data[self.coordinates]
data["coordinates"] = coordinates
return data
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
class ConformerPCQTTASampleDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
seed,
coordinates,
target_coordinates,
):
self.dataset = dataset
self.seed = seed
self.coordinates = coordinates
self.target_coordinates = target_coordinates
self._init_idx()
self.set_epoch(None)
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def _init_idx(self):
self.idx2key = []
for i in tqdm(range(len(self.dataset))):
size = len(self.dataset[i][self.coordinates])
self.idx2key.extend([(i, j) for j in range(size)])
self.cnt = len(self.idx2key)
def __len__(self):
return self.cnt
@lru_cache(maxsize=16)
def __cached_item__(self, index: int, epoch: int):
key_idx, conf_idx = self.idx2key[index]
data = self.dataset[key_idx]
coordinates = data[self.coordinates][conf_idx]
if self.target_coordinates is not None:
if isinstance(data[self.target_coordinates], list):
target_coordinates = data[self.target_coordinates][-1]
else:
target_coordinates = data[self.target_coordinates]
ret_data = deepcopy(data)
del ret_data[self.coordinates]
ret_data["coordinates"] = coordinates
if self.target_coordinates is not None:
del ret_data[self.target_coordinates]
ret_data["target_coordinates"] = target_coordinates
return ret_data
def __getitem__(self, index: int):
return self.__cached_item__(index, self.epoch)
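# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): a plain list of dicts stands in for
# the LMDB-backed dataset; the key names below are assumptions mirroring the
# preprocessing output.
if __name__ == "__main__":
    fake = [
        {
            "coordinates": [np.zeros((4, 3), dtype=np.float32)],
            "target_coordinates": [np.ones((4, 3), dtype=np.float32)],
        }
    ]
    ds = ConformerPCQSampleDataset(
        fake, seed=1, coordinates="coordinates", target_coordinates="target_coordinates"
    )
    item = ds[0]
    print(item["coordinates"].shape, item["target_coordinates"].shape)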
| 3,352 | Python | .py | 87 | 30.034483 | 70 | 0.633733 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,255 | molecule_dataset.py | dptech-corp_NAG2G/unimol_plus/unimol/data/molecule_dataset.py | # Copyright (c) DP Technology.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from unicore.data import BaseWrapperDataset
from . import data_utils
from numba import njit
from functools import lru_cache
from scipy.spatial.transform import Rotation
def get_graph_features(item, N_vnode=1):
atom_feat_sizes = [128] + [16 for _ in range(8)]
edge_feat_sizes = [16, 16, 16]
edge_attr, edge_index, x = (
item["edge_attr"],
item["edge_index"],
item["node_attr"],
)
N = x.shape[0]
atom_feat = convert_to_single_emb(x, atom_feat_sizes)
# node adj matrix [N, N] bool
adj = np.zeros([N, N], dtype=np.int32)
adj[edge_index[0, :], edge_index[1, :]] = 1
degree = adj.sum(axis=-1)
# edge feature here
if len(edge_attr.shape) == 1:
edge_attr = edge_attr[:, None]
edge_feat = np.zeros([N, N, edge_attr.shape[-1]], dtype=np.int32)
edge_feat[edge_index[0, :], edge_index[1, :]] = (
convert_to_single_emb(edge_attr, edge_feat_sizes) + 1
)
shortest_path_result = floyd_warshall(adj)
    spatial_pos = torch.from_numpy(shortest_path_result).long()  # +1 for padding is applied below
# combine
feat = {}
feat["atom_feat"] = torch.from_numpy(atom_feat).long()
feat["atom_mask"] = torch.ones(N).long()
feat["edge_feat"] = torch.from_numpy(edge_feat).long() + 1
feat["shortest_path"] = spatial_pos + 1
feat["degree"] = torch.from_numpy(degree).long().view(-1) + 1
# pair-type
atoms = feat["atom_feat"][..., 0]
pair_type = torch.cat(
[
atoms.view(-1, 1, 1).expand(-1, N, -1),
atoms.view(1, -1, 1).expand(N, -1, -1),
],
dim=-1,
)
feat["pair_type"] = convert_to_single_emb(pair_type, [128, 128])
feat["attn_bias"] = torch.zeros((N + N_vnode, N + N_vnode), dtype=torch.float32)
return feat
def kabsch_rotation(P, Q):
C = P.transpose(-1, -2) @ Q
V, _, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
V[:, -1] = -V[:, -1]
U = V @ W
return U
def get_optimal_transform(src_atoms, tgt_atoms):
src_center = src_atoms.mean(-2)[None, :]
tgt_center = tgt_atoms.mean(-2)[None, :]
r = kabsch_rotation(src_atoms - src_center, tgt_atoms - tgt_center)
x = tgt_center - src_center @ r
return r, x
class Unimolv2Features(BaseWrapperDataset):
def __init__(
self,
dataset,
src_pos_dataset,
tgt_pos_dataset,
is_train,
label_prob,
mid_prob,
mid_lower,
mid_upper,
noise,
seed,
N_vnode=1
):
super().__init__(dataset)
self.dataset = dataset
self.src_pos_dataset = src_pos_dataset
self.tgt_pos_dataset = tgt_pos_dataset
self.seed = seed
self.is_train = is_train
self.label_prob = label_prob
self.mid_prob = mid_prob
self.mid_lower = mid_lower
self.mid_upper = mid_upper
self.noise = noise
self.N_vnode = N_vnode
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.src_pos_dataset.set_epoch(epoch)
if self.tgt_pos_dataset is not None:
self.tgt_pos_dataset.set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, idx: int):
return self.__getitem_cached__(self.epoch, idx)
@lru_cache(maxsize=16)
def __getitem_cached__(self, epoch: int, idx: int):
with data_utils.numpy_seed(self.seed, epoch, idx):
data = self.dataset[idx]
feat = get_graph_features(data, N_vnode=self.N_vnode)
pos = self.src_pos_dataset[idx]
pos_target = (
self.tgt_pos_dataset[idx] if self.tgt_pos_dataset is not None else pos
)
if self.is_train:
random_rotate = Rotation.random().as_matrix()
pos_target = pos_target @ random_rotate
pos_target = torch.from_numpy(pos_target).float()
use_label = False
use_mid = False
if self.is_train:
p = np.random.rand()
if p < self.label_prob:
use_label = True
elif p < self.label_prob + self.mid_prob:
use_mid = True
if use_label:
feat["pos"] = (
pos_target
+ self.noise
* torch.from_numpy(np.random.randn(*pos_target.shape)).float()
)
elif use_mid:
q = np.random.uniform(self.mid_lower, self.mid_upper)
pos = torch.from_numpy(pos).float()
R, T = get_optimal_transform(pos, pos_target)
pos = pos @ R + T
feat["pos"] = (
q * pos
+ (1 - q) * pos_target
+ self.noise
* torch.from_numpy(np.random.randn(*pos_target.shape)).float()
)
else:
feat["pos"] = torch.from_numpy(pos).float()
def zero_center(pos):
return pos - pos.mean(0, keepdim=True)
feat["pos"] = zero_center(feat["pos"])
feat["pos_target"] = zero_center(pos_target)
R, T = get_optimal_transform(feat["pos"], feat["pos_target"])
feat["pos"] = feat["pos"] @ R + T
feat["target"] = data["target"] if "target" in data.keys() and data["target"] is not None else 0.0
if "id" in data.keys():
feat["id"] = data["id"]
return feat
def collater(self, items):
multi_hop_max_dist = 5
target = np.stack([x["target"] for x in items])
if "id" in items[0].keys():
id = np.stack([int(x["id"]) for x in items]).astype(np.int64)
pad_fns = {
"atom_feat": pad_1d_feat,
"atom_mask": pad_1d,
"edge_feat": pad_2d_feat,
"shortest_path": pad_2d,
"degree": pad_1d,
"pos": pad_1d_feat,
"pos_target": pad_1d_feat,
"pair_type": pad_2d_feat,
"attn_bias": pad_attn_bias,
}
max_node_num = max([item["atom_mask"].shape[0] for item in items])
max_node_num = (max_node_num + 1 + 3) // 4 * 4 - 1
batched_data = {}
for key in items[0].keys():
samples = [item[key] for item in items]
if key in pad_fns:
if key == "attn_bias":
batched_data[key] = pad_fns[key](samples, max_node_num, self.N_vnode)
else:
batched_data[key] = pad_fns[key](samples, max_node_num)
batched_data["target"] = torch.from_numpy(target).float()
if "id" in batched_data.keys():
batched_data["id"] = torch.from_numpy(id).long()
return batched_data
@njit
def floyd_warshall(M):
(nrows, ncols) = M.shape
assert nrows == ncols
n = nrows
# set unreachable nodes distance to 510
for i in range(n):
for j in range(n):
if M[i, j] == 0:
M[i, j] = 510
for i in range(n):
M[i, i] = 0
    # Floyd-Warshall algorithm
for k in range(n):
for i in range(n):
for j in range(n):
cost_ikkj = M[i, k] + M[k, j]
if M[i, j] > cost_ikkj:
M[i, j] = cost_ikkj
for i in range(n):
for j in range(n):
if M[i, j] >= 510:
M[i, j] = 510
return M
def convert_to_single_emb(x, sizes):
assert x.shape[-1] == len(sizes)
offset = 1
for i in range(len(sizes)):
assert (x[..., i] < sizes[i]).all()
x[..., i] = x[..., i] + offset
offset += sizes[i]
return x
def pad_1d(samples, pad_len, pad_value=0):
batch_size = len(samples)
tensor = torch.full([batch_size, pad_len], pad_value, dtype=samples[0].dtype)
for i in range(batch_size):
tensor[i, : samples[i].shape[0]] = samples[i]
return tensor
def pad_1d_feat(samples, pad_len, pad_value=0):
batch_size = len(samples)
assert len(samples[0].shape) == 2
feat_size = samples[0].shape[-1]
tensor = torch.full(
[batch_size, pad_len, feat_size], pad_value, dtype=samples[0].dtype
)
for i in range(batch_size):
tensor[i, : samples[i].shape[0]] = samples[i]
return tensor
def pad_2d(samples, pad_len, pad_value=0):
batch_size = len(samples)
tensor = torch.full(
[batch_size, pad_len, pad_len], pad_value, dtype=samples[0].dtype
)
for i in range(batch_size):
tensor[i, : samples[i].shape[0], : samples[i].shape[1]] = samples[i]
return tensor
def pad_2d_feat(samples, pad_len, pad_value=0):
batch_size = len(samples)
assert len(samples[0].shape) == 3
feat_size = samples[0].shape[-1]
tensor = torch.full(
[batch_size, pad_len, pad_len, feat_size], pad_value, dtype=samples[0].dtype
)
for i in range(batch_size):
tensor[i, : samples[i].shape[0], : samples[i].shape[1]] = samples[i]
return tensor
def pad_attn_bias(samples, pad_len, N_vnode=1):
batch_size = len(samples)
pad_len = pad_len + N_vnode
tensor = torch.full(
[batch_size, pad_len, pad_len], float("-inf"), dtype=samples[0].dtype
)
for i in range(batch_size):
tensor[i, : samples[i].shape[0], : samples[i].shape[1]] = samples[i]
tensor[i, samples[i].shape[0] :, : samples[i].shape[1]] = 0
return tensor
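# Rough shape sketch of the padding helpers above (hypothetical sizes; T(a, b) denotes
# a tensor of that shape):
#   pad_1d_feat([T(3, d), T(5, d)], pad_len=7)   -> T(2, 7, d)
#   pad_2d([T(3, 3), T(5, 5)], pad_len=7)        -> T(2, 7, 7)
#   pad_attn_bias(samples, pad_len=7, N_vnode=1) -> T(2, 8, 8), filled with -inf except
#   for the zeroed rows below each sample's block.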
| 9,658 | Python | .py | 257 | 28.645914 | 110 | 0.548387 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,256 | coord_noise_dataset.py | dptech-corp_NAG2G/unimol_plus/unimol/data/coord_noise_dataset.py | # Copyright (c) DP Technology.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import numpy as np
import torch
from unicore.data import BaseWrapperDataset
from . import data_utils
def kabsch_rotation(P, Q):
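    # Kabsch algorithm: least-squares rotation aligning P onto Q, with a determinant
    # check so the result is a proper rotation (no reflection).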
C = P.transpose(-1, -2) @ Q
V, _, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
V[:, -1] = -V[:, -1]
U = V @ W
return U
def get_optimal_transform(src_atoms, tgt_atoms):
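    # Returns (R, t) such that src_atoms @ R + t is least-squares aligned to tgt_atoms.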
src_center = src_atoms.mean(-2)[None, :]
tgt_center = tgt_atoms.mean(-2)[None, :]
r = kabsch_rotation(src_atoms - src_center, tgt_atoms - tgt_center)
x = tgt_center - src_center @ r
return r, x
class CoordNoiseDataset(BaseWrapperDataset):
def __init__(
self,
dataset: torch.utils.data.Dataset,
tgt_dataset: torch.utils.data.Dataset,
coord_gen_prob: float,
coord_noise_prob: float,
src_noise: float = 1.0,
tgt_noise: float = 1.0,
seed: int = 1,
):
assert 0.0 <= coord_noise_prob <= 1.0
self.dataset = dataset
self.tgt_dataset = tgt_dataset
self.coord_gen_prob = coord_gen_prob
self.coord_noise_prob = coord_noise_prob
self.seed = seed
self.src_noise = src_noise
self.tgt_noise = tgt_noise
self.epoch = None
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.dataset.set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, index: int):
return self.__getitem_cached__(self.epoch, index)
@lru_cache(maxsize=16)
def __getitem_cached__(self, epoch: int, index: int):
src_coord = self.dataset[index]
tgt_coord = self.tgt_dataset[index]
num_atoms = src_coord.shape[0]
with data_utils.numpy_seed(self.seed, epoch, index):
if np.random.rand() < self.coord_gen_prob:
src_coord = np.copy(src_coord)
noise = self.src_noise
else:
src_coord = np.copy(tgt_coord)
noise = self.tgt_noise
if np.random.rand() < self.coord_noise_prob:
src_coord = src_coord + np.random.randn(num_atoms, 3) * noise
R, T = get_optimal_transform(src_coord, tgt_coord)
src_coord = src_coord @ R + T
return {"coordinates": src_coord.astype(np.float32)}
| 2,471 | Python | .py | 65 | 30.430769 | 77 | 0.606516 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,257 | __init__.py | dptech-corp_NAG2G/unimol_plus/unimol/data/__init__.py | from .key_dataset import KeyDataset
from .conformer_sample_dataset import (
ConformerPCQSampleDataset,
ConformerPCQTTASampleDataset,
)
from .coord_noise_dataset import CoordNoiseDataset
from .lmdb_dataset import (
LMDBPCQDataset,
)
from .molecule_dataset import (
Unimolv2Features,
)
from .data_utils import numpy_seed
__all__ = []
| 349 | Python | .py | 14 | 22.714286 | 50 | 0.796407 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,258 | key_dataset.py | dptech-corp_NAG2G/unimol_plus/unimol/data/key_dataset.py | # Copyright (c) DP Technology.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class KeyDataset(BaseWrapperDataset):
def __init__(self, dataset, key):
self.dataset = dataset
self.key = key
self.epoch = None
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __len__(self):
return len(self.dataset)
@lru_cache(maxsize=16)
def __cached_item__(self, idx: int, epoch: int):
return self.dataset[idx][self.key]
def __getitem__(self, idx):
return self.__cached_item__(idx, self.epoch)
| 760 | Python | .py | 20 | 32.25 | 65 | 0.671214 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,259 | lmdb_dataset.py | dptech-corp_NAG2G/unimol_plus/unimol/data/lmdb_dataset.py | # Copyright (c) DP Technology.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import lmdb
import os
import numpy as np
import gzip
import collections
import pickle
from functools import lru_cache
import logging
from . import data_utils
logger = logging.getLogger(__name__)
class LMDBPCQDataset:
def __init__(self, db_path):
self.db_path = db_path
assert os.path.isfile(self.db_path), "{} not found".format(self.db_path)
env = self.connect_db(self.db_path)
with env.begin() as txn:
self._keys = list(txn.cursor().iternext(values=False))
def connect_db(self, lmdb_path, save_to_self=False):
env = lmdb.open(
lmdb_path,
subdir=False,
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=256,
)
if not save_to_self:
return env
else:
self.env = env
def __len__(self):
return len(self._keys)
@lru_cache(maxsize=16)
def __getitem__(self, idx):
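        # Open the LMDB environment lazily inside the calling process, so that each
        # dataloader worker ends up with its own handle.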
if not hasattr(self, "env"):
self.connect_db(self.db_path, save_to_self=True)
key = self._keys[idx]
datapoint_pickled = self.env.begin().get(key)
data = pickle.loads(gzip.decompress(datapoint_pickled))
data["id"] = int.from_bytes(key, "big")
# data["id"] = int(np.frombuffer(key, dtype=np.int64))
return data
| 1,567 | Python | .py | 47 | 26.212766 | 80 | 0.623016 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,260 | unimolv2.py | dptech-corp_NAG2G/unimol_plus/unimol/models/unimolv2.py | import logging
import numpy as np
import torch
import torch.nn as nn
from unicore import utils
from unimol.data import numpy_seed
from unicore.models import (
BaseUnicoreModel,
register_model,
register_model_architecture,
)
from unicore.modules import (
LayerNorm,
)
from .layers import (
AtomFeature,
EdgeFeature,
SE3InvariantKernel,
MovementPredictionHead,
EnergyHead,
PredictedLDDTHead,
Linear,
Embedding,
)
from .unimolv2_encoder import UniMolv2Encoder
logger = logging.getLogger(__name__)
def init_params(module):
def normal_(data):
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
if isinstance(module, nn.Linear) or isinstance(module, Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding) or isinstance(module, Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@register_model("unimolv2")
class Unimolv2Model(BaseUnicoreModel):
"""
    Uni-Mol+ (unimolv2) model: an iterative graph transformer that jointly
    predicts the target property and refined 3D atom coordinates.
"""
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# Arguments related to dropout
parser.add_argument(
"--num-3d-bias-kernel",
type=int,
default=128,
metavar="D",
help="number of kernel in 3D attention bias",
)
parser.add_argument(
"--droppath-prob",
type=float,
metavar="D",
help="stochastic path probability",
default=0.0,
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for" " attention weights",
)
parser.add_argument(
"--act-dropout",
type=float,
metavar="D",
help="dropout probability after" " activation in FFN",
)
# Arguments related to hidden states and self-attention
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--pair-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
# Arguments related to input and output embeddings
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
# misc params
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--num-block",
type=int,
metavar="N",
help="number of recycle",
)
parser.add_argument(
"--noise-scale",
default=0.2,
type=float,
help="coordinate noise for masked atoms",
)
parser.add_argument(
"--label-prob",
default=0.4,
type=float,
help="the probability of using label conformer as input",
)
parser.add_argument(
"--mid-prob",
default=0.2,
type=float,
help="the probability of using label conformer as input",
)
parser.add_argument(
"--mid-upper",
default=0.6,
type=float,
help="the probability of using label conformer as input",
)
parser.add_argument(
"--mid-lower",
default=0.4,
type=float,
help="the probability of using label conformer as input",
)
parser.add_argument(
"--plddt-loss-weight",
default=0.01,
type=float,
help="loss weight for plddt",
)
parser.add_argument(
"--pos-loss-weight",
default=0.2,
type=float,
help="loss weight for pos",
)
parser.add_argument(
"--pos-step-size",
type=float,
help="step size for pos update",
)
parser.add_argument(
"--gaussian-std-width",
type=float,
)
parser.add_argument(
"--gaussian-mean-start",
type=float,
)
parser.add_argument(
"--gaussian-mean-stop",
type=float,
)
parser.add_argument(
"--pretrain", action="store_true", help="3d pretrain or not"
)
parser.add_argument(
"--N_vnode",
type=int,
default=1,
metavar="N",
help="number of vnode",
)
def __init__(self, args):
super().__init__()
base_architecture(args)
self.args = args
self.molecule_encoder = UniMolv2Encoder(
num_encoder_layers=args.encoder_layers,
embedding_dim=args.encoder_embed_dim,
pair_dim=args.pair_embed_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.act_dropout,
activation_fn=args.activation_fn,
droppath_prob=args.droppath_prob,
)
num_atom = 512
num_degree = 128
num_edge = 64
num_pair = 512
num_spatial = 512
embedding_dim = args.encoder_embed_dim
num_attention_heads = args.encoder_attention_heads
num_3d_bias_kernel = args.num_3d_bias_kernel
self.atom_feature = AtomFeature(
num_atom=num_atom,
num_degree=num_degree,
hidden_dim=embedding_dim,
N_vnode=args.N_vnode
)
self.edge_feature = EdgeFeature(
pair_dim=args.pair_embed_dim,
num_edge=num_edge,
num_spatial=num_spatial,
N_vnode=args.N_vnode
)
self.se3_invariant_kernel = SE3InvariantKernel(
pair_dim=args.pair_embed_dim,
num_pair=num_pair,
num_kernel=num_3d_bias_kernel,
std_width=args.gaussian_std_width,
start=args.gaussian_mean_start,
stop=args.gaussian_mean_stop,
)
if not self.args.pretrain:
self.energy_head = EnergyHead(args.encoder_embed_dim, 1)
else:
self.energy_head = None
self.movement_pred_head = MovementPredictionHead(
args.encoder_embed_dim, args.pair_embed_dim, args.encoder_attention_heads
)
self.lddt_head = PredictedLDDTHead(
50, args.encoder_embed_dim, args.encoder_embed_dim // 2
)
self.movement_pred_head.zero_init()
self._num_updates = 0
self.dtype = torch.float32
def half(self):
super().half()
self.se3_invariant_kernel = self.se3_invariant_kernel.float()
self.atom_feature = self.atom_feature.float()
self.edge_feature = self.edge_feature.float()
if self.energy_head is not None:
self.energy_head = self.energy_head.float()
self.dtype = torch.half
return self
def bfloat16(self):
super().bfloat16()
self.se3_invariant_kernel = self.se3_invariant_kernel.float()
self.atom_feature = self.atom_feature.float()
self.edge_feature = self.edge_feature.float()
if self.energy_head is not None:
self.energy_head = self.energy_head.float()
self.dtype = torch.bfloat16
return self
def float(self):
super().float()
self.dtype = torch.float32
return self
def forward(self, batched_data, perturb=None, cls_embedding=None):
data_x = batched_data["atom_feat"]
atom_mask = batched_data["atom_mask"]
pair_type = batched_data["pair_type"]
pos = batched_data["pos"]
if self.args.pretrain:
with numpy_seed(self.args.seed, self._num_updates, key="recycle"):
if self.training:
num_block = np.random.randint(1, self.args.num_block + 1)
else:
num_block = self.args.num_block
else:
num_block = self.args.num_block
n_mol, n_atom = data_x.shape[:2]
x = self.atom_feature(batched_data)
dtype = self.dtype
x = x.type(dtype)
if cls_embedding is not None:
x[:, 0, :] = cls_embedding
if perturb is not None:
x[:, self.args.N_vnode:, :] += perturb.type(dtype)
attn_mask = batched_data["attn_bias"].clone()
attn_bias = torch.zeros_like(attn_mask)
attn_mask = attn_mask.unsqueeze(1).repeat(
1, self.args.encoder_attention_heads, 1, 1
)
attn_bias = attn_bias.unsqueeze(-1).repeat(1, 1, 1, self.args.pair_embed_dim)
attn_bias = self.edge_feature(batched_data, attn_bias)
attn_mask = attn_mask.type(self.dtype)
atom_mask_cls = torch.cat(
[
torch.ones(n_mol, self.args.N_vnode, device=atom_mask.device, dtype=atom_mask.dtype),
atom_mask,
],
dim=1,
).type(self.dtype)
pair_mask = atom_mask_cls.unsqueeze(-1) * atom_mask_cls.unsqueeze(-2)
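        # One refinement ("recycling") iteration: build a distance-based 3D pair bias,
        # run the encoder, and predict a coordinate update for the real atoms.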
def one_block(x, pos, return_x=False):
delta_pos = pos.unsqueeze(1) - pos.unsqueeze(2)
dist = delta_pos.norm(dim=-1)
attn_bias_3d = self.se3_invariant_kernel(dist.detach(), pair_type)
new_attn_bias = attn_bias.clone()
new_attn_bias[:, self.args.N_vnode:, self.args.N_vnode:, :] = new_attn_bias[:, self.args.N_vnode:, self.args.N_vnode:, :] + attn_bias_3d
new_attn_bias = new_attn_bias.type(dtype)
x, pair = self.molecule_encoder(
x,
new_attn_bias,
atom_mask=atom_mask_cls,
pair_mask=pair_mask,
attn_mask=attn_mask,
)
node_output = self.movement_pred_head(
x[:, self.args.N_vnode:, :],
pair[:, self.args.N_vnode:, self.args.N_vnode:, :],
attn_mask[:, :, self.args.N_vnode:, self.args.N_vnode:],
delta_pos.detach(),
)
node_output = node_output * self.args.pos_step_size
if return_x:
return x, pos + node_output
else:
return pos + node_output
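        # In pretraining, all but the last recycling iteration run without gradients;
        # otherwise gradients flow through every iteration.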
if self.args.pretrain:
with torch.no_grad():
for _ in range(num_block - 1):
pos = one_block(x, pos)
pos = one_block(x, pos)
pred_y = None
else:
for _ in range(num_block - 1):
pos = one_block(x, pos)
x, pos = one_block(x, pos, return_x=True)
pred_y = self.energy_head(x[:, 0, :]).view(-1)
pred_dist = (pos.unsqueeze(1) - pos.unsqueeze(2)).norm(dim=-1)
plddt = self.lddt_head(x[:, self.args.N_vnode:, :])
return (
pred_y,
pos,
pred_dist,
plddt,
x
)
@classmethod
def build_model(cls, args, task):
return cls(args)
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
self._num_updates = num_updates
@register_model_architecture("unimolv2", "unimolv2_base")
def base_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.pair_embed_dim = getattr(args, "pair_embed_dim", 128)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 48)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.droppath_prob = getattr(args, "droppath_prob", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.act_dropout = getattr(args, "act_dropout", 0.1)
args.dropout = getattr(args, "dropout", 0.0)
args.num_3d_bias_kernel = getattr(args, "num_3d_bias_kernel", 128)
args.num_block = getattr(args, "num_block", 4)
args.pretrain = getattr(args, "pretrain", False)
args.pos_step_size = getattr(args, "pos_step_size", 0.01)
args.gaussian_std_width = getattr(args, "gaussian_std_width", 1.0)
args.gaussian_mean_start = getattr(args, "gaussian_mean_start", 0.0)
args.gaussian_mean_stop = getattr(args, "gaussian_mean_stop", 9.0)
args.N_vnode = getattr(args, "N_vnode", 1)
| 13,766 | Python | .py | 378 | 26.145503 | 148 | 0.564827 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,261 | layers.py | dptech-corp_NAG2G/unimol_plus/unimol/models/layers.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from unicore import utils
from unicore.modules import softmax_dropout, SelfMultiheadAttention, LayerNorm
from unicore.utils import (
permute_final_dims,
)
from torch import Tensor
from typing import Callable, Optional
class Dropout(nn.Module):
def __init__(self, p):
super().__init__()
self.p = p
def forward(self, x, inplace: bool = False):
if self.p > 0 and self.training:
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
class Linear(nn.Linear):
def __init__(
self,
d_in: int,
d_out: int,
bias: bool = True,
init: str = "default",
):
super(Linear, self).__init__(d_in, d_out, bias=bias)
self.use_bias = bias
if self.use_bias:
with torch.no_grad():
self.bias.fill_(0)
if init == "default":
self._trunc_normal_init(1.0)
elif init == "relu":
self._trunc_normal_init(2.0)
elif init == "glorot":
self._glorot_uniform_init()
elif init == "gating":
self._zero_init(self.use_bias)
elif init == "normal":
self._normal_init()
elif init == "final":
self._zero_init(False)
else:
raise ValueError("Invalid init method.")
def _trunc_normal_init(self, scale=1.0):
# Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
TRUNCATED_NORMAL_STDDEV_FACTOR = 0.87962566103423978
_, fan_in = self.weight.shape
scale = scale / max(1, fan_in)
std = (scale**0.5) / TRUNCATED_NORMAL_STDDEV_FACTOR
nn.init.trunc_normal_(self.weight, mean=0.0, std=std)
def _glorot_uniform_init(self):
nn.init.xavier_uniform_(self.weight, gain=1)
def _zero_init(self, use_bias=True):
with torch.no_grad():
self.weight.fill_(0.0)
if use_bias:
with torch.no_grad():
self.bias.fill_(1.0)
def _normal_init(self):
torch.nn.init.kaiming_normal_(self.weight, nonlinearity="linear")
class Embedding(nn.Embedding):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: int = None,
):
super(Embedding, self).__init__(
num_embeddings, embedding_dim, padding_idx=padding_idx
)
self._normal_init()
if padding_idx is not None:
self.weight.data[self.padding_idx].zero_()
def _normal_init(self, std=0.02):
nn.init.normal_(self.weight, mean=0.0, std=std)
class Transition(nn.Module):
def __init__(self, d_in, n, dropout=0.0):
super(Transition, self).__init__()
self.d_in = d_in
self.n = n
self.linear_1 = Linear(self.d_in, self.n * self.d_in, init="relu")
self.act = nn.GELU()
self.linear_2 = Linear(self.n * self.d_in, d_in, init="final")
self.dropout = dropout
def _transition(self, x):
x = self.linear_1(x)
x = self.act(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.linear_2(x)
return x
def forward(
self,
x: torch.Tensor,
) -> torch.Tensor:
x = self._transition(x=x)
return x
class Attention(nn.Module):
def __init__(
self,
q_dim: int,
k_dim: int,
v_dim: int,
pair_dim: int,
head_dim: int,
num_heads: int,
gating: bool = False,
dropout: float = 0.0,
):
super(Attention, self).__init__()
self.num_heads = num_heads
total_dim = head_dim * self.num_heads
self.gating = gating
self.linear_q = Linear(q_dim, total_dim, bias=False, init="glorot")
self.linear_k = Linear(k_dim, total_dim, bias=False, init="glorot")
self.linear_v = Linear(v_dim, total_dim, bias=False, init="glorot")
self.linear_o = Linear(total_dim, q_dim, init="final")
self.linear_g = None
if self.gating:
self.linear_g = Linear(q_dim, total_dim, init="gating")
# precompute the 1/sqrt(head_dim)
self.norm = head_dim**-0.5
self.dropout = dropout
self.linear_bias = Linear(pair_dim, num_heads)
def forward(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
pair: torch.Tensor,
mask: torch.Tensor = None,
) -> torch.Tensor:
g = None
if self.linear_g is not None:
# gating, use raw query input
g = self.linear_g(q)
q = self.linear_q(q)
q *= self.norm
k = self.linear_k(k)
v = self.linear_v(v)
q = q.view(q.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3).contiguous()
k = k.view(k.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3).contiguous()
v = v.view(v.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3)
attn = torch.matmul(q, k.transpose(-1, -2))
del q, k
bias = self.linear_bias(pair).permute(0, 3, 1, 2).contiguous()
attn = softmax_dropout(attn, self.dropout, self.training, mask=mask, bias=bias)
o = torch.matmul(attn, v)
del attn, v
o = o.transpose(-2, -3).contiguous()
o = o.view(*o.shape[:-2], -1)
if g is not None:
o = torch.sigmoid(g) * o
# merge heads
o = self.linear_o(o)
return o
class OuterProduct(nn.Module):
def __init__(self, d_atom, d_pair, d_hid=16, eps=1e-3):
super(OuterProduct, self).__init__()
self.d_atom = d_atom
self.d_pair = d_pair
self.d_hid = d_hid
self.eps = eps
self.linear_1 = nn.Linear(d_atom, d_hid)
self.linear_2 = nn.Linear(d_atom, d_hid)
self.linear_out = nn.Linear(d_hid**2, d_pair)
self.act = nn.GELU()
def _opm(self, a, b):
outer = torch.einsum("...bc,...de->...bdce", a, b)
outer = outer.reshape(outer.shape[:-2] + (-1,))
outer = self.linear_out(outer)
return outer
def forward(
self,
m: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
mask = mask.unsqueeze(-1)
mask = mask * (mask.size(-2) ** -0.5)
a = self.linear_1(m)
b = self.linear_2(m)
a = a * mask
b = b * mask
z = self._opm(a, b)
norm = torch.einsum("...bc,...dc->...bdc", mask, mask)
z /= self.eps + norm
return z
class AtomFeature(nn.Module):
"""
Compute atom features for each atom in the molecule.
"""
def __init__(
self,
num_atom,
num_degree,
hidden_dim,
N_vnode=1,
):
super(AtomFeature, self).__init__()
self.atom_encoder = Embedding(num_atom, hidden_dim, padding_idx=0)
self.degree_encoder = Embedding(num_degree, hidden_dim, padding_idx=0)
self.N_vnode = N_vnode
self.vnode_encoder = Embedding(self.N_vnode, hidden_dim)
def forward(self, batched_data):
x, degree = (
batched_data["atom_feat"],
batched_data["degree"],
)
n_graph, n_node = x.size()[:2]
node_feature = self.atom_encoder(x).sum(dim=-2) # [n_graph, n_node, n_hidden]
dtype = node_feature.dtype
degree_feature = self.degree_encoder(degree)
node_feature = node_feature + degree_feature
graph_token_feature = self.vnode_encoder.weight.unsqueeze(0).repeat(
n_graph, 1, 1
)
graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1)
return graph_node_feature.type(dtype)
class EdgeFeature(nn.Module):
"""
    Compute pair (edge) features from bond features, shortest-path encodings, and virtual-node embeddings.
"""
def __init__(
self,
pair_dim,
num_edge,
num_spatial,
N_vnode=1,
):
super(EdgeFeature, self).__init__()
self.pair_dim = pair_dim
self.N_vnode = N_vnode
self.edge_encoder = Embedding(num_edge, pair_dim, padding_idx=0)
self.shorest_path_encoder = Embedding(num_spatial, pair_dim, padding_idx=0)
self.vnode_virtual_distance = Embedding(self.N_vnode, pair_dim)
def forward(self, batched_data, graph_attn_bias):
shortest_path = batched_data["shortest_path"]
edge_input = batched_data["edge_feat"]
n_graph = graph_attn_bias.shape[0]
n_node = graph_attn_bias.shape[-1] - self.N_vnode
graph_attn_bias[:, self.N_vnode:, self.N_vnode:, :] = self.shorest_path_encoder(shortest_path)
# reset spatial pos here
t = self.vnode_virtual_distance.weight.view(1, self.N_vnode, self.pair_dim)
graph_attn_bias[:, self.N_vnode:, :self.N_vnode, :] = t
t = self.vnode_virtual_distance.weight.view(self.N_vnode, 1, self.pair_dim)
graph_attn_bias[:, :self.N_vnode, :, :] = t
edge_input = self.edge_encoder(edge_input).mean(-2)
graph_attn_bias[:, self.N_vnode:, self.N_vnode:, :] = graph_attn_bias[:, self.N_vnode:, self.N_vnode:, :] + edge_input
return graph_attn_bias
class SE3InvariantKernel(nn.Module):
"""
    Compute distance-based pair features from 3D coordinates via a Gaussian kernel.
"""
def __init__(
self,
pair_dim,
num_pair,
num_kernel,
std_width=1.0,
start=0.0,
stop=9.0,
):
super(SE3InvariantKernel, self).__init__()
self.num_kernel = num_kernel
self.gaussian = GaussianKernel(
self.num_kernel,
num_pair,
std_width=std_width,
start=start,
stop=stop,
)
self.out_proj = NonLinear(self.num_kernel, pair_dim)
def forward(self, dist, node_type_edge):
edge_feature = self.gaussian(
dist,
node_type_edge.long(),
)
edge_feature = self.out_proj(edge_feature)
return edge_feature
@torch.jit.script
def gaussian(x, mean, std:float):
pi = 3.14159
a = (2 * pi) ** 0.5
return torch.exp(-0.5 * (((x - mean) / std) ** 2)) / (a * std)
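# GaussianKernel expands pairwise distances in a set of Gaussian basis functions,
# after a learned per-pair-type affine transform (mul/bias) of the raw distance.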
class GaussianKernel(nn.Module):
def __init__(self, K=128, num_pair=512, std_width=1.0, start=0.0, stop=9.0):
super().__init__()
self.K = K
std_width = std_width
start = start
stop = stop
mean = torch.linspace(start, stop, K)
self.std = (std_width * (mean[1] - mean[0])).item()
self.register_buffer("mean", mean)
self.mul = Embedding(num_pair, 1, padding_idx=0)
self.bias = Embedding(num_pair, 1, padding_idx=0)
nn.init.constant_(self.bias.weight, 0)
nn.init.constant_(self.mul.weight, 1.0)
def forward(self, x, atom_pair):
mul = self.mul(atom_pair).abs().sum(dim=-2)
bias = self.bias(atom_pair).sum(dim=-2)
x = mul * x.unsqueeze(-1) + bias
x = x.expand(-1, -1, -1, self.K)
mean = self.mean.float().view(-1)
return gaussian(x.float(), mean, self.std)
class NonLinear(nn.Module):
def __init__(self, input, output_size, hidden=None):
super(NonLinear, self).__init__()
if hidden is None:
hidden = input
self.layer1 = Linear(input, hidden, init="relu")
self.layer2 = Linear(hidden, output_size, init="final")
def forward(self, x):
x = self.layer1(x)
x = F.gelu(x)
x = self.layer2(x)
return x
def zero_init(self):
nn.init.zeros_(self.layer2.weight)
nn.init.zeros_(self.layer2.bias)
class EnergyHead(nn.Module):
def __init__(
self,
input_dim,
output_dim,
):
super().__init__()
self.layer_norm = LayerNorm(input_dim)
self.linear_in = Linear(input_dim, input_dim, init="relu")
self.linear_out = Linear(input_dim, output_dim, bias=True, init="final")
def forward(self, x):
x = x.type(self.linear_in.weight.dtype)
x = F.gelu(self.layer_norm(self.linear_in(x)))
x = self.linear_out(x)
return x
class PredictedLDDTHead(nn.Module):
def __init__(self, num_bins, d_in, d_hid):
super(PredictedLDDTHead, self).__init__()
self.num_bins = num_bins
self.d_in = d_in
self.d_hid = d_hid
self.layer_norm = LayerNorm(self.d_hid)
self.linear_1 = Linear(self.d_in, self.d_hid, init="relu")
self.linear_2 = Linear(self.d_hid, self.d_hid, init="relu")
self.act = nn.GELU()
self.linear_3 = Linear(self.d_hid, self.num_bins, init="final")
def forward(self, s):
s = self.linear_1(s)
s = self.act(self.layer_norm(s))
s = self.linear_2(s)
s = self.act(s)
s = self.linear_3(s)
return s
class MovementPredictionHead(nn.Module):
def __init__(
self,
embed_dim: int,
pair_dim: int,
num_head: int,
):
super().__init__()
self.layer_norm = LayerNorm(embed_dim)
self.embed_dim = embed_dim
self.q_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot")
self.k_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot")
self.v_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot")
self.num_head = num_head
self.scaling = (embed_dim // num_head) ** -0.5
self.force_proj1 = Linear(embed_dim, 1, init="final")
self.force_proj2 = Linear(embed_dim, 1, init="final")
self.force_proj3 = Linear(embed_dim, 1, init="final")
self.linear_bias = Linear(pair_dim, num_head)
self.dropout = 0.1
def zero_init(self):
nn.init.zeros_(self.force_proj1.weight)
nn.init.zeros_(self.force_proj1.bias)
nn.init.zeros_(self.force_proj2.weight)
nn.init.zeros_(self.force_proj2.bias)
nn.init.zeros_(self.force_proj3.weight)
nn.init.zeros_(self.force_proj3.bias)
def forward(
self,
query: Tensor,
pair: Tensor,
attn_mask: Tensor,
delta_pos: Tensor,
) -> Tensor:
bsz, n_node, _ = query.size()
query = self.layer_norm(query)
q = (
self.q_proj(query).view(bsz, n_node, self.num_head, -1).transpose(1, 2)
* self.scaling
)
k = self.k_proj(query).view(bsz, n_node, self.num_head, -1).transpose(1, 2)
v = self.v_proj(query).view(bsz, n_node, self.num_head, -1).transpose(1, 2)
attn = q @ k.transpose(-1, -2) # [bsz, head, n, n]
bias = self.linear_bias(pair).permute(0, 3, 1, 2).contiguous()
attn_probs = softmax_dropout(
attn,
self.dropout,
self.training,
mask=attn_mask.contiguous(),
bias=bias.contiguous(),
).view(bsz, self.num_head, n_node, n_node)
rot_attn_probs = attn_probs.unsqueeze(-1) * delta_pos.unsqueeze(1).type_as(
attn_probs
) # [bsz, head, n, n, 3]
rot_attn_probs = rot_attn_probs.permute(0, 1, 4, 2, 3)
x = rot_attn_probs @ v.unsqueeze(2) # [bsz, head , 3, n, d]
x = x.permute(0, 3, 2, 1, 4).contiguous().view(bsz, n_node, 3, -1)
f1 = self.force_proj1(x[:, :, 0, :]).view(bsz, n_node, 1)
f2 = self.force_proj2(x[:, :, 1, :]).view(bsz, n_node, 1)
f3 = self.force_proj3(x[:, :, 2, :]).view(bsz, n_node, 1)
cur_force = torch.cat([f1, f2, f3], dim=-1).float()
return cur_force
class DropPath(torch.nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, prob=None):
super(DropPath, self).__init__()
self.drop_prob = prob
def forward(self, x):
if self.drop_prob == 0.0 or not self.training:
return x
keep_prob = 1 - self.drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
def extra_repr(self) -> str:
return f"prob={self.drop_prob}"
class TriangleMultiplication(nn.Module):
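    # Triangle multiplicative update on the pair representation (in the spirit of
    # AlphaFold-2's Evoformer); "outgoing" and "incoming" edge products are summed.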
def __init__(self, d_pair, d_hid):
super(TriangleMultiplication, self).__init__()
self.linear_ab_p = Linear(d_pair, d_hid * 2)
self.linear_ab_g = Linear(d_pair, d_hid * 2, init="gating")
self.linear_g = Linear(d_pair, d_pair, init="gating")
self.linear_z = Linear(d_hid, d_pair, init="final")
self.layer_norm_out = LayerNorm(d_hid)
def forward(
self,
z: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
mask = mask.unsqueeze(-1)
mask = mask * (mask.shape[-2] ** -0.5)
g = self.linear_g(z)
if self.training:
ab = self.linear_ab_p(z) * mask * torch.sigmoid(self.linear_ab_g(z))
else:
ab = self.linear_ab_p(z)
ab *= mask
ab *= torch.sigmoid(self.linear_ab_g(z))
a, b = torch.chunk(ab, 2, dim=-1)
del z, ab
a1 = permute_final_dims(a, (2, 0, 1))
b1 = b.transpose(-1, -3)
x = torch.matmul(a1, b1)
del a1, b1
b2 = permute_final_dims(b, (2, 0, 1))
a2 = a.transpose(-1, -3)
x = x + torch.matmul(a2, b2)
del a, b, a2, b2
x = permute_final_dims(x, (1, 2, 0))
x = self.layer_norm_out(x)
x = self.linear_z(x)
return g * x
class TransformerEncoderLayer(nn.Module):
"""
Implements a Transformer-M Encoder Layer.
"""
def __init__(
self,
embedding_dim: int = 768,
pair_dim: int = 64,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
droppath_prob: float = 0.0,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.num_attention_heads = num_attention_heads
self.attention_dropout = attention_dropout
if droppath_prob > 0.0:
self.dropout_module = DropPath(droppath_prob)
else:
self.dropout_module = Dropout(dropout)
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
head_dim = self.embedding_dim // self.num_attention_heads
self.self_attn = Attention(
self.embedding_dim,
self.embedding_dim,
self.embedding_dim,
pair_dim=pair_dim,
head_dim=head_dim,
num_heads=self.num_attention_heads,
gating=False,
dropout=attention_dropout,
)
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.ffn = Transition(
self.embedding_dim,
ffn_embedding_dim // self.embedding_dim,
dropout=activation_dropout,
)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
self.opm = OuterProduct(self.embedding_dim, pair_dim, d_hid=32)
self.pair_layer_norm_opm = LayerNorm(pair_dim)
self.pair_layer_norm_ffn = LayerNorm(pair_dim)
self.pair_ffn = Transition(
pair_dim,
1,
dropout=activation_dropout,
)
self.pair_dropout = 0.25
self.pair_layer_norm_trimul = LayerNorm(pair_dim)
self.pair_tri_mul = TriangleMultiplication(pair_dim, 32)
def shared_dropout(self, x, shared_dim, dropout):
shape = list(x.shape)
shape[shared_dim] = 1
with torch.no_grad():
mask = x.new_ones(shape)
return F.dropout(mask, p=dropout, training=self.training) * x
def forward(
self,
x: torch.Tensor,
pair: torch.Tensor,
atom_mask: torch.Tensor,
pair_mask: torch.Tensor,
self_attn_mask: Optional[torch.Tensor] = None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer implementation.
"""
residual = x
x = self.self_attn(
x,
x,
x,
pair=pair,
mask=self_attn_mask,
)
x = self.dropout_module(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.ffn(x)
x = self.dropout_module(x)
x = residual + x
x = self.final_layer_norm(x)
# outer product
pair = pair + self.dropout_module(self.opm(x, atom_mask))
pair = self.pair_layer_norm_opm(pair)
# trimul
pair_update = self.shared_dropout(
self.pair_tri_mul(pair, pair_mask), -3, self.pair_dropout
)
pair = pair + pair_update
pair = self.pair_layer_norm_trimul(pair)
# ffn
pair = pair + self.dropout_module(self.pair_ffn(pair))
pair = self.pair_layer_norm_ffn(pair)
return x, pair
| 21,560 | Python | .py | 577 | 28.542461 | 126 | 0.56734 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,262 | __init__.py | dptech-corp_NAG2G/unimol_plus/unimol/models/__init__.py | from pathlib import Path
import importlib
# automatically import any Python files in the models/ directory
for file in sorted(Path(__file__).parent.glob("*.py")):
if not file.name.startswith("_"):
importlib.import_module("unimol.models." + file.name[:-3]) | 272 | Python | .py | 6 | 42.333333 | 68 | 0.725564 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,263 | unimolv2_encoder.py | dptech-corp_NAG2G/unimol_plus/unimol/models/unimolv2_encoder.py | from typing import Tuple
import torch
import torch.nn as nn
from unicore.modules import LayerNorm
from .layers import (
TransformerEncoderLayer,
Dropout,
)
class UniMolv2Encoder(nn.Module):
def __init__(
self,
num_encoder_layers: int = 6,
embedding_dim: int = 768,
pair_dim: int = 64,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "gelu",
droppath_prob: float = 0.0,
) -> None:
super().__init__()
self.embedding_dim = embedding_dim
self.num_head = num_attention_heads
self.layer_norm = LayerNorm(embedding_dim)
self.pair_layer_norm = LayerNorm(pair_dim)
self.layers = nn.ModuleList([])
if droppath_prob > 0:
droppath_probs = [
x.item() for x in torch.linspace(0, droppath_prob, num_encoder_layers)
]
else:
droppath_probs = None
self.layers.extend(
[
TransformerEncoderLayer(
embedding_dim=embedding_dim,
pair_dim=pair_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
droppath_prob=droppath_probs[i]
if droppath_probs is not None
else 0,
)
for i in range(num_encoder_layers)
]
)
def forward(
self,
x,
pair,
atom_mask,
pair_mask,
attn_mask=None,
) -> Tuple[torch.Tensor, torch.Tensor]:
x = self.layer_norm(x)
pair = self.pair_layer_norm(pair)
for layer in self.layers:
x, pair = layer(
x,
pair,
atom_mask=atom_mask,
pair_mask=pair_mask,
self_attn_mask=attn_mask,
)
return x, pair
| 2,326 | Python | .py | 73 | 20.876712 | 86 | 0.530303 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,264 | unimolv2.py | dptech-corp_NAG2G/unimol_plus/unimol/tasks/unimolv2.py | # Copyright (c) DP Techonology, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from unicore.data import (
NestedDictionaryDataset,
EpochShuffleDataset,
)
from unimol.data import (
KeyDataset,
LMDBPCQDataset,
ConformerPCQSampleDataset,
ConformerPCQTTASampleDataset,
Unimolv2Features,
)
from unicore.tasks import UnicoreTask, register_task
logger = logging.getLogger(__name__)
@register_task("unimolv2")
class Unimolv2Task(UnicoreTask):
"""Task for training transformer auto-encoder models."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="downstream data path")
def __init__(self, args):
super().__init__(args)
self.seed = args.seed
@classmethod
def setup_task(cls, args, **kwargs):
return cls(args)
def load_dataset(self, split, force_valid=False, **kwargs):
"""Load a given dataset split.
Args:
            split (str): name of the data split (e.g., train, valid)
"""
split_path = os.path.join(self.args.data, split + ".lmdb")
dataset = LMDBPCQDataset(split_path)
is_train = (split == "train") and not force_valid
if is_train:
sample_dataset = ConformerPCQSampleDataset(
dataset,
self.seed,
"input_pos",
"label_pos",
)
else:
sample_dataset = ConformerPCQTTASampleDataset(
dataset,
self.seed,
"input_pos",
"label_pos",
)
raw_coord_dataset = KeyDataset(sample_dataset, "coordinates")
tgt_coord_dataset = KeyDataset(sample_dataset, "target_coordinates")
graph_features = Unimolv2Features(
sample_dataset,
raw_coord_dataset,
tgt_coord_dataset if split in ["train", "valid_our"] else None,
is_train=is_train,
label_prob=self.args.label_prob,
mid_prob=self.args.mid_prob,
mid_lower=self.args.mid_lower,
mid_upper=self.args.mid_upper,
noise=self.args.noise_scale,
seed=self.seed + 2,
)
nest_dataset = NestedDictionaryDataset(
{
"batched_data": graph_features,
},
)
if is_train:
nest_dataset = EpochShuffleDataset(
nest_dataset, len(nest_dataset), self.seed
)
self.datasets[split] = nest_dataset
def build_model(self, args):
from unicore import models
model = models.build_model(args, self)
return model
| 2,851 | Python | .py | 83 | 25.493976 | 76 | 0.604499 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,265 | __init__.py | dptech-corp_NAG2G/unimol_plus/unimol/tasks/__init__.py | from pathlib import Path
import importlib
# automatically import any Python files in the tasks/ directory
for file in sorted(Path(__file__).parent.glob("*.py")):
if not file.name.startswith("_"):
importlib.import_module("unimol.tasks." + file.name[:-3])
| 272 | Python | .py | 6 | 42.166667 | 68 | 0.724528 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,266 | unimolv2.py | dptech-corp_NAG2G/unimol_plus/unimol/losses/unimolv2.py | from dataclasses import dataclass
import math
import torch
import torch.nn.functional as F
import numpy as np
import pandas as pd
from unicore import metrics
from unicore.losses import UnicoreLoss, register_loss
from scipy.spatial.transform import Rotation as R
from typing import List, Callable, Any, Dict
import os
@register_loss("unimolv2")
class Unimolv2Loss(UnicoreLoss):
"""
    Loss for the Uni-Mol+ (unimolv2) model: combines property (L1), coordinate, pair-distance, and pLDDT terms.
"""
def __init__(self, task):
super().__init__(task)
self.args = task.args
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
with torch.no_grad():
sample_size = sample["batched_data"]["atom_feat"].shape[0]
natoms = sample["batched_data"]["atom_feat"].shape[1]
        # forward pass; the model also returns its final encoder representation,
        # which this loss does not use
        (
            graph_output,
            pos_pred,
            dist_pred,
            plddt_logits,
            _,
        ) = model(**sample)
targets = sample["batched_data"]["target"].float().view(-1)
per_data_loss = None
if graph_output is not None:
per_data_loss = torch.nn.L1Loss(reduction="none")(
graph_output.float(), targets
)
loss = per_data_loss.sum()
else:
loss = torch.tensor(0.0, device=targets.device)
atom_mask = sample["batched_data"]["atom_mask"].float()
pos_mask = atom_mask.unsqueeze(-1)
pos_target = sample["batched_data"]["pos_target"].float() * pos_mask
def get_pos_loss(pos_pred):
pos_pred = pos_pred.float() * pos_mask
center_loss = pos_pred.mean(dim=-2).square().sum()
pos_loss = torch.nn.L1Loss(reduction="none")(
pos_pred,
pos_target,
).sum(dim=(-1, -2))
pos_cnt = pos_mask.squeeze(-1).sum(dim=-1) + 1e-10
pos_loss = (pos_loss / pos_cnt).sum()
return pos_loss, center_loss
(pos_loss, center_loss) = get_pos_loss(pos_pred)
pair_mask = atom_mask.unsqueeze(-1) * atom_mask.unsqueeze(-2).float()
dist_target = (pos_target.unsqueeze(-2) - pos_target.unsqueeze(-3)).norm(dim=-1)
dist_target = dist_target * pair_mask
dist_cnt = pair_mask.sum(dim=(-1, -2)) + 1e-10
def get_dist_loss(dist_pred, return_sum=True):
dist_pred = dist_pred.float() * pair_mask
dist_loss = torch.nn.L1Loss(reduction="none")(
dist_pred,
dist_target,
).sum(dim=(-1, -2))
if return_sum:
return (dist_loss / dist_cnt).sum()
else:
return dist_loss / dist_cnt
dist_loss = get_dist_loss(dist_pred)
plddt_logits = plddt_logits.float()
cutoff = 15.0
num_bins = 50
eps = 1e-10
lddt = self.compute_lddt(
dist_pred.float(),
dist_target,
pair_mask,
cutoff=cutoff,
eps=eps,
).detach()
bin_index = torch.floor(lddt * num_bins).long()
bin_index = torch.clamp(bin_index, max=(num_bins - 1))
lddt_ca_one_hot = torch.nn.functional.one_hot(bin_index, num_classes=num_bins)
errors = self.softmax_cross_entropy(plddt_logits, lddt_ca_one_hot)
plddt_loss = self.masked_mean(atom_mask, errors, dim=-1, eps=eps).sum()
ca_lddt = self.masked_mean(atom_mask, lddt, dim=-1, eps=eps)
plddt = self.masked_mean(
atom_mask, self.predicted_lddt(plddt_logits), dim=-1, eps=eps
)
total_loss = (
loss
+ dist_loss
+ self.args.pos_loss_weight * (pos_loss + center_loss)
+ self.args.plddt_loss_weight * plddt_loss
)
logging_output = {
"loss": loss.data,
"dist_loss": dist_loss.data,
"pos_loss": pos_loss.data,
"center_loss": center_loss.data,
"total_loss": total_loss.data,
"plddt_loss": plddt_loss.data,
"ca_lddt_metric": ca_lddt.sum().data,
"plddt_metric": plddt.sum().data,
"sample_size": sample_size,
"nsentences": sample_size,
"ntokens": natoms,
"bsz": sample_size,
"n_atoms": natoms * sample_size,
}
if not torch.is_grad_enabled():
logging_output["id"] = sample["batched_data"]["id"].cpu().numpy()
if per_data_loss is None:
per_data_loss = 1.0 - ca_lddt
logging_output["per_data"] = per_data_loss.detach().cpu().numpy()
logging_output["plddt"] = plddt.detach().cpu().numpy()
logging_output["ca_lddt"] = ca_lddt.detach().cpu().numpy()
logging_output["is_pretrain"] = 1.0 if graph_output is None else 0.0
logging_output["total_loss"] = total_loss.data
return total_loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs, split="valid") -> None:
"""Aggregate logging outputs from data parallel training."""
is_pretrain = sum(log.get("is_pretrain", 0) for log in logging_outputs) > 0
if split != "train":
prefix = "" if not is_pretrain else "pretrain_"
id = np.concatenate([log["id"] for log in logging_outputs])
per_data = np.concatenate([log["per_data"] for log in logging_outputs])
plddt = np.concatenate([log["plddt"] for log in logging_outputs])
ca_lddt = np.concatenate([log["ca_lddt"] for log in logging_outputs])
df = pd.DataFrame(
{
"id": id,
"loss": per_data,
"plddt": plddt,
"ca_lddt": ca_lddt,
}
)
df_grouped = df.groupby(["id"])
df_min = df_grouped.agg("min")
df_mean = df_grouped.agg("mean")
df_median = df_grouped.agg("median")
df_plddt = (
df.sort_values(by=["id", "plddt"], ascending=[True, False])
.groupby("id", as_index=False)
.head(1)
)
df_ca_lddt = (
df.sort_values(by=["id", "ca_lddt"], ascending=[True, False])
.groupby("id", as_index=False)
.head(1)
)
assert len(df_min["loss"]) == len(df_plddt["loss"])
metrics.log_scalar(
prefix + "loss_by_plddt", df_plddt["loss"].mean(), 1, round=6
)
metrics.log_scalar(
prefix + "loss_by_ca_lddt", df_ca_lddt["loss"].mean(), 1, round=6
)
metrics.log_scalar(
prefix + "loss_by_min", df_min["loss"].mean(), 1, round=6
)
metrics.log_scalar(prefix + "loss_cnt", len(df_min["loss"]), 1, round=6)
metrics.log_scalar(
prefix + "loss_by_mean", df_mean["loss"].mean(), 1, round=6
)
metrics.log_scalar(
prefix + "loss_by_median", df_median["loss"].mean(), 1, round=6
)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
n_atoms = sum(log.get("n_atoms", 0) for log in logging_outputs)
for key in logging_outputs[0].keys():
if "loss" in key or "metric" in key:
total_loss_sum = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(
key, total_loss_sum / sample_size, sample_size, round=6
)
metrics.log_scalar("n_atoms", n_atoms / sample_size, sample_size, round=6)
@staticmethod
def logging_outputs_can_be_summed(is_train) -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return is_train
def compute_lddt(
self,
dmat_pred,
dmat_true,
pair_mask: torch.Tensor,
cutoff: float = 15.0,
eps: float = 1e-10,
) -> torch.Tensor:
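        # LDDT-style score: for each atom, the fraction of neighbor distances within
        # `cutoff` (in the ground truth) whose absolute error is below the
        # 0.05 / 0.1 / 0.2 / 0.4 thresholds, averaged over the four thresholds.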
n = pair_mask.shape[-1]
dists_to_score = (
(dmat_true < cutoff)
* pair_mask
* (1.0 - torch.eye(n, device=pair_mask.device))
)
dist_l1 = torch.abs(dmat_true - dmat_pred)
score = (
(dist_l1 < 0.05).type(dist_l1.dtype)
+ (dist_l1 < 0.1).type(dist_l1.dtype)
+ (dist_l1 < 0.2).type(dist_l1.dtype)
+ (dist_l1 < 0.4).type(dist_l1.dtype)
)
score = score * 0.25
norm = 1.0 / (eps + torch.sum(dists_to_score, dim=-1))
score = norm * (eps + torch.sum(dists_to_score * score, dim=-1))
return score
def masked_mean(self, mask, value, dim, eps=1e-10, keepdim=False):
mask = mask.expand(*value.shape)
return torch.sum(mask * value, dim=dim, keepdim=keepdim) / (
eps + torch.sum(mask, dim=dim, keepdim=keepdim)
)
def softmax_cross_entropy(self, logits, labels):
loss = -1 * torch.sum(
labels * torch.nn.functional.log_softmax(logits.float(), dim=-1),
dim=-1,
)
return loss
def predicted_lddt(self, plddt_logits: torch.Tensor) -> torch.Tensor:
"""Computes per-residue pLDDT from logits.
Args:
logits: [num_res, num_bins] output from the PredictedLDDTHead.
Returns:
plddt: [num_res] per-residue pLDDT.
"""
num_bins = plddt_logits.shape[-1]
bin_probs = torch.nn.functional.softmax(plddt_logits.float(), dim=-1)
bin_width = 1.0 / num_bins
bounds = torch.arange(
start=0.5 * bin_width, end=1.0, step=bin_width, device=plddt_logits.device
)
plddt = torch.sum(
bin_probs * bounds.view(*((1,) * len(bin_probs.shape[:-1])), *bounds.shape),
dim=-1,
)
return plddt
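    # Sketch: with num_bins = 50, the bin centers are 0.01, 0.03, ..., 0.99 and pLDDT
    # is the softmax-probability-weighted average of those centers.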
| 10,294 | Python | .py | 245 | 31.012245 | 88 | 0.546298 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,267 | __init__.py | dptech-corp_NAG2G/unimol_plus/unimol/losses/__init__.py | from pathlib import Path
import importlib
# automatically import any Python files in the losses/ directory
for file in sorted(Path(__file__).parent.glob("*.py")):
if not file.name.startswith("_"):
importlib.import_module("unimol.losses." + file.name[:-3])
| 273 | Python | .py | 6 | 42.333333 | 68 | 0.725564 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,268 | check_train_smi.py | dptech-corp_NAG2G/unimol_plus/examples/pcqm4m/check_train_smi.py | import gzip
import os, sys
import pickle
from tqdm import tqdm
from multiprocessing import Pool
import lmdb
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.rdMolAlign import GetBestAlignmentTransform
import numpy as np
lines = gzip.open("data.csv.gz", "r").readlines()
target = []
smiles = []
for i in range(1, len(lines)):
try:
s = lines[i].decode().split(",")
smiles.append(s[1])
target.append(float(s[2]))
except:
target.append(None)
del lines
label_env = lmdb.open(
"label_3D.lmdb",
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
with label_env.begin() as txn:
train_keys = list(txn.cursor().iternext(values=False))
def get_by_key(env, key):
data = env.begin().get(key)
if data is None:
return data
else:
try:
return pickle.loads(gzip.decompress(data))
except:
return None
def process_one(key):
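    # Decode the LMDB key back to the CSV row index, then check that the canonical
    # SMILES of the labeled 3D molecule matches the canonical SMILES from the CSV.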
index = int.from_bytes(key, "big")
label_str = get_by_key(label_env, key)
label_mol = Chem.MolFromMolBlock(label_str)
label_mol = Chem.RemoveHs(label_mol)
    ori_smi = Chem.MolToSmiles(Chem.MolFromSmiles(smiles[index]))
label_smi = Chem.MolToSmiles(label_mol)
if ori_smi != label_smi:
print("smi mismatch", ori_smi, label_smi)
return 1
else:
return 0
i = 0
error_cnt = 0
with Pool(96) as pool:
for ret in tqdm(pool.imap(process_one, train_keys), total=len(train_keys)):
        error_cnt += ret
# use `int.from_bytes(key, "big")` to decode from bytes
i += 1
print(error_cnt, i)
| 1,686 | Python | .py | 62 | 22.403226 | 79 | 0.659416 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,269 | gen_label3d_lmdb.py | dptech-corp_NAG2G/unimol_plus/examples/pcqm4m/gen_label3d_lmdb.py | import gzip
import os, sys
import pickle
from tqdm import tqdm
from multiprocessing import Pool
import lmdb
from rdkit import Chem
import torch
split = torch.load("split_dict.pt")
train_index = split["train"]
os.system("rm -f label_3D.lmdb")
env_new = lmdb.open(
"label_3D.lmdb",
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn_write = env_new.begin(write=True)
i = 0
with open("pcqm4m-v2-train.sdf", "r") as input:
cur_content = ""
for line in input:
cur_content += line
if line == "$$$$\n":
ret = gzip.compress(pickle.dumps(cur_content))
a = txn_write.put(int(train_index[i]).to_bytes(4, byteorder="big"), ret)
i += 1
cur_content = ""
if i % 10000 == 0:
txn_write.commit()
txn_write = env_new.begin(write=True)
print("processed {} molecules".format(i))
txn_write.commit()
env_new.close()
| 1,028 | Python | .py | 38 | 21.578947 | 84 | 0.61687 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,270 | get_mul3d_lmdb.py | dptech-corp_NAG2G/unimol_plus/examples/pcqm4m/get_mul3d_lmdb.py | import gzip
import os, sys
import pickle
from tqdm import tqdm
from multiprocessing import Pool
import lmdb
# '2022.09.3'
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.rdMolAlign import GetBestAlignmentTransform
import numpy as np
import torch
split_key = sys.argv[1]
split = torch.load("split_dict.pt")
valid_index = split[split_key]
lines = gzip.open("data.csv.gz", "r").readlines()
target = []
smiles = []
for i in range(1, len(lines)):
try:
s = lines[i].decode().split(",")
smiles.append(s[1])
target.append(float(s[2]))
except:
target.append(None)
del lines
if split_key == "train":
label_env = lmdb.open(
"label_3D.lmdb",
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
with label_env.begin() as txn:
train_keys = list(txn.cursor().iternext(values=False))
else:
train_keys = valid_index
def get_info(src_mol, perm=None):
atoms = np.array([x.GetSymbol() for x in src_mol.GetAtoms()])
pos = src_mol.GetConformer().GetPositions()
if perm is not None:
new_atoms = []
new_pos = np.zeros_like(pos)
for i in range(len(atoms)):
j = perm[i]
new_atoms.append(atoms[j])
new_pos[i, :] = pos[j, :]
return np.array(new_atoms), new_pos
else:
return atoms, pos
def align_to(src_mol, ref_mol):
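    # Align src_mol onto ref_mol with RDKit's best alignment transform, remap src's
    # atom order to match ref, and sanity-check the recomputed RMSD against RDKit's.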
t = GetBestAlignmentTransform(src_mol, ref_mol)
perm = {x[1]: x[0] for x in t[2]}
R = t[1][:3, :3].T
T = t[1][:3, 3].T
ref_atoms, ref_pos = get_info(ref_mol)
src_atoms, src_pos = get_info(src_mol, perm)
assert np.all(ref_atoms == src_atoms)
src_pos = src_pos @ R + T
def cal_rmsd(true_atom_pos, pred_atom_pos, eps: float = 1e-6):
sd = np.square(true_atom_pos - pred_atom_pos).sum(axis=-1)
msd = np.mean(sd)
return np.sqrt(msd + eps)
cur_rmsd = cal_rmsd(src_pos, ref_pos)
assert np.abs(cur_rmsd - t[0]) < 1e-2
return ref_atoms, src_pos, ref_pos
def rdkit_mmff(mol):
try:
AllChem.MMFFOptimizeMolecule(mol)
new_mol = rdkit_remove_hs(mol)
pos = new_mol.GetConformer().GetPositions()
return new_mol
except:
return rdkit_remove_hs(mol)
def read_smiles(smile):
try:
mol = Chem.MolFromSmiles(smile)
except:
print("warning: cannot sanitize smiles: ", smile)
mol = Chem.MolFromSmiles(smile, sanitize=False)
mol = Chem.AddHs(mol)
return mol
def read_mol_block(mol_block, removeHs=True):
try:
mol = Chem.MolFromMolBlock(mol_block, removeHs=removeHs)
except:
print("warning: cannot sanitize : ", mol_block)
mol = Chem.MolFromMolBlock(mol_block, sanitize=False, removeHs=removeHs)
return mol
def rdkit_remove_hs(mol):
try:
return Chem.RemoveHs(mol)
except:
return Chem.RemoveHs(mol, sanitize=False)
def rdkit_2d_gen(smile):
m = read_smiles(smile)
AllChem.Compute2DCoords(m)
m = rdkit_mmff(m)
pos = m.GetConformer().GetPositions()
return m
def rdkit_3d_gen(smile, seed):
mol = read_smiles(smile)
AllChem.EmbedMolecule(mol, randomSeed=seed, maxAttempts=1000)
mol = rdkit_mmff(mol)
pos = mol.GetConformer().GetPositions()
return mol
def mols_gen(smiles, index, seed=-1, num_confs=8, num_obabel_confs=0, label_mol=None):
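    # Generate up to `num_confs` RDKit 3D conformers aligned to a reference conformer,
    # falling back to 2D coordinates if 3D embedding keeps failing; returns
    # (ref_mol, atoms, input conformer positions, label positions, is_3d flag).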
si = 0
ref_mol = None
for i in range(5):
try:
ref_mol = rdkit_3d_gen(smiles, seed + i)
if label_mol is not None:
_, label_pos, _ = align_to(label_mol, ref_mol)
ref_rdkit = True
ref_2d = False
break
except:
ref_mol = None
si = i
if ref_mol is None:
try:
ref_mol = rdkit_2d_gen(smiles)
if label_mol is not None:
_, label_pos, _ = align_to(label_mol, ref_mol)
ref_rdkit = False
ref_2d = True
except:
return None, None, None, None, False
atoms, init_pos = get_info(ref_mol)
init_pos_list = [init_pos]
if label_mol is None:
label_pos = init_pos
if ref_2d:
return ref_mol, atoms, init_pos_list, label_pos, False
max_try = num_confs * 10
for i in range(max_try):
try:
cur_mol = rdkit_3d_gen(smiles, seed + i + 1 + si)
_, cur_pos, _ = align_to(cur_mol, ref_mol)
init_pos_list.append(cur_pos)
except:
pass
if len(init_pos_list) >= num_confs:
break
return ref_mol, atoms, init_pos_list, label_pos, True
def get_by_key(env, key):
data = env.begin().get(key)
if data is None:
return data
else:
try:
return pickle.loads(gzip.decompress(data))
except:
return None
# allowable multiple choice node and edge features
allowable_features = {
"possible_atomic_num_list": list(range(1, 119)) + ["misc"],
"possible_chirality_list": [
"CHI_UNSPECIFIED",
"CHI_TETRAHEDRAL_CW",
"CHI_TETRAHEDRAL_CCW",
"CHI_TRIGONALBIPYRAMIDAL",
"CHI_OCTAHEDRAL",
"CHI_SQUAREPLANAR",
"CHI_OTHER",
],
"possible_degree_list": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "misc"],
"possible_formal_charge_list": [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, "misc"],
"possible_numH_list": [0, 1, 2, 3, 4, 5, 6, 7, 8, "misc"],
"possible_number_radical_e_list": [0, 1, 2, 3, 4, "misc"],
"possible_hybridization_list": ["SP", "SP2", "SP3", "SP3D", "SP3D2", "misc"],
"possible_is_aromatic_list": [False, True],
"possible_is_in_ring_list": [False, True],
"possible_bond_type_list": ["SINGLE", "DOUBLE", "TRIPLE", "AROMATIC", "misc"],
"possible_bond_stereo_list": [
"STEREONONE",
"STEREOZ",
"STEREOE",
"STEREOCIS",
"STEREOTRANS",
"STEREOANY",
],
"possible_is_conjugated_list": [False, True],
}
def safe_index(l, e):
"""
Return index of element e in list l. If e is not present, return the last index
"""
try:
return l.index(e)
except:
return len(l) - 1
def atom_to_feature_vector(atom):
"""
Converts rdkit atom object to feature list of indices
:param mol: rdkit atom object
:return: list
"""
atom_feature = [
safe_index(allowable_features["possible_atomic_num_list"], atom.GetAtomicNum()),
allowable_features["possible_chirality_list"].index(str(atom.GetChiralTag())),
safe_index(allowable_features["possible_degree_list"], atom.GetTotalDegree()),
safe_index(
allowable_features["possible_formal_charge_list"], atom.GetFormalCharge()
),
safe_index(allowable_features["possible_numH_list"], atom.GetTotalNumHs()),
safe_index(
allowable_features["possible_number_radical_e_list"],
atom.GetNumRadicalElectrons(),
),
safe_index(
allowable_features["possible_hybridization_list"],
str(atom.GetHybridization()),
),
allowable_features["possible_is_aromatic_list"].index(atom.GetIsAromatic()),
allowable_features["possible_is_in_ring_list"].index(atom.IsInRing()),
]
return atom_feature
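# Illustrative usage (assuming `mol` is an RDKit molecule already in scope):
# feats = atom_to_feature_vector(mol.GetAtomWithIdx(0))
# len(feats) == 9  # one index per atom-level entry in allowable_features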
def bond_to_feature_vector(bond):
"""
Converts rdkit bond object to feature list of indices
    :param bond: rdkit bond object
:return: list
"""
bond_feature = [
safe_index(
allowable_features["possible_bond_type_list"], str(bond.GetBondType())
),
allowable_features["possible_bond_stereo_list"].index(str(bond.GetStereo())),
allowable_features["possible_is_conjugated_list"].index(bond.GetIsConjugated()),
]
return bond_feature
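# Illustrative usage: every bond maps to 3 indices (bond type, stereo, conjugation).
# bond_feats = bond_to_feature_vector(mol.GetBondWithIdx(0))
# len(bond_feats) == 3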
def get_graph(mol):
"""
    Converts an RDKit mol object to graph arrays
    :param mol: rdkit mol object
    :return: tuple of (node features, edge index, edge attributes)
"""
atom_features_list = []
for atom in mol.GetAtoms():
atom_features_list.append(atom_to_feature_vector(atom))
x = np.array(atom_features_list, dtype=np.int32)
# bonds
num_bond_features = 3 # bond type, bond stereo, is_conjugated
if len(mol.GetBonds()) > 0: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edge_feature = bond_to_feature_vector(bond)
# add edges in both directions
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format with shape [2, num_edges]
edge_index = np.array(edges_list, dtype=np.int32).T
# data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]
edge_attr = np.array(edge_features_list, dtype=np.int32)
else: # mol has no bonds
edge_index = np.empty((2, 0), dtype=np.int32)
edge_attr = np.empty((0, num_bond_features), dtype=np.int32)
return x, edge_index, edge_attr
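# Shapes returned by get_graph for a molecule with N atoms and B bonds:
#   x:          (N, 9)   int32 node features
#   edge_index: (2, 2*B) int32 COO connectivity (both directions per bond)
#   edge_attr:  (2*B, 3) int32 edge features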
def process_one(key):
if split_key == "train":
index = int.from_bytes(key, "big")
label_str = get_by_key(label_env, key)
label_mol = read_mol_block(label_str)
else:
index = int(key)
key = index.to_bytes(4, byteorder="big")
label_mol = None
ori_smi = smiles[index]
seed = int(index % 1000 + 1)
ref_mol, atoms, init_pos_list, label_pos, is_3d = mols_gen(
ori_smi, index, seed=seed, label_mol=label_mol
)
if label_pos is None or len(atoms) <= 0:
print(index, ori_smi)
return key, None, False
node_attr, edge_index, edge_attr = get_graph(ref_mol)
return (
key,
gzip.compress(
pickle.dumps(
{
"atoms": atoms,
"input_pos": init_pos_list,
"label_pos": label_pos,
"target": target[index],
"smi": ori_smi,
"node_attr": node_attr,
"edge_index": edge_index,
"edge_attr": edge_attr,
}
)
),
is_3d,
)
os.system(f"rm -f {split_key}.lmdb")
env_new = lmdb.open(
f"{split_key}.lmdb",
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn_write = env_new.begin(write=True)
i = 0
error_cnt = 0
is_3d_cnt = 0
with Pool(112) as pool:
for ret in tqdm(
pool.imap_unordered(process_one, train_keys), total=len(train_keys)
):
key, val, is_3d = ret
if val is not None:
txn_write.put(key, val)
else:
error_cnt += 1
if is_3d:
is_3d_cnt += 1
# use `int.from_bytes(key, "big")` to decode from bytes
i += 1
if i % 10000 == 0:
txn_write.commit()
txn_write = env_new.begin(write=True)
txn_write.commit()
env_new.close()
print(error_cnt, is_3d_cnt)
| 11,291 | Python | .py | 339 | 25.961652 | 88 | 0.5942 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,271 | preprocess_slurm.py | dptech-corp_NAG2G/unimol_plus/examples/molecules/preprocess_slurm.py | import os
import sys
import json
import glob
import pickle
import pandas as pd
import numpy as np
from rdkit import Chem
from tqdm import tqdm
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
import warnings
import contextlib
warnings.filterwarnings(action='ignore')
from multiprocessing import Pool
import timeout_decorator
from sklearn.mixture import BayesianGaussianMixture
from rdkit.Chem import rdMolTransforms
import copy
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
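# Typical use, so sampling is reproducible without clobbering global RNG state:
# with numpy_seed(42):
#     np.random.shuffle(some_list)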
def get_torsions(m):
m = Chem.RemoveHs(m)
torsionList = []
torsionSmarts = '[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]'
torsionQuery = Chem.MolFromSmarts(torsionSmarts)
matches = m.GetSubstructMatches(torsionQuery)
for match in matches:
idx2 = match[0]
idx3 = match[1]
bond = m.GetBondBetweenAtoms(idx2, idx3)
jAtom = m.GetAtomWithIdx(idx2)
kAtom = m.GetAtomWithIdx(idx3)
for b1 in jAtom.GetBonds():
if (b1.GetIdx() == bond.GetIdx()):
continue
idx1 = b1.GetOtherAtomIdx(idx2)
for b2 in kAtom.GetBonds():
if ((b2.GetIdx() == bond.GetIdx())
or (b2.GetIdx() == b1.GetIdx())):
continue
idx4 = b2.GetOtherAtomIdx(idx3)
# skip 3-membered rings
if (idx4 == idx1):
continue
# skip torsions that include hydrogens
if ((m.GetAtomWithIdx(idx1).GetAtomicNum() == 1)
or (m.GetAtomWithIdx(idx4).GetAtomicNum() == 1)):
continue
if m.GetAtomWithIdx(idx4).IsInRing():
torsionList.append((idx4, idx3, idx2, idx1))
break
else:
torsionList.append((idx1, idx2, idx3, idx4))
break
break
return torsionList
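# get_torsions returns one (i, j, k, l) atom-index quadruple per rotatable bond
# j-k, ready for the Set/GetDihedral helpers below; torsions touching hydrogens
# or closing 3-membered rings are skipped.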
def SetDihedral(conf, atom_idx, new_vale):
rdMolTransforms.SetDihedralRad(conf, atom_idx[0], atom_idx[1], atom_idx[2], atom_idx[3], new_vale)
def GetDihedral(conf, atom_idx):
return rdMolTransforms.GetDihedralRad(conf, atom_idx[0], atom_idx[1], atom_idx[2], atom_idx[3])
@timeout_decorator.timeout(20)
def inner_smi2coords(smi, num_confs=100, seed=42, cluster_size=10):
coordinate_list, rotable_bonds_list = [], []
mol = Chem.MolFromSmiles(smi)
    mol = AllChem.AddHs(mol, addCoords=True)  # AddHs returns a new molecule; keep the H-added copy
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
wt = Descriptors.ExactMolWt(mol)
# skip for heavy molecules
if wt > 1000:
return None
# at least have two atoms
if len(atoms) < 2:
return None
# allconformers = AllChem.EmbedMultipleConfs(mol, numConfs=num_confs, randomSeed=seed, clearConfs=True, numThreads=1)
AllChem.EmbedMolecule(mol, randomSeed=seed)
rotable_bonds = get_torsions(mol)
for i in range(num_confs):
np.random.seed(i)
values = 3.1415926 * 2 * np.random.rand(len(rotable_bonds))
for idx in range(len(rotable_bonds)):
SetDihedral(mol.GetConformer(), rotable_bonds[idx], values[idx])
Chem.rdMolTransforms.CanonicalizeConformer(mol.GetConformer())
try:
AllChem.MMFFOptimizeMolecule(mol)
coordinate_list.append(mol.GetConformer().GetPositions().astype(np.float32))
rotable_bonds_value = [GetDihedral(mol.GetConformer(), rotable_bonds[idx]) for idx in range(len(rotable_bonds))]
rotable_bonds_list.append(rotable_bonds_value)
assert len(atoms) == len(coordinate_list[-1])
except:
continue
X = np.array(rotable_bonds_list)
clf = BayesianGaussianMixture(n_components=cluster_size, random_state=seed).fit(X)
probs = clf.predict_proba(X)
# filter redundant clusters
probs = probs[:,probs.mean(axis=0)!=0.0]
ids = probs.argmax(axis=0)
# padding to cluster_size
if len(ids) < cluster_size:
        ids = list(ids) + [ids[0]] * (cluster_size - len(ids))  # list concat, not element-wise add
cluster_coordinate_list = [coordinate_list[idx] for idx in ids]
return pickle.dumps({'atoms': atoms, 'coordinates': cluster_coordinate_list, 'id': smi}, protocol=-1)
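# Rough flow of inner_smi2coords: embed one ETKDG conformer, randomize every
# rotatable torsion num_confs times, MMFF-relax each sample, then keep one
# representative conformer per Bayesian-GMM cluster over the torsion angles
# (padded back up to cluster_size when fewer clusters survive).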
def smi2coords(smi):
try:
return inner_smi2coords(smi)
except:
return None
def partion_csv(sep=100000):
smi_list = pd.read_csv('./clean_smi.csv.gz', names=['smi'])['smi'].tolist()
with numpy_seed(42):
np.random.shuffle(smi_list)
for i in range(0, len(smi_list), sep):
print(i, i+sep)
with open('./partions/smi_list_{}.txt'.format(i//sep), 'w') as f:
f.write('\n'.join(smi_list[i:i+sep]))
return
def write_partition(outpath='.', pid=0, nthreads=16):
with open('./partions/smi_list_{}.txt'.format(pid), 'r') as f:
smi_list = f.read().split('\n')[:100]
outputfilename = os.path.join(outpath, 'smi_{}.pkl'.format(pid))
try:
os.remove(outputfilename)
except:
pass
results = []
with Pool(nthreads) as pool:
for inner_output in tqdm(pool.imap(smi2coords, smi_list), total=len(smi_list)):
if inner_output is not None:
results.append(inner_output)
with open(outputfilename, 'wb') as f:
pickle.dump(results, f)
if __name__ == '__main__':
write_partition(outpath='./results', pid=int(sys.argv[1]), nthreads=16)
| 5,827 | Python | .py | 148 | 31.648649 | 124 | 0.6339 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,272 | preprocess.py | dptech-corp_NAG2G/unimol_plus/examples/molecules/preprocess.py | import os
import sys
import json
import glob
import pickle
import lmdb
import pandas as pd
import numpy as np
from rdkit import Chem
from tqdm import tqdm
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
import warnings
import contextlib
warnings.filterwarnings(action='ignore')
from multiprocessing import Pool
import timeout_decorator
from scipy.spatial.transform import Rotation
from sklearn.mixture import BayesianGaussianMixture
from rdkit.Chem import rdMolTransforms
import copy
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
def get_torsions(m):
m = Chem.RemoveHs(m)
torsionList = []
torsionSmarts = '[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]'
torsionQuery = Chem.MolFromSmarts(torsionSmarts)
matches = m.GetSubstructMatches(torsionQuery)
for match in matches:
idx2 = match[0]
idx3 = match[1]
bond = m.GetBondBetweenAtoms(idx2, idx3)
jAtom = m.GetAtomWithIdx(idx2)
kAtom = m.GetAtomWithIdx(idx3)
for b1 in jAtom.GetBonds():
if (b1.GetIdx() == bond.GetIdx()):
continue
idx1 = b1.GetOtherAtomIdx(idx2)
for b2 in kAtom.GetBonds():
if ((b2.GetIdx() == bond.GetIdx())
or (b2.GetIdx() == b1.GetIdx())):
continue
idx4 = b2.GetOtherAtomIdx(idx3)
# skip 3-membered rings
if (idx4 == idx1):
continue
# skip torsions that include hydrogens
if ((m.GetAtomWithIdx(idx1).GetAtomicNum() == 1)
or (m.GetAtomWithIdx(idx4).GetAtomicNum() == 1)):
continue
if m.GetAtomWithIdx(idx4).IsInRing():
torsionList.append((idx4, idx3, idx2, idx1))
break
else:
torsionList.append((idx1, idx2, idx3, idx4))
break
break
return torsionList
def SetDihedral(conf, atom_idx, new_vale):
rdMolTransforms.SetDihedralRad(conf, atom_idx[0], atom_idx[1], atom_idx[2], atom_idx[3], new_vale)
def GetDihedral(conf, atom_idx):
return rdMolTransforms.GetDihedralRad(conf, atom_idx[0], atom_idx[1], atom_idx[2], atom_idx[3])
@timeout_decorator.timeout(20)
def inner_smi2coords(smi, num_confs=100, seed=42, cluster_size=10):
coordinate_list, rotable_bonds_list = [], []
mol = Chem.MolFromSmiles(smi)
    mol = AllChem.AddHs(mol, addCoords=True)  # AddHs returns a new molecule; keep the H-added copy
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
wt = Descriptors.ExactMolWt(mol)
# skip for heavy molecules
if wt > 1000:
return None
# at least have two atoms
if len(atoms) < 2:
return None
# allconformers = AllChem.EmbedMultipleConfs(mol, numConfs=num_confs, randomSeed=seed, clearConfs=True, numThreads=1)
AllChem.EmbedMolecule(mol, randomSeed=seed)
rotable_bonds = get_torsions(mol)
for i in range(num_confs):
np.random.seed(i)
values = 3.1415926 * 2 * np.random.rand(len(rotable_bonds))
for idx in range(len(rotable_bonds)):
SetDihedral(mol.GetConformer(), rotable_bonds[idx], values[idx])
Chem.rdMolTransforms.CanonicalizeConformer(mol.GetConformer())
try:
AllChem.MMFFOptimizeMolecule(mol)
coordinate_list.append(mol.GetConformer().GetPositions().astype(np.float32))
rotable_bonds_value = [GetDihedral(mol.GetConformer(), rotable_bonds[idx]) for idx in range(len(rotable_bonds))]
rotable_bonds_list.append(rotable_bonds_value)
except:
continue
X = np.array(rotable_bonds_list)
clf = BayesianGaussianMixture(n_components=cluster_size, random_state=seed).fit(X)
probs = clf.predict_proba(X)
# filter redundant clusters
probs = probs[:,probs.mean(axis=0)!=0.0]
ids = probs.argmax(axis=0)
# padding to cluster_size
if len(ids) < cluster_size:
        ids = list(ids) + [ids[0]] * (cluster_size - len(ids))  # list concat, not element-wise add
cluster_coordinate_list = [coordinate_list[idx] for idx in ids]
print(ids)
return pickle.dumps({'atoms': atoms, 'coordinates': cluster_coordinate_list, 'id': smi}, protocol=-1)
def smi2coords(smi):
try:
return inner_smi2coords(smi)
except:
return None
def get_train_val(smi_list, val_size=100000):
with numpy_seed(42):
val_smi = np.random.choice(smi_list, replace=False, size=val_size)
np.random.shuffle(val_smi)
train_smi = list(set(smi_list) - set(val_smi))
np.random.shuffle(train_smi)
return train_smi, val_smi
def write_lmdb(outpath='.', nthreads=16):
small_size = 10000
smi_list = pd.read_csv('./clean_smi.csv.gz', names=['smi'], nrows=100000)['smi'].tolist()
print('original size: ', len(smi_list))
train_smi, val_smi = get_train_val(smi_list, val_size=100000)
print('train size: {}; val size: {}'.format(len(train_smi), len(val_smi)))
task_list = [('valid.lmdb', val_smi), \
('train.small.lmdb', train_smi[:small_size]), \
('train.lmdb', train_smi), \
]
for name, smi_list in task_list:
outputfilename = os.path.join(outpath, name)
try:
os.remove(outputfilename)
except:
pass
env_new = lmdb.open(
outputfilename,
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn_write = env_new.begin(write=True)
with Pool(nthreads) as pool:
i = 0
for inner_output in tqdm(pool.imap(smi2coords, smi_list), total=len(smi_list)):
if inner_output is not None:
txn_write.put(f'{i}'.encode("ascii"), inner_output)
i += 1
if i % 10000 == 0:
txn_write.commit()
txn_write = env_new.begin(write=True)
print('{} process {} lines'.format(name, i))
txn_write.commit()
env_new.close()
if __name__ == '__main__':
# write_lmdb(outpath='.', node_idx=int(sys.argv[1]), nthreads=int(sys.argv[2]))
smi = 'CC(=O)c1ccc2c(c1)N(c3ccccc3S2)CCCN4CCN(CC4)CCO'
inner_smi2coords(smi, num_confs=1000, seed=42, cluster_size=10)
# write_lmdb(outpath='./', nthreads=60) | 6,891 | Python | .py | 175 | 30.851429 | 124 | 0.615362 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,273 | preprocess.py | dptech-corp_NAG2G/unimol_plus/examples/mol_conformers/preprocess.py | import os
import sys
import json
import glob
import pickle
import lmdb
import pandas as pd
import numpy as np
from rdkit import Chem
from tqdm import tqdm
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
import warnings
import contextlib
warnings.filterwarnings(action='ignore')
from multiprocessing import Pool
import timeout_decorator
from scipy.spatial.transform import Rotation
from sklearn.mixture import BayesianGaussianMixture
from rdkit.Chem import rdMolTransforms
import copy
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
def get_torsions(m):
m = Chem.RemoveHs(m)
torsionList = []
torsionSmarts = '[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]'
torsionQuery = Chem.MolFromSmarts(torsionSmarts)
matches = m.GetSubstructMatches(torsionQuery)
for match in matches:
idx2 = match[0]
idx3 = match[1]
bond = m.GetBondBetweenAtoms(idx2, idx3)
jAtom = m.GetAtomWithIdx(idx2)
kAtom = m.GetAtomWithIdx(idx3)
for b1 in jAtom.GetBonds():
if (b1.GetIdx() == bond.GetIdx()):
continue
idx1 = b1.GetOtherAtomIdx(idx2)
for b2 in kAtom.GetBonds():
if ((b2.GetIdx() == bond.GetIdx())
or (b2.GetIdx() == b1.GetIdx())):
continue
idx4 = b2.GetOtherAtomIdx(idx3)
# skip 3-membered rings
if (idx4 == idx1):
continue
# skip torsions that include hydrogens
if ((m.GetAtomWithIdx(idx1).GetAtomicNum() == 1)
or (m.GetAtomWithIdx(idx4).GetAtomicNum() == 1)):
continue
if m.GetAtomWithIdx(idx4).IsInRing():
torsionList.append((idx4, idx3, idx2, idx1))
break
else:
torsionList.append((idx1, idx2, idx3, idx4))
break
break
return torsionList
def SetDihedral(conf, atom_idx, new_vale):
rdMolTransforms.SetDihedralRad(conf, atom_idx[0], atom_idx[1], atom_idx[2], atom_idx[3], new_vale)
def GetDihedral(conf, atom_idx):
return rdMolTransforms.GetDihedralRad(conf, atom_idx[0], atom_idx[1], atom_idx[2], atom_idx[3])
@timeout_decorator.timeout(20)
def inner_smi2coords(smi, num_confs=100, seed=42):
coordinate_list, target_coordinate_list = [], []
can_smi = Chem.CanonSmiles(smi)
mol = Chem.MolFromSmiles(can_smi)
    mol = AllChem.AddHs(mol, addCoords=True)  # AddHs returns a new molecule; keep the H-added copy
atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
wt = Descriptors.ExactMolWt(mol)
# skip for heavy molecules
if wt > 1000:
return None
# at least have two atoms
if len(atoms) < 2:
return None
# allconformers = AllChem.EmbedMultipleConfs(mol, numConfs=num_confs, randomSeed=seed, clearConfs=True, numThreads=1)
AllChem.EmbedMolecule(mol, randomSeed=seed)
rotable_bonds = get_torsions(mol)
for i in range(num_confs):
np.random.seed(i)
values = 3.1415926 * 2 * np.random.rand(len(rotable_bonds))
for idx in range(len(rotable_bonds)):
SetDihedral(mol.GetConformer(), rotable_bonds[idx], values[idx])
Chem.rdMolTransforms.CanonicalizeConformer(mol.GetConformer())
try:
coordinate_list.append(mol.GetConformer().GetPositions().astype(np.float32))
AllChem.MMFFOptimizeMolecule(mol)
target_coordinate_list.append(mol.GetConformer().GetPositions().astype(np.float32))
except:
continue
return pickle.dumps({'atoms': atoms,
'coordinates': coordinate_list,
'target_coordinates': target_coordinate_list,
'can_smi': can_smi,
'id': smi, }, protocol=-1)
def smi2coords(smi):
try:
return inner_smi2coords(smi)
except:
return None
def get_train_val(smi_list, val_size=100000):
with numpy_seed(42):
val_smi = np.random.choice(smi_list, replace=False, size=val_size)
np.random.shuffle(val_smi)
train_smi = list(set(smi_list) - set(val_smi))
np.random.shuffle(train_smi)
return train_smi, val_smi
def write_lmdb(outpath='.', nthreads=16, nrows=1000000):
small_size = 10000
val_size = min(10000, int(nrows*0.01))
smi_list = pd.read_csv('../molecules/clean_smi.csv.gz', names=['smi'], nrows=nrows)['smi'].tolist()
print('original size: ', len(smi_list))
train_smi, val_smi = get_train_val(smi_list, val_size=val_size)
print('train size: {}; val size: {}'.format(len(train_smi), len(val_smi)))
task_list = [('valid.lmdb', val_smi), \
('train.small.lmdb', train_smi[:small_size]), \
('train.lmdb', train_smi), \
]
for name, smi_list in task_list:
outputfilename = os.path.join(outpath, name)
try:
os.remove(outputfilename)
except:
pass
env_new = lmdb.open(
outputfilename,
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn_write = env_new.begin(write=True)
with Pool(nthreads) as pool:
i = 0
for inner_output in tqdm(pool.imap(smi2coords, smi_list), total=len(smi_list)):
if inner_output is not None:
txn_write.put(f'{i}'.encode("ascii"), inner_output)
i += 1
if i % 10000 == 0:
txn_write.commit()
txn_write = env_new.begin(write=True)
print('{} process {} lines'.format(name, i))
txn_write.commit()
env_new.close()
def write_v2(lmdb_inpath, lmdb_outpath):
env = lmdb.open(
lmdb_inpath,
subdir=False,
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=256,
)
txn = env.begin()
_keys = list(txn.cursor().iternext(values=False))
env_new = lmdb.open(
lmdb_outpath,
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn_write = env_new.begin(write=True)
i = 0
for idx in tqdm(range(len(_keys))):
datapoint_pickled = txn.get(f'{idx}'.encode("ascii"))
data = pickle.loads(datapoint_pickled)
if len(data['coordinates']) > 0:
if len(data['coordinates']) == len(data['target_coordinates']):
txn_write.put(f'{i}'.encode("ascii"), pickle.dumps(data, protocol=-1))
i += 1
else:
print('miss shape size: ', data['id'])
else:
print('miss size: ', data['id'])
if i % 10000 == 0:
txn_write.commit()
txn_write = env_new.begin(write=True)
txn_write.commit()
env_new.close()
env.close()
def check(lmdb_path):
env = lmdb.open(
lmdb_path,
subdir=False,
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=256,
)
txn = env.begin()
_keys = list(txn.cursor().iternext(values=False))
cnt = 0
for idx in tqdm(range(len(_keys))):
datapoint_pickled = txn.get(f'{idx}'.encode("ascii"))
data = pickle.loads(datapoint_pickled)
# assert len(data["coordinates"]) == len(data["target_coordinates"]), print(data['id'], len(data["coordinates"]), len(data["target_coordinates"]))
if len(data['coordinates']) != len(data['target_coordinates']):
cnt += 1
print(cnt)
env.close()
if __name__ == '__main__':
# smi = 'CC(=O)c1ccc2c(c1)N(c3ccccc3S2)CCCN4CCN(CC4)CCO'
# inner_smi2coords(smi, num_confs=1000, seed=42, cluster_size=10)
# write_lmdb(outpath='./', nthreads=60, nrows=1000000)
write_v2(lmdb_inpath='./train.lmdb', lmdb_outpath='./train.v2.lmdb')
write_v2(lmdb_inpath='./valid.lmdb', lmdb_outpath='./valid.v2.lmdb')
write_v2(lmdb_inpath='./train.small.lmdb', lmdb_outpath='./train.small.v2.lmdb')
# check('./train.lmdb') | 8,758 | Python | .py | 234 | 28.628205 | 154 | 0.595199 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,274 | replace_pos_pred.py | dptech-corp_NAG2G/unimol_plus/scripts/replace_pos_pred.py | import logging
import os
import sys
import pickle
import torch
import lmdb
import gzip
import numpy as np
from unicore import checkpoint_utils, distributed_utils, options, utils
from unicore.logging import progress_bar
from unicore import tasks
from multiprocessing import Pool
from tqdm import tqdm
input_data = sys.argv[1]
output_data = sys.argv[2]
subset = sys.argv[3]
top = int(sys.argv[4])
def load(num_pickles=8, top=-1):
pred_pos_dict = {}
for i in range(num_pickles):
pickle_path = os.path.join(output_data, subset + "_{}.pkl".format(i))
pickle_data = pickle.load(open(pickle_path, "rb"))
for x in pickle_data:
id, pos_pred, plddt = x
for j in range(len(id)):
cur_id = int(id[j])
if cur_id not in pred_pos_dict:
pred_pos_dict[cur_id] = []
pred_pos_dict[cur_id].append([pos_pred[j], plddt[j]])
if top > 0:
top_pred_pos_dict = {}
for key in pred_pos_dict:
cur_list = pred_pos_dict[key]
cur_list.sort(key=lambda x: x[1], reverse=True)
top_pred_pos_dict[key] = [x[0] for x in cur_list[:top]]
return top_pred_pos_dict
else:
top_pred_pos_dict = {}
for key in pred_pos_dict:
cur_list = pred_pos_dict[key]
top_pred_pos_dict[key] = [x[0] for x in cur_list]
return top_pred_pos_dict
pred_pos_dict = load(top=top)
split_path = os.path.join(input_data, subset + ".lmdb")
input_env = lmdb.open(
split_path,
subdir=False,
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=256,
)
with input_env.begin() as txn:
keys = list(txn.cursor().iternext(values=False))
os.system("mkdir -p {}".format(output_data))
save_path = os.path.join(output_data, subset + ".lmdb")
os.system("rm -f {}".format(save_path))
output_env = lmdb.open(
save_path,
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn_write = output_env.begin(write=True)
print("start to write lmdb")
def process(key):
datapoint_pickled = input_env.begin().get(key)
data = pickle.loads(gzip.decompress(datapoint_pickled))
cur_id = int.from_bytes(key, "big") # int(np.frombuffer(key, dtype=np.int64))
old_pos = data["input_pos"]
num_atoms = old_pos[0].shape[0]
new_pos = [x[:num_atoms, :] for x in pred_pos_dict[cur_id]]
# assert len(old_pos) == len(new_pos)
data["input_pos"] = new_pos
val = gzip.compress(pickle.dumps(data))
return key, val
i = 0
with Pool(64) as pool:
for ret in tqdm(pool.imap_unordered(process, keys), total=len(keys)):
key, val = ret
txn_write.put(key, val)
if (i + 1) % 10000 == 0:
txn_write.commit()
txn_write = output_env.begin(write=True)
i += 1
txn_write.commit()
output_env.close()
print("Done inference! ")
| 2,985 | Python | .py | 93 | 26.612903 | 82 | 0.630775 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,275 | replace_keys.py | dptech-corp_NAG2G/unimol_plus/scripts/replace_keys.py | import logging
import os
import sys
import pickle
import torch
import lmdb
import gzip
import numpy as np
from unicore import checkpoint_utils, distributed_utils, options, utils
from unicore.logging import progress_bar
from unicore import tasks
input_data = sys.argv[1]
output_data = sys.argv[2]
subset = sys.argv[3]
split_path = os.path.join(input_data, subset + ".lmdb")
input_env = lmdb.open(
split_path,
subdir=False,
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=256,
)
with input_env.begin() as txn:
keys = list(txn.cursor().iternext(values=False))
os.system("mkdir -p {}".format(output_data))
save_path = os.path.join(output_data, subset + ".lmdb")
os.system("rm -f {}".format(save_path))
output_env = lmdb.open(
save_path,
subdir=False,
readonly=False,
lock=False,
readahead=False,
meminit=False,
max_readers=1,
map_size=int(100e9),
)
txn_write = output_env.begin(write=True)
print("start to write lmdb")
for i, key in enumerate(keys):
val = input_env.begin().get(key)
cur_id = int(np.frombuffer(key, dtype=np.int64))
new_key = cur_id.to_bytes(4, "big")
txn_write.put(new_key, val)
if (i + 1) % 10000 == 0:
txn_write.commit()
txn_write = output_env.begin(write=True)
print("Done {} datapoints".format(i + 1))
txn_write.commit()
output_env.close()
print("Done! ")
| 1,408 | Python | .py | 53 | 23.45283 | 71 | 0.696521 | dptech-corp/NAG2G | 8 | 4 | 2 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,276 | __init__.py | nkchocoai_ComfyUI-PromptUtilities/__init__.py | import configparser
import os
from .py.node.const import *
from .py.node.format import *
from .py.node.preset import *
from .py.node.weight import *
from .py.node.replace import *
from .py.node.random import *
from .py.server import *
from .py.preset import PresetManager
NODE_CLASS_MAPPINGS = {
"PromptUtilitiesFormatString": PromptUtilitiesFormatString,
"PromptUtilitiesJoinStringList": PromptUtilitiesJoinStringList,
"PromptUtilitiesLoadPreset": PromptUtilitiesLoadPreset,
"PromptUtilitiesLoadPresetAdvanced": PromptUtilitiesLoadPresetAdvanced,
"PromptUtilitiesRandomPreset": PromptUtilitiesRandomPreset,
"PromptUtilitiesRandomPresetAdvanced": PromptUtilitiesRandomPresetAdvanced,
"PromptUtilitiesConstString": PromptUtilitiesConstString,
"PromptUtilitiesConstStringMultiLine": PromptUtilitiesConstStringMultiLine,
"PromptUtilitiesPromptWeight": PromptUtilitiesPromptWeight,
"PromptUtilitiesRoundPromptWeight": PromptUtilitiesRoundPromptWeight,
"PromptUtilitiesReplaceOrInsertTag": PromptUtilitiesReplaceOrInsertTag,
"PromptUtilitiesSampleTags": PromptUtilitiesSampleTags,
"PromptUtilitiesSampleTagsWithWeight": PromptUtilitiesSampleTagsWithWeight,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"PromptUtilitiesFormatString": "Format String",
"PromptUtilitiesJoinStringList": "Join String List",
"PromptUtilitiesLoadPreset": "Load Preset",
"PromptUtilitiesLoadPresetAdvanced": "Load Preset (Advanced)",
"PromptUtilitiesRandomPreset": "Random Preset",
"PromptUtilitiesRandomPresetAdvanced": "Random Preset (Advanced)",
"PromptUtilitiesConstString": "Const String",
"PromptUtilitiesConstStringMultiLine": "Const String (multi line)",
"PromptUtilitiesPromptWeight": "Prompt Weight",
"PromptUtilitiesRoundPromptWeight": "Round Prompt Weight",
"PromptUtilitiesReplaceOrInsertTag": "Replace or Insert Tag",
"PromptUtilitiesSampleTags": "Sample Tags",
"PromptUtilitiesSampleTagsWithWeight": "Sample Tags With Weight",
}
WEB_DIRECTORY = "./js"
__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
# output csv presets as wildcards.
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), "config.ini"))
if config.has_section("default"):
default_config = config["default"]
output_dir = default_config.get("output_csv_presets_as_wildcards")
if output_dir:
output_dir = os.path.abspath(output_dir)
print("output presets as wildcard.", output_dir)
PresetManager.output_as_wildcard(output_dir)
| 2,578 | Python | .py | 52 | 45.769231 | 80 | 0.793651 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,277 | preset.py | nkchocoai_ComfyUI-PromptUtilities/py/preset.py | import csv
import json
import os
import yaml
import numpy as np
import folder_paths
class PresetManagerBase:
_presets = None
custom_nodes_dir = folder_paths.get_folder_paths("custom_nodes")[0]
file_extensions = []
@classmethod
def get_presets_dir(cls):
return os.path.join(
cls.custom_nodes_dir, "ComfyUI-PromptUtilities", cls.presets_dir
)
@classmethod
def get_presets(cls):
if cls._presets is None:
cls.load_presets()
return cls._presets
@classmethod
def get_preset(cls, key):
presets = cls.get_presets()
return presets[key]
@classmethod
def get_presets_by_filename(cls, filename):
presets = cls.get_presets()
presets_by_name = [
v for k, v in presets.items() if k.split(" : ")[0] == filename
]
return presets_by_name
@classmethod
def get_preset_filename_list(cls):
files, _ = folder_paths.recursive_search(
cls.get_presets_dir(), excluded_dir_names=[".git"]
)
return folder_paths.filter_files_extensions(files, cls.file_extensions)
@classmethod
def load_presets(cls):
cls._presets = dict()
preset_filename_list = cls.get_preset_filename_list()
for preset_filename in preset_filename_list:
with open(
os.path.join(cls.get_presets_dir(), preset_filename),
"r",
encoding="utf-8",
) as f:
cls.load_file(f, preset_filename)
class PresetManager(PresetManagerBase):
presets_dir = "presets"
file_extensions = [".csv", ".yml"]
@classmethod
def load_file(cls, f, preset_filename):
ext = os.path.splitext(preset_filename)[1]
if ext == ".csv":
reader = csv.DictReader(f)
for row in reader:
cls._presets[f"{preset_filename[:-4]} : {row['name']}"] = row["prompt"]
elif ext == ".yml":
data = yaml.safe_load(f)
if isinstance(data, list):
for row in data:
cls._presets[f"{preset_filename[:-4]} : {row.split(',')[0]}"] = row
elif isinstance(data, dict):
for k, v in data.items():
if isinstance(v, dict):
for k2, v2 in v.items():
cls._presets[f"{preset_filename[:-4]} : {k}.{k2}"] = v2
elif isinstance(v, list):
for row in v:
cls._presets[
f"{preset_filename[:-4]} : {k}.{row.split(',')[0]}"
] = row
else:
cls._presets[f"{preset_filename[:-4]} : {k}"] = v
@classmethod
def random_preset(cls, filename, seed):
random_gen = np.random.default_rng(seed)
presets = cls.get_presets_by_filename(filename[:-4])
preset = random_gen.choice(presets)
return preset
@classmethod
def output_as_wildcard(cls, output_dir):
preset_filename_list = cls.get_preset_filename_list()
for preset_filename in preset_filename_list:
ext = os.path.splitext(preset_filename)[1]
if ext == ".csv":
preset_file_path = os.path.join(cls.get_presets_dir(), preset_filename)
with open(preset_file_path, "r", encoding="utf-8") as f_in:
reader = csv.DictReader(f_in)
output_file_path = os.path.join(
output_dir, preset_filename[:-4] + ".txt"
)
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
with open(output_file_path, "w", encoding="utf-8") as f_out:
print("write:", output_file_path)
f_out.write("\n".join([row["prompt"] for row in reader]))
elif ext == ".yml":
preset_file_path = os.path.join(cls.get_presets_dir(), preset_filename)
with open(preset_file_path, "r", encoding="utf-8") as f_in:
data = yaml.safe_load(f_in)
output_file_path = os.path.join(
output_dir, preset_filename[:-4] + ".txt"
)
with open(output_file_path, "w", encoding="utf-8") as f_out:
if isinstance(data, list):
items = data
elif isinstance(data, dict):
items = []
for v in data.values():
if isinstance(v, dict):
for v2 in v.values():
items.append(v2)
elif isinstance(v, list):
for row in v:
items.append(row)
else:
items.append(v)
print("write:", output_file_path)
f_out.write("\n".join([item for item in items]))
class PresetManagerAdvanced(PresetManagerBase):
presets_dir = "advanced_presets"
file_extensions = [".json"]
@classmethod
def load_file(cls, f, preset_filename):
data = json.load(f)
for k, v in data.items():
cls._presets[f"{preset_filename[:-5]} : {k}"] = v
@classmethod
def parse_preset(cls, preset):
positive_prompt = preset.get("positive_prompt", "")
negative_prompt = preset.get("negative_prompt", "")
lora = preset.get("lora", None)
lora_name, strength_model, sterngth_clip = cls.load_lora(lora)
loras = preset.get("loras", None)
if loras is None and lora is not None:
lora_stack = [(lora_name, strength_model, sterngth_clip)]
else:
lora_stack = cls.load_loras(loras)
return (
positive_prompt,
negative_prompt,
lora_name,
strength_model,
sterngth_clip,
lora_stack,
)
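    # Illustrative shape of an advanced preset entry as parse_preset expects it
    # (field names inferred from the lookups in parse_preset and load_lora;
    # "weight" is shorthand for equal strength_model / strength_clip):
    # {
    #     "positive_prompt": "1girl, masterpiece",
    #     "negative_prompt": "lowres",
    #     "loras": [{"lora_name": "example.safetensors", "weight": 0.8}]
    # }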
@classmethod
def load_lora(cls, lora):
if lora is None:
return "None", 0, 0
lora_name = lora.get("lora_name", "None")
if "weight" in lora:
strength_model = lora.get("weight", 0)
sterngth_clip = lora.get("weight", 0)
else:
strength_model = lora.get("strength_model", 0)
sterngth_clip = lora.get("strength_clip", 0)
return lora_name, strength_model, sterngth_clip
@classmethod
def load_loras(cls, loras):
lora_stack = []
if loras is None:
return lora_stack
for lora in loras:
lora_name, strength_model, sterngth_clip = cls.load_lora(lora)
lora_stack.append((lora_name, strength_model, sterngth_clip))
return lora_stack
@classmethod
def random_preset(cls, filename, seed):
random_gen = np.random.default_rng(seed)
presets = cls.get_presets_by_filename(filename[:-5])
preset = random_gen.choice(presets)
return preset
| 7,260 | Python | .py | 174 | 28.534483 | 87 | 0.520771 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,278 | server.py | nkchocoai_ComfyUI-PromptUtilities/py/server.py | import json
from aiohttp import web
import server
from .preset import PresetManager, PresetManagerAdvanced
@server.PromptServer.instance.routes.post("/prompt_utilities/refresh")
async def refresh_preset_manager(request):
PresetManager.load_presets()
PresetManagerAdvanced.load_presets()
return web.Response(status=200)
def on_prompt(json_data):
prompt = json_data["prompt"]
for k, v in prompt.items():
if "class_type" in v and (
v["class_type"]
in ["PromptUtilitiesRandomPreset", "PromptUtilitiesRandomPresetAdvanced"]
):
inputs = v["inputs"]
input_seed = int(inputs["seed"])
input_filename = inputs["filename"]
if v["class_type"] == "PromptUtilitiesRandomPreset":
inputs["choice_preset"] = PresetManager.random_preset(
input_filename, input_seed
)
elif v["class_type"] == "PromptUtilitiesRandomPresetAdvanced":
preset = PresetManagerAdvanced.random_preset(input_filename, input_seed)
inputs["choice_preset"] = json.dumps(preset)
server.PromptServer.instance.send_sync(
"prompt-utilities-feedback",
{
"node_id": k,
"widget_name": "choice_preset",
"type": "STRING",
"value": inputs["choice_preset"],
},
)
return json_data
server.PromptServer.instance.add_on_prompt_handler(on_prompt)
| 1,554 | Python | .py | 37 | 30.972973 | 88 | 0.603586 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,279 | weight.py | nkchocoai_ComfyUI-PromptUtilities/py/node/weight.py | import re
from .base import BaseNode
class PromptUtilitiesPromptWeight(BaseNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"prompt1": ("STRING", {"default": "", "multiline": False}),
"weight1": (
"FLOAT",
{"default": 1.0, "min": -100, "max": 100, "step": 0.1},
),
},
"optional": {
"prompt2": ("STRING", {"default": "", "multiline": False}),
"weight2": (
"FLOAT",
{"default": 1.0, "min": -100, "max": 100, "step": 0.1},
),
"prompt3": ("STRING", {"default": "", "multiline": False}),
"weight3": (
"FLOAT",
{"default": 1.0, "min": -100, "max": 100, "step": 0.1},
),
"prompt4": ("STRING", {"default": "", "multiline": False}),
"weight4": (
"FLOAT",
{"default": 1.0, "min": -100, "max": 100, "step": 0.1},
),
"prompt_weight": (
"STRING",
{"default": "", "multiline": False},
),
},
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("prompt",)
FUNCTION = "gen_prompt_weight"
def gen_prompt_weight(self, **kwargs):
prompts = []
if kwargs.get("prompt_weight", ""):
prompts.append(kwargs["prompt_weight"])
for i in range(4):
prompt = kwargs[f"prompt{i+1}"]
weight = kwargs[f"weight{i+1}"]
if prompt == "" or weight == 0.0:
continue
if weight == 1.0:
prompts.append(prompt)
else:
prompts.append(f"({prompt}:{weight})")
return (", ".join(prompts),)
class PromptUtilitiesRoundPromptWeight(BaseNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"prompt": ("STRING", {"default": "", "multiline": False}),
"n": ("INT", {"default": 3, "min": 0, "max": 100}),
},
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("prompt",)
FUNCTION = "round_prompt_weight"
def round_prompt_weight(self, prompt, n):
def round_match(match):
number = float(match.group(1))
rounded = round(number, n)
return f"{rounded:.10f}".rstrip("0").rstrip(".")
pattern = r"([-+]?\d*\.\d+)"
return (re.sub(pattern, round_match, prompt),)
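    # Illustrative behaviour:
    # PromptUtilitiesRoundPromptWeight().round_prompt_weight("(flower:0.66667), sky", 2)
    #   -> ("(flower:0.67), sky",)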
| 2,639 | Python | .py | 71 | 24.422535 | 75 | 0.435395 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,280 | const.py | nkchocoai_ComfyUI-PromptUtilities/py/node/const.py | from .base import BaseNode
class PromptUtilitiesConstStringBase(BaseNode):
RETURN_TYPES = ("STRING",)
FUNCTION = "get_string"
def get_string(self, string):
return (string,)
class PromptUtilitiesConstString(PromptUtilitiesConstStringBase):
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"string": ("STRING", {"default": "", "multiline": False})
}
}
class PromptUtilitiesConstStringMultiLine(PromptUtilitiesConstStringBase):
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"string": ("STRING", {"default": "", "multiline": True})
}
} | 706 | Python | .py | 22 | 24.136364 | 74 | 0.605302 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,281 | format.py | nkchocoai_ComfyUI-PromptUtilities/py/node/format.py | from .base import BaseNode
class PromptUtilitiesFormatString(BaseNode):
@classmethod
def INPUT_TYPES(s):
input_types = {
"required": {
"prompt": ("STRING", {"default": "[1], [2]", "display": "prompt"}),
},
"optional": {
"arg1": ("STRING", {"forceInput": True}),
},
}
return input_types
RETURN_TYPES = ("STRING",)
FUNCTION = "format"
def format(self, prompt, **kwargs):
result = prompt
for i in range(1, len(kwargs) + 1):
result = result.replace(f"[{i}]", kwargs[f"arg{i}"])
return (result,)
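    # Illustrative behaviour: "[1]", "[2]", ... are replaced by the connected
    # string inputs in order.
    # PromptUtilitiesFormatString().format("[1], [2]", arg1="1girl", arg2="smile")
    #   -> ("1girl, smile",)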
class PromptUtilitiesJoinStringList(BaseNode):
@classmethod
def INPUT_TYPES(s):
input_types = {
"required": {
"separator": ("STRING", {"default": ", ", "display": "separator"}),
},
"optional": {
"arg1": ("STRING", {"forceInput": True}),
},
}
return input_types
RETURN_TYPES = ("STRING",)
FUNCTION = "join"
def join(self, separator, **kwargs):
# join without empty strings.
result = separator.join([s for s in kwargs.values() if s])
return (result,)
| 1,258 | Python | .py | 38 | 23.736842 | 83 | 0.514876 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,282 | preset.py | nkchocoai_ComfyUI-PromptUtilities/py/node/preset.py | import json
import numpy as np
from .base import BaseNode
from ..preset import PresetManager, PresetManagerAdvanced
import folder_paths
class PromptUtilitiesLoadPreset(BaseNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"preset": (list(PresetManager.get_presets().keys()), ),
}
}
RETURN_TYPES = ("STRING",)
FUNCTION = "load_preset"
def load_preset(self, preset):
prompt = PresetManager.get_preset(preset)
return (prompt,)
class PromptUtilitiesLoadPresetAdvanced(BaseNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"preset": (list(PresetManagerAdvanced.get_presets().keys()), ),
}
}
RETURN_TYPES = ("STRING","STRING",folder_paths.get_filename_list("loras"),"FLOAT","FLOAT","LORA_STACK",)
RETURN_NAMES = ("positive prompt","negative prompt","lora name","strength model","strength clip","lora stack",)
FUNCTION = "load_preset"
def load_preset(self, preset):
preset_ = PresetManagerAdvanced.get_preset(preset)
positive_prompt, negative_prompt, lora_name, strength_model, sterngth_clip, lora_stack = \
PresetManagerAdvanced.parse_preset(preset_)
return (positive_prompt, negative_prompt, lora_name, strength_model, sterngth_clip, lora_stack)
class PromptUtilitiesRandomPreset(BaseNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"filename": (list(PresetManager.get_preset_filename_list()), ),
"choice_preset": ("STRING", {"multiline": True, "dynamicPrompts": False}),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
}
}
RETURN_TYPES = ("STRING",)
FUNCTION = "random_preset"
def random_preset(self, **kwargs):
choice_preset = kwargs['choice_preset']
return (choice_preset,)
class PromptUtilitiesRandomPresetAdvanced(BaseNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"filename": (list(PresetManagerAdvanced.get_preset_filename_list()), ),
"choice_preset": ("STRING", {"multiline": True, "dynamicPrompts": False}),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
}
}
RETURN_TYPES = ("STRING","STRING",folder_paths.get_filename_list("loras"),"FLOAT","FLOAT","LORA_STACK",)
RETURN_NAMES = ("positive prompt","negative prompt","lora name","strength model","strength clip","lora stack",)
FUNCTION = "random_preset"
def random_preset(self, **kwargs):
choice_preset = kwargs['choice_preset']
positive_prompt, negative_prompt, lora_name, strength_model, sterngth_clip, lora_stack = \
PresetManagerAdvanced.parse_preset(json.loads(choice_preset))
return (positive_prompt, negative_prompt, lora_name, strength_model, sterngth_clip, lora_stack) | 3,092 | Python | .py | 67 | 36.761194 | 115 | 0.633526 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,283 | replace.py | nkchocoai_ComfyUI-PromptUtilities/py/node/replace.py | from .base import BaseNode
import re
from comfy.sd1_clip import token_weights
class PromptUtilitiesReplaceOrInsertTag(BaseNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"text": (
"STRING",
{"multiline": True, "default": "", "forceInput": True},
),
"pattern": ("STRING", {"default": "", "multiline": False}),
"value": ("STRING", {"default": "", "multiline": False}),
"mode": (["replace", "insert"],),
"inherit_weight": ("BOOLEAN", {"default": False}),
},
}
RETURN_TYPES = ("STRING",)
FUNCTION = "replace_tag"
def replace_tag(self, text, pattern, value, mode, inherit_weight):
text = text.replace("\(", "{[{[")
text = text.replace("\)", "}]}]")
weights = token_weights(text, 1.0)
tags = []
for t, weight in weights:
for tag in t.split(","):
tag = tag.strip()
if tag:
tags.append((tag, weight))
tags2 = []
for i, v in enumerate(tags):
if re.match(pattern, v[0]):
w = 1.0
if inherit_weight:
w = v[1]
if mode == "insert":
tags2.append(v)
tags2.append((value, w))
else:
tags2.append(v)
result = []
for tag, weight in tags2:
if weight == 1.0:
result.append(tag)
else:
result.append(f"({tag}:{weight})")
s = ", ".join(result)
s = s.replace("{[{[", "\(")
s = s.replace("}]}]", "\)")
return (s,)
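    # Rough sketch of the intended behaviour (assuming comfy.sd1_clip.token_weights
    # splits "(tag:w)" spans into (text, weight) pairs):
    # replace_tag("1girl, (red hair:1.2)", "red hair", "long hair", "insert", True)
    #   keeps "(red hair:1.2)" and appends "(long hair:1.2)" right after it.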
| 1,772 | Python | .py | 51 | 22.568627 | 75 | 0.439067 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,284 | random.py | nkchocoai_ComfyUI-PromptUtilities/py/node/random.py | from .base import BaseNode
import numpy as np
class PromptUtilitiesSampleTags(BaseNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"tags": ("STRING", {"default": "", "multiline": True}),
"tags_delimiter": (("new line", ","),),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}),
"max_k": ("INT", {"default": 1, "min": 1, "max": 0xFFFFFFFFFFFFFFFF}),
"min_k": ("INT", {"default": 1, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}),
}
}
RETURN_TYPES = ("STRING",)
FUNCTION = "sample_tags"
def sample_tags(self, tags, tags_delimiter, seed, max_k, min_k):
sampled_tags = self._sample_tags(tags, tags_delimiter, seed, max_k, min_k)
return (", ".join(sampled_tags),)
def _sample_tags(self, tags_s, tags_delimiter, seed, max_k, min_k):
assert max_k >= min_k, "max_k must be greater than or equal to min_k."
if tags_delimiter == "new line":
tags = [tag.strip() for tag in tags_s.split("\n")]
elif tags_delimiter == ",":
tags = [tag.strip() for tag in tags_s.split(",")]
random_gen = np.random.default_rng(seed)
k = random_gen.integers(min_k, max_k + 1)
random_gen.shuffle(tags)
return tags[:k]
class PromptUtilitiesSampleTagsWithWeight(PromptUtilitiesSampleTags):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"tags": ("STRING", {"default": "", "multiline": True}),
"tags_delimiter": (("new line", ","),),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}),
"max_k": ("INT", {"default": 1, "min": 1, "max": 0xFFFFFFFFFFFFFFFF}),
"min_k": ("INT", {"default": 1, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}),
"max_w": (
"FLOAT",
{"default": 1.0, "min": -100, "max": 100, "step": 0.01},
),
"min_w": (
"FLOAT",
{"default": 0.8, "min": -100, "max": 100, "step": 0.01},
),
"step_w": (
"FLOAT",
{"default": 0.1, "min": -100, "max": 100, "step": 0.01},
),
}
}
RETURN_TYPES = ("STRING",)
FUNCTION = "sample_tags"
def sample_tags(
self, tags, tags_delimiter, seed, max_k, min_k, max_w, min_w, step_w
):
sampled_tags = self._sample_tags(tags, tags_delimiter, seed, max_k, min_k)
random_gen = np.random.default_rng(seed)
weights = random_gen.choice(
np.arange(min_w, max_w, step_w),
size=len(sampled_tags),
)
sampled_tags = [f"({t}:{round(w, 2)})" for t, w in zip(sampled_tags, weights)]
return (", ".join(sampled_tags),)
| 2,937 | Python | .py | 66 | 33.015152 | 86 | 0.496327 | nkchocoai/ComfyUI-PromptUtilities | 8 | 4 | 3 | GPL-3.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,285 | manage.py | Aftendo_Afternote/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ugoflip.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 663 | Python | .py | 18 | 30.944444 | 73 | 0.677067 | Aftendo/Afternote | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,286 | urls.py | Aftendo_Afternote/flip/urls.py | from django.urls import path
from . import views
urlpatterns = [
path('auth', views.auth, name='auth'),
path('<str:country>/<str:file>.txt', views.content, name="eula"),
path('<str:country>/confirm/<str:file>.txt', views.content, name="garbage"),
path('flipnote/<str:file>.ppm', views.ppmloader, name="ppmloader"),
path('flipnote/<str:file>.info', views.info, name="info"),
path('flipnote/<str:file>.dl', views.dl, name="info"),
path('flipnote/<str:file>.htm', views.flipnote_info, name="info"),
path('flipnote/<str:file>.star', views.star, name="star"),
path('eula_list.tsv', views.eula_list, name="propaganda"),
path('index.ugo', views.index, name="index"),
path('newest.ugo', views.newest_list, name="newest"),
path('hot.ugo', views.popular_list, name="hot"),
path('liked.ugo', views.liked_list, name="liked"),
path('channels.ugo', views.categories, name="categories"),
path('channels/search.ugo', views.others, name="others"),
path('channels/<str:internal_id>.ugo', views.channels, name="channels"),
path('channel/<str:internal_id>.ugo', views.channel, name="channel"),
path('channel/<str:internal_id>.post', views.post_flip, name="post_flip"),
path('signin.htm', views.signin, name="signin"),
path('signin/step1.kbd', views.signin_step1, name="step1"),
path('signin/step2.kbd', views.signin_step2, name="step2"),
path('error_get.htm', views.error_get, name="error_get"),
path('static/<str:dir>/<str:file>', views.static, name="static")
] | 1,536 | Python | .py | 27 | 52.444444 | 80 | 0.669761 | Aftendo/Afternote | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,287 | apps.py | Aftendo_Afternote/flip/apps.py | from django.apps import AppConfig
class FlipConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'flip'
| 140 | Python | .py | 4 | 31.5 | 56 | 0.768657 | Aftendo/Afternote | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:09 PM (Europe/Amsterdam) |
2,287,288 | views.py | Aftendo_Afternote/flip/views.py | from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ObjectDoesNotExist
from util.ugo import UgoMenu
from util.ppm import PPMParser
import os.path, io, random, string, datetime
from django.contrib.auth import authenticate
from db.models import *
@csrf_exempt
def auth(request, reg):
resp = HttpResponse(content_type="text/plain; charset=utf-16le")
if "X-DSi-SID" in request.headers and "X-DSi-MAC" in request.headers and "X-DSi-ID" in request.headers:
resp.headers['X-DSi-SID'] = 'nobitches?'
resp.headers['X-DSi-New-Notices'] = 0
resp.headers['X-DSi-Unread-Notices'] = 0
try:
session = Session.objects.get(token=request.headers['X-DSi-SID'])
session.fsid = request.headers['X-DSi-ID']
session.mac = request.headers['X-DSi-MAC']
session.save()
except:
resp.headers['X-DSi-Dialog-Type'] = 1
resp.write("Invalid Session. Please reboot the app.")
return resp
try:
user = User.objects.get(fsid=request.headers['X-DSi-ID'], mac=request.headers['X-DSi-MAC'])
            Session.objects.filter(user=user).delete()  # drop any stale sessions tied to this user
if user.ban:
resp.headers['X-DSi-Dialog-Type'] = 1
resp.write("Yo mama so stupid,\nthat you got banned off Afternote")
return resp
session.user = user
session.save()
except ObjectDoesNotExist:
pass
else:
token = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(16))
Session.objects.create(token=token)
resp.headers['X-DSi-Auth-Challenge'] = 'aftendoo'
resp.headers['X-DSi-SID'] = token
resp.headers['X-DSi-New-Notices'] = 0
resp.headers['X-DSi-Unread-Notices'] = 0
#resp.headers['X-DSi-Dialog-Type'] = 1
return resp
@csrf_exempt
def content(request, reg, country, file):
if os.path.exists("./cfg/"+file+".txt"):
with open("./cfg/"+file+".txt", "r+") as file:
return HttpResponse(file.read(), content_type='text/plain; charset=utf-16le')
else:
return HttpResponse("WARNING: cfg/"+file+".txt does not exists.", content_type='text/plain; charset=utf-16le')
@csrf_exempt
def eula_list(request, reg):
return HttpResponse("RQBuAGcAbABpAHMAaAA= en", content_type='text/plain; charset=utf-16le')
@csrf_exempt
def index(request, reg):
try:
session = Session.objects.get(token=request.headers['X-DSi-SID'])
except:
return HttpResponse(status=403)
ugo = UgoMenu()
ugo.set_type("0")
if session.user != None:
ugo.add_item({"label": "Browse Flipnotes", "url": request.build_absolute_uri('/')+"ds/v2-eu/hot.uls", "icon": "100"})
ugo.add_item({"label": "Channels", "url": request.build_absolute_uri('/')+"ds/v2-eu/channels.uls", "icon": "101"})
else:
ugo.add_item({"label": "Sign In", "url": request.build_absolute_uri('/')+"ds/v2-eu/signin.htm", "icon": "104"})
return HttpResponse(ugo.get_ugo())
@csrf_exempt
def categories(request, reg):
categories = Category.objects.all()[:8]
ugo = UgoMenu()
ugo.set_type("0")
for category in categories:
ugo.add_item({"label": category.name, "url": request.build_absolute_uri('/')+"ds/v2-eu/channels/"+category.internal_id+".uls"})
return HttpResponse(ugo.get_ugo())
@csrf_exempt
def channels(request, reg, internal_id):
try:
category = Category.objects.get(internal_id=internal_id)
except:
return HttpResponse(status=403)
channels = Channel.objects.filter(category=category)
ugo = UgoMenu()
ugo.set_meta("uppertitle", category.name)
ugo.set_type("1")
for channel in channels:
ugo.add_item({"label": channel.name, "url": request.build_absolute_uri('/')+"ds/v2-eu/channel/"+channel.internal_id+".uls"})
return HttpResponse(ugo.get_ugo())
@csrf_exempt
def others(request, reg):
categories = Category.objects.all()[8:]
ugo = UgoMenu()
ugo.set_meta("uppertitle", "Other channels")
ugo.set_type("1")
for category in categories:
ugo.add_item({"label": category.name, "url": request.build_absolute_uri('/')+"ds/v2-eu/channels/"+category.internal_id+".uls"})
return HttpResponse(ugo.get_ugo())
@csrf_exempt
def channel(request, reg, internal_id):
try:
channel = Channel.objects.get(internal_id=internal_id)
except:
return HttpResponse(status=403)
if not request.GET.get("page"):
page = 0
else:
page = int(request.GET.get("page"))
flip_count = Flipnote.objects.filter(channel=channel).count()
    flips = Flipnote.objects.filter(channel=channel).order_by("-id")[page*50:(page+1)*50]
ugo = UgoMenu()
ugo.set_type("2")
ugo.set_meta("uppertitle", channel.name)
ugo.set_meta("uppersubleft", "Flipnotes")
ugo.set_meta("uppersubright", str(flip_count))
if not channel.locked:
ugo.add_button({"label": "Post here", "url": request.build_absolute_uri('/')+"ds/v2-eu/channel/"+channel.internal_id+".post"})
if page != 0:
ugo.add_item({"label": "Previous", "url": request.build_absolute_uri('/')+"ds/v2-eu/channel/"+channel.internal_id+".uls?page="+str(page-1)})
for flip in flips:
if os.path.exists("./files/ppm/"+flip.real_filename+".ppm"):
with open("./files/ppm/"+flip.real_filename+".ppm", "rb+") as file:
parser = PPMParser()
if parser.load(file):
ugo.add_item({"url": request.build_absolute_uri('/')+"ds/v2-eu/flipnote/"+parser.current_filename+".ppm", "file": "./files/ppm/"+parser.current_filename+".ppm", "lock": str(flip.is_locked), "counter": str(flip.star+flip.green_star+flip.red_star+flip.blue_star+flip.purple_star), "icon": "3"}, False)
else:
continue
else:
continue
if flip_count > (page+1)*50:
ugo.add_item({"label": "Next", "url": request.build_absolute_uri('/')+"ds/v2-eu/channel/"+channel.internal_id+".uls?page="+str(page+1)})
return HttpResponse(ugo.get_ugo())
@csrf_exempt
def newest_list(request, reg):
return flip_list(request, True)
@csrf_exempt
def popular_list(request, reg):
return flip_list(request, None)
@csrf_exempt
def liked_list(request, reg):
return flip_list(request, False)
"""
type argument: None is Popular Flipnotes, False is Most Liked, and True is New Flipnotes
"""
@csrf_exempt
def flip_list(request, type=None):
if not request.GET.get("page"):
page = 0
else:
page = int(request.GET.get("page"))
flip_count = Flipnote.objects.all().count()
ugo = UgoMenu()
ugo.set_type("2")
if type==None:
ugo.set_meta("uppertitle", "Popular Flipnotes")
ugo.set_meta("uppersubbottom", "The most popular recent flipnotes.")
        flips = Flipnote.objects.all().order_by("-views")[page*50:(page+1)*50]
selected_newest = "0"
selected_popular = "1"
selected_liked = "0"
elif type==False:
ugo.set_meta("uppertitle", "Liked Flipnotes")
ugo.set_meta("uppersubbottom", "The most liked recent flipnotes.")
        flips = Flipnote.objects.all().order_by("-total")[page*50:(page+1)*50]
selected_newest = "0"
selected_popular = "0"
selected_liked = "1"
elif type==True:
ugo.set_meta("uppertitle", "New Flipnotes")
ugo.set_meta("uppersubbottom", "The most recent flipnotes.")
        flips = Flipnote.objects.all().order_by("-id")[page*50:(page+1)*50]
selected_newest = "1"
selected_popular = "0"
selected_liked = "0"
ugo.set_meta("uppersubleft", "Flipnotes")
ugo.set_meta("uppersubright", str(flip_count))
ugo.add_dropdown({"label": "New Flipnotes", "url": request.build_absolute_uri('/')+"ds/v2-eu/newest.uls", "selected": selected_newest})
ugo.add_dropdown({"label": "Most Popular", "url": request.build_absolute_uri('/')+"ds/v2-eu/hot.uls", "selected": selected_popular})
ugo.add_dropdown({"label": "Most Liked", "url": request.build_absolute_uri('/')+"ds/v2-eu/liked.uls", "selected": selected_liked})
if page != 0:
ugo.add_item({"label": "Previous", "url": request.build_absolute_uri('/')+"ds/v2-eu/hot.uls?page="+str(page-1)})
for flip in flips:
if not flip.channel.show_in_frontpage:
continue
if type==None or type==False:
now = datetime.date.today()
time_between = now - flip.date
if int(time_between.days) > 7:
continue
if os.path.exists("./files/ppm/"+flip.real_filename+".ppm"):
with open("./files/ppm/"+flip.real_filename+".ppm", "rb+") as file:
parser = PPMParser()
if parser.load(file):
ugo.add_item({"url": request.build_absolute_uri('/')+"ds/v2-eu/flipnote/"+parser.current_filename+".ppm", "file": "./files/ppm/"+parser.current_filename+".ppm", "counter": str(flip.star+flip.green_star+flip.red_star+flip.blue_star+flip.purple_star), "icon": "3"}, False)
else:
continue
else:
continue
if flip_count > (page+1)*50:
ugo.add_item({"label": "Next", "url": request.build_absolute_uri('/')+"ds/v2-eu/hot.uls?page="+str(page+1)})
return HttpResponse(ugo.get_ugo())
@csrf_exempt
def info(request, reg, file):
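    """Flipnote info endpoint: bumps the view counter unless the viewer is the flipnote's author."""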
try:
session = Session.objects.get(token=request.headers['X-DSi-SID'])
except:
return HttpResponse(status=403)
try:
flip = Flipnote.objects.get(real_filename=file)
except ObjectDoesNotExist:
return HttpResponse(status=404)
if session.user != flip.made_by:
flip.views += 1
flip.save()
return HttpResponse("0\n0\n", content_type="text/plain; charset=utf-16le")
@csrf_exempt
def dl(request, reg, file):
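    """Flipnote download hook: bumps the saved counter unless the downloader is the author."""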
try:
session = Session.objects.get(token=request.headers['X-DSi-SID'])
except:
return HttpResponse(status=403)
try:
flip = Flipnote.objects.get(real_filename=file)
except ObjectDoesNotExist:
return HttpResponse(status=404)
if session.user != flip.made_by:
flip.saved += 1
flip.save()
return HttpResponse("nice", content_type="text/plain; charset=utf-16le")
@csrf_exempt
def star(request, reg, file):
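    """
    Adds stars to a flipnote. The amount comes from the X-Hatena-Star-Count
    header; with a ?starcolor= parameter the stars are deducted from the
    user's coloured-star balance, otherwise plain stars are capped at 10 per
    user per flipnote via StarLog.
    """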
try:
session = Session.objects.get(token=request.headers['X-DSi-SID'])
except:
return HttpResponse(status=403)
try:
flip = Flipnote.objects.get(real_filename=file)
except ObjectDoesNotExist:
return HttpResponse(status=404)
if "X-Hatena-Star-Count" not in request.headers:
return HttpResponse(status=403)
star = int(request.headers["X-Hatena-Star-Count"])
if star < 1 or star > 65535:
return HttpResponse(status=403)
try:
star_log = StarLog.objects.get(user=session.user, flipnote=flip)
except ObjectDoesNotExist:
        star_log = StarLog.objects.create(user=session.user, flipnote=flip)
if request.GET.get("starcolor"):
user = User.objects.get(id=session.user.id)
star_type = request.GET.get("starcolor")
if star_type == "green":
if star > session.user.green_star:
return HttpResponse(status=403)
user.green_star -= star
star_log.green_star += star
flip.green_star += star
elif star_type == "red":
if star > session.user.red_star:
return HttpResponse(status=403)
user.red_star -= star
star_log.red_star += star
flip.red_star += star
elif star_type == "blue":
if star > session.user.blue_star:
return HttpResponse(status=403)
user.blue_star -= star
star_log.blue_star += star
flip.blue_star += star
elif star_type == "purple":
if star > session.user.purple_star:
return HttpResponse(status=403)
user.purple_star -= star
star_log.purple_star += star
flip.purple_star += star
else:
return HttpResponse(status=403)
flip.total += star
user.save()
star_log.save()
flip.save()
return HttpResponse("nice star you got here, can I have it?", content_type="text/plain; charset=utf-16le")
else:
if star_log.star >= 10:
return HttpResponse("nice", content_type="text/plain; charset=utf-16le")
else:
if star_log.star + star > 10:
flip.star += (10 - star_log.star)
flip.total += (10 - star_log.star)
flip.save()
star_log.star = 10
star_log.save()
return HttpResponse("hi potential reverse engineer", content_type="text/plain; charset=utf-16le")
star_log.star += star
star_log.save()
flip.star += star
flip.total += star
flip.save()
return HttpResponse("nice", content_type="text/plain; charset=utf-16le")
@csrf_exempt
def flipnote_info(request, reg, file):
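    """Renders the HTML details page for a flipnote."""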
try:
session = Session.objects.get(token=request.headers['X-DSi-SID'])
except:
return HttpResponse(status=403)
try:
flip = Flipnote.objects.get(real_filename=file)
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "details.html", {"BASE_URI": request.build_absolute_uri('/'),"flipnote": flip, "ppmUri": request.build_absolute_uri('/')+"ds/v2-eu/flipnote/"+file+".ppm", "session": session}, content_type="text/html; charset=utf-8")
@csrf_exempt
def signin(request, reg):
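    """Serves the sign-in pages: step 1 (username), step 2 (password), or the finish page."""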
try:
session = Session.objects.get(token=request.headers['X-DSi-SID'])
except:
return HttpResponse(status=403)
if request.GET.get("finish"):
return render(request, "signin/finish.html")
elif not request.GET.get("step2"):
return render(request, "signin/step1.html", {"BASE_URI": request.build_absolute_uri('/')})
else:
return render(request, "signin/step2.html", {"BASE_URI": request.build_absolute_uri('/')})
@csrf_exempt
def signin_step1(request, reg):
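    """
    Sign-in step 1: the console posts the username in the X-Email-Addr header.
    On success it is stashed in the session and the client is redirected to
    step 2 through the X-DSi-Forwarder header.
    """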
try:
session = Session.objects.get(token=request.headers['X-Dsi-Sid'])
except:
return HttpResponse(status=403)
resp = HttpResponse()
if request.method == 'POST':
if request.headers['X-Email-Addr'] != "":
try:
user = User.objects.get(username=request.headers['X-Email-Addr'])
session.temp = user.username
session.save()
resp.headers['X-DSi-Forwarder'] = request.build_absolute_uri('/')+"ds/v2-eu/signin.htm?step2=true"
except ObjectDoesNotExist:
resp.headers['X-DSi-Forwarder'] = request.build_absolute_uri('/')+"ds/v2-eu/error_get.htm?error=Invalid+username"
else:
resp.headers['X-DSi-Forwarder'] = request.build_absolute_uri('/')+"ds/v2-eu/error_get.htm?error=Empty+username+please+try+again"
else:
resp.headers['X-DSi-Forwarder'] = request.build_absolute_uri('/')+"ds/v2-eu/error_get.htm?error=An+error+has+occured."
return resp
@csrf_exempt
def signin_step2(request, reg):
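    """
    Sign-in step 2: the password arrives in the X-Email-Addr header. On
    success the account is unlinked from any other console and bound to this
    session's fsid and mac.
    """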
try:
session = Session.objects.get(token=request.headers['X-Dsi-Sid'])
except:
return HttpResponse(status=403)
resp = HttpResponse()
if request.method == 'POST':
if request.headers['X-Email-Addr'] != "":
user = authenticate(username=session.temp, password=request.headers['X-Email-Addr'])
            if user is not None:
try:
test = User.objects.get(fsid=session.fsid)
test.fsid = None
test.save()
except ObjectDoesNotExist:
pass
try:
test = User.objects.get(mac=session.mac)
test.mac = None
test.save()
except ObjectDoesNotExist:
pass
user = User.objects.get(username=session.temp)
user.fsid = session.fsid
user.mac = session.mac
user.save()
resp.headers['X-DSi-Forwarder'] = request.build_absolute_uri('/')+"ds/v2-eu/signin.htm?finish=true"
else:
resp.headers['X-DSi-Forwarder'] = request.build_absolute_uri('/')+"ds/v2-eu/error_get.htm?error=Invalid+password+please+try+again"
else:
resp.headers['X-DSi-Forwarder'] = request.build_absolute_uri('/')+"ds/v2-eu/error_get.htm?error=Empty+password+please+try+again"
else:
resp.headers['X-DSi-Forwarder'] = request.build_absolute_uri('/')+"ds/v2-eu/error_get.htm?error=An+error+has+occured."
return resp
@csrf_exempt
def error_get(request, reg):
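    """Renders a generic error page with the message passed in ?error=."""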
return render(request, "error.html", {"errMsg": request.GET.get("error")})
@csrf_exempt
def ppmloader(request, reg, file):
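    """Serves a raw .ppm flipnote file from ./files/ppm/."""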
if os.path.exists("./files/ppm/"+file+".ppm"):
        with open("./files/ppm/"+file+".ppm", "rb") as ppm_file:
            return HttpResponse(ppm_file.read(), content_type="text/plain; charset=utf-16le")
else:
resp = HttpResponse()
resp.status_code = 404
return resp
@csrf_exempt
def post_flip(request, reg, internal_id):
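    """
    Accepts a flipnote upload (raw PPM in the request body) into a channel.
    Locked channels reject the upload and duplicate filenames are silently
    ignored.
    """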
try:
session = Session.objects.get(token=request.headers['X-DSi-SID'])
except:
return HttpResponse(status=403)
try:
channel = Channel.objects.get(internal_id=internal_id)
except:
return HttpResponse(status=403)
if channel.locked:
return HttpResponse(status=403)
if request.method == 'POST':
        if request.body:
parser = PPMParser()
if parser.load(io.BytesIO(request.body)):
try:
Flipnote.objects.get(real_filename=parser.current_filename)
return HttpResponse()
except ObjectDoesNotExist:
pass
                with open("./files/ppm/"+parser.current_filename+".ppm", "wb") as ppm_out:
                    ppm_out.write(request.body)
if parser.lock < 0 or parser.lock > 1:
return HttpResponse(status=403)
Flipnote.objects.create(real_filename=parser.current_filename, is_locked=parser.lock, made_by=session.user, channel=channel)
return HttpResponse()
else:
return HttpResponse(status=403)
else:
return HttpResponse(status=400)
else:
return HttpResponse(status=405)
@csrf_exempt
def static(request, reg, dir, file):
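    """Serves static assets for the console pages from ./ds-static/."""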
if os.path.exists("./ds-static/"+dir+"/"+file):
        with open("./ds-static/"+dir+"/"+file, "rb") as asset_file:
            return HttpResponse(asset_file.read(), content_type="text/css; charset=utf-8")
else:
resp = HttpResponse()
resp.status_code = 404
        return resp

# File: Aftendo_Afternote/db/models.py
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser):
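    """Site account. fsid/mac link the account to a console (set during sign-in);
    the *_star fields hold the user's coloured-star balance."""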
fsid = models.CharField(max_length=16, null=True, unique=True, blank=True)
mac = models.CharField(max_length=12, null=True, unique=True, blank=True)
ban = models.BooleanField(default=False, null=False)
green_star = models.IntegerField(blank=False, null=False, default=0)
red_star = models.IntegerField(blank=False, null=False, default=0)
blue_star = models.IntegerField(blank=False, null=False, default=0)
purple_star = models.IntegerField(blank=False, null=False, default=0)
def __str__(self):
return self.username
class Session(models.Model):
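    """Console session; the token is sent back by the client in the X-DSi-SID header."""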
id = models.IntegerField(primary_key=True, blank=True)
token = models.CharField(max_length=16, null=False, unique=True)
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
temp = models.CharField(max_length=400, null=False, default="")
fsid = models.CharField(max_length=16, null=False)
mac = models.CharField(max_length=12, null=False)
def __str__(self):
return "Session "+self.token
class Category(models.Model):
id = models.IntegerField(primary_key=True, blank=True)
internal_id = models.CharField(max_length=16, null=False)
name = models.CharField(max_length=32, null=False)
def __str__(self):
return self.name
class Channel(models.Model):
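    """A posting channel, grouped under a Category; flipnotes are uploaded into channels."""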
id = models.IntegerField(primary_key=True, blank=True)
internal_id = models.CharField(max_length=16, null=False)
name = models.CharField(max_length=32, null=False)
category = models.ForeignKey(Category, null=False, on_delete=models.CASCADE)
show_in_frontpage = models.BooleanField(null=False, default=True)
locked = models.BooleanField(null=False, default=False)
def __str__(self):
return self.name
class Flipnote(models.Model):
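    """An uploaded flipnote (.ppm); tracks views, saves and per-colour star counts."""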
id = models.IntegerField(primary_key=True, blank=True)
real_filename = models.CharField(max_length=24, null=False, unique=True)
views = models.IntegerField(null=False, default=0)
saved = models.IntegerField(null=False, default=0)
is_locked = models.IntegerField(null=False, default=0)
made_by = models.ForeignKey(User, null=False, on_delete=models.CASCADE)
channel = models.ForeignKey(Channel, null=False, on_delete=models.CASCADE)
star = models.IntegerField(blank=False, null=False, default=0)
green_star = models.IntegerField(blank=False, null=False, default=0)
red_star = models.IntegerField(blank=False, null=False, default=0)
blue_star = models.IntegerField(blank=False, null=False, default=0)
purple_star = models.IntegerField(blank=False, null=False, default=0)
total = models.IntegerField(blank=False, null=False, default=0)
date = models.DateField(auto_now_add=True, null=False)
def __str__(self):
return self.real_filename
class StarLog(models.Model):
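    """Per-user star tally for a flipnote; used to cap plain stars at 10 per user."""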
id = models.IntegerField(primary_key=True, blank=True)
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
flipnote = models.ForeignKey(Flipnote, null=True, on_delete=models.CASCADE)
star = models.IntegerField(blank=False, null=False, default=0)
green_star = models.IntegerField(blank=False, null=False, default=0)
red_star = models.IntegerField(blank=False, null=False, default=0)
blue_star = models.IntegerField(blank=False, null=False, default=0)
purple_star = models.IntegerField(blank=False, null=False, default=0)
def __str__(self):
        return "Star log of "+self.user.username+" for flipnote "+self.flipnote.real_filename

# File: Aftendo_Afternote/db/apps.py
from django.apps import AppConfig
class DbConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'db'

# File: Aftendo_Afternote/db/admin.py
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Flipnote)
admin.site.register(User)
admin.site.register(Session)
admin.site.register(Category)
admin.site.register(Channel)
admin.site.register(StarLog)

# File: Aftendo_Afternote/db/migrations/0012_flipnote_channel.py
# Generated by Django 4.1.7 on 2024-01-14 18:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('db', '0011_channel_category'),
]
operations = [
migrations.AddField(
model_name='flipnote',
name='channel',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='db.channel'),
preserve_default=False,
),
]

# File: Aftendo_Afternote/db/migrations/0013_user_blue_star_user_green_star_user_purple_star_and_more.py
# Generated by Django 4.1.7 on 2024-01-15 00:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db', '0012_flipnote_channel'),
]
operations = [
migrations.AddField(
model_name='user',
name='blue_star',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='user',
name='green_star',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='user',
name='purple_star',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='user',
name='red_star',
field=models.IntegerField(default=0),
),
]

# File: Aftendo_Afternote/db/migrations/0002_session.py
# Generated by Django 4.1.7 on 2024-01-14 11:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('db', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Session',
fields=[
('id', models.IntegerField(blank=True, primary_key=True, serialize=False)),
('session_id', models.CharField(max_length=16)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]

# File: Aftendo_Afternote/db/migrations/0011_channel_category.py
# Generated by Django 4.1.7 on 2024-01-14 18:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('db', '0010_category_channel'),
]
operations = [
migrations.AddField(
model_name='channel',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='db.category'),
preserve_default=False,
),
]

# File: Aftendo_Afternote/db/migrations/0006_session_temp_alter_session_token.py
# Generated by Django 4.1.7 on 2024-01-14 12:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db', '0005_rename_session_id_session_token'),
]
operations = [
migrations.AddField(
model_name='session',
name='temp',
field=models.CharField(default='', max_length=400),
),
migrations.AlterField(
model_name='session',
name='token',
field=models.CharField(max_length=16, unique=True),
),
]

# File: Aftendo_Afternote/db/migrations/0018_channel_locked_channel_show_in_frontpage.py
# Generated by Django 4.1.7 on 2024-01-15 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db', '0017_flipnote_date'),
]
operations = [
migrations.AddField(
model_name='channel',
name='locked',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='channel',
name='show_in_frontpage',
field=models.BooleanField(default=True),
),
]

# File: Aftendo_Afternote/db/migrations/0001_initial.py
# Generated by Django 4.1.7 on 2024-01-14 03:06
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Flipnote',
fields=[
('id', models.IntegerField(blank=True, primary_key=True, serialize=False)),
('real_filename', models.CharField(max_length=24, unique=True)),
('views', models.IntegerField(default=0)),
('saved', models.IntegerField(default=0)),
('is_locked', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('fsid', models.CharField(max_length=16, unique=True)),
('mac', models.CharField(max_length=12, unique=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]

# File: Aftendo_Afternote/db/migrations/0005_rename_session_id_session_token.py
# Generated by Django 4.1.7 on 2024-01-14 11:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('db', '0004_user_ban'),
]
operations = [
migrations.RenameField(
model_name='session',
old_name='session_id',
new_name='token',
),
]