repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
adcgan | adcgan-main/BigGAN-PyTorch/inception_tf13.py | ''' Tensorflow inception score code
Derived from https://github.com/openai/improved-gan
Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
THIS CODE REQUIRES TENSORFLOW 1.3 or EARLIER to run in PARALLEL BATCH MODE
To use this code, run sample.py on your model with --sample_npz, then pass
the experiment name via --experiment_name.
This code also saves pool3 stats to an npz file for FID calculation
'''
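# Usage sketch (the experiment name below is hypothetical; paths follow the
# defaults documented in prepare_parser):
#   python inception_tf13.py --experiment_name my_biggan_run --experiment_root samples
# which expects samples/my_biggan_run/samples.npz as produced by sample.py --sample_npz.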
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import math
from tqdm import tqdm, trange
from argparse import ArgumentParser
import numpy as np
from six.moves import urllib
import tensorflow as tf
MODEL_DIR = ''
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
def prepare_parser():
usage = 'Parser for TF1.3- Inception Score scripts.'
parser = ArgumentParser(description=usage)
parser.add_argument(
'--experiment_name', type=str, default='',
help='Which experiment\'s samples.npz file to pull and evaluate')
parser.add_argument(
'--experiment_root', type=str, default='samples',
help='Default location where samples are stored (default: %(default)s)')
parser.add_argument(
'--batch_size', type=int, default=500,
help='Default overall batchsize (default: %(default)s)')
return parser
def run(config):
# Inception with TF1.3 or earlier.
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
assert(type(images) == list)
assert(type(images[0]) == np.ndarray)
assert(len(images[0].shape) == 3)
assert(np.max(images[0]) > 10)
assert(np.min(images[0]) >= 0.0)
inps = []
for img in images:
img = img.astype(np.float32)
inps.append(np.expand_dims(img, 0))
bs = config['batch_size']
with tf.Session() as sess:
preds, pools = [], []
n_batches = int(math.ceil(float(len(inps)) / float(bs)))
for i in trange(n_batches):
inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
inp = np.concatenate(inp, 0)
pred, pool = sess.run([softmax, pool3], {'ExpandDims:0': inp})
preds.append(pred)
pools.append(pool)
preds = np.concatenate(preds, 0)
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores), np.squeeze(np.concatenate(pools, 0))
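# The loop above implements the standard Inception Score: for each split,
# IS = exp( mean_x KL( p(y|x) || p(y) ) ), where p(y|x) is a softmax row of
# `part` and p(y) is approximated by that split's marginal np.mean(part, 0);
# the reported mean/std are taken over the `splits` independent splits.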
# Init inception
def _init_inception():
global softmax, pool3
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Works with an arbitrary minibatch size.
with tf.Session() as sess:
pool3 = sess.graph.get_tensor_by_name('pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o._shape = tf.TensorShape(new_shape)
w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
logits = tf.matmul(tf.squeeze(pool3), w)
softmax = tf.nn.softmax(logits)
# Build the inception graph once at startup (sets the softmax and pool3 globals).
_init_inception()
fname = '%s/%s/samples.npz' % (config['experiment_root'], config['experiment_name'])
print('loading %s ...'%fname)
ims = np.load(fname)['x']
import time
t0 = time.time()
inc_mean, inc_std, pool_activations = get_inception_score(list(ims.swapaxes(1,2).swapaxes(2,3)), splits=10)
t1 = time.time()
print('Saving pool to numpy file for FID calculations...')
np.savez('%s/%s/TF_pool.npz' % (config['experiment_root'], config['experiment_name']), **{'pool_mean': np.mean(pool_activations,axis=0), 'pool_var': np.cov(pool_activations, rowvar=False)})
print('Inception took %.3f seconds, score of %.3f +/- %.3f.'%(t1-t0, inc_mean, inc_std))
def main():
# parse command line and run
parser = prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main() | 5,363 | 37.869565 | 191 | py |
adcgan | adcgan-main/BigGAN-PyTorch/train_fns.py | ''' train_fns.py
Functions for the main loop of training different conditional image models
'''
import torch
import torch.nn as nn
import torchvision
import os
import utils
import losses
# Dummy training function for debugging
def dummy_training_function():
def train(x, y):
return {}
return train
def GAN_training_function(G, D, GD, z_, y_, ema, state_dict, config):
def train(x, y):
G.optim.zero_grad()
D.optim.zero_grad()
# How many chunks to split x and y into?
x = torch.split(x, config['batch_size'])
y = torch.split(y, config['batch_size'])
counter = 0
# Optionally toggle D and G's "require_grad"
if config['toggle_grads']:
utils.toggle_grad(D, True)
utils.toggle_grad(G, False)
for step_index in range(config['num_D_steps']):
# If accumulating gradients, loop multiple times before an optimizer step
D.optim.zero_grad()
for accumulation_index in range(config['num_D_accumulations']):
z_.sample_()
y_.sample_()
(D_fake, D_real), (D_adc_fake, D_adc_real), (D_ac_fake, D_ac_real), (D_mi_fake, D_mi_real), (D_am_fake, D_am_real) = GD(z_[:config['batch_size']], y_[:config['batch_size']],
x[counter], y[counter], train_G=False,
split_D=config['split_D'])
# Compute components of D's loss, average them, and divide by
# the number of gradient accumulations
D_loss_real, D_loss_fake = losses.discriminator_loss(D_fake, D_real)
D_loss = (D_loss_real + D_loss_fake) / float(config['num_D_accumulations'])
D_aux_loss = torch.tensor(0., device=D_loss.device)
if config['loss'] == 'acgan':
D_ac_loss = losses.classifier_loss_dis(D_ac_real, y[counter], config['hinge'])
D_aux_loss = D_ac_loss
elif config['loss'] == 'tacgan':
D_ac_loss = losses.classifier_loss_dis(D_ac_real, y[counter], config['hinge'])
D_mi_loss = losses.classifier_loss_dis(D_mi_fake, y_[:config['batch_size']], config['hinge'])
D_aux_loss = D_ac_loss + D_mi_loss
elif config['loss'] == 'amgan':
D_loss = D_loss.detach()
D_aux_loss = losses.classifier_loss_dis(D_am_real, y[counter], config['hinge']) + \
losses.classifier_loss_dis(D_am_fake, torch.ones_like(y_[:config['batch_size']]) * utils.nclass_dict[config['dataset']], config['hinge'])
elif config['loss'] == 'adcgan':
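# The ADC head has 2 * n_classes logits: index 2*y is "real, class y" and
# index 2*y + 1 is "fake, class y", so D learns to separate real from fake
# samples on a per-class basis.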
D_adc_loss_real = losses.classifier_loss_dis(D_adc_real, y[counter] * 2, config['hinge'])
D_adc_loss_fake = losses.classifier_loss_dis(D_adc_fake, y_[:config['batch_size']] * 2 + 1, config['hinge'])
D_aux_loss = D_adc_loss_real + D_adc_loss_fake
D_aux_loss = config['D_lambda'] * D_aux_loss / float(config['num_D_accumulations'])
(D_loss + D_aux_loss).backward()
counter += 1
# Optionally apply ortho reg in D
if config['D_ortho'] > 0.0:
# Debug print to indicate we're using ortho reg in D.
print('using modified ortho reg in D')
utils.ortho(D, config['D_ortho'])
D.optim.step()
# Optionally toggle "requires_grad"
if config['toggle_grads']:
utils.toggle_grad(D, False)
utils.toggle_grad(G, True)
# Zero G's gradients by default before training G, for safety
G.optim.zero_grad()
# If accumulating gradients, loop multiple times
for accumulation_index in range(config['num_G_accumulations']):
z_.sample_()
y_.sample_()
D_fake, D_adc_fake, D_ac_fake, D_mi_fake, D_am_fake = GD(z_, y_, train_G=True, split_D=config['split_D'])
G_loss = losses.generator_loss(D_fake) / float(config['num_G_accumulations'])
G_aux_loss = torch.tensor(0., device=G_loss.device)
if config['loss'] == 'acgan':
G_ac_loss = losses.classifier_loss_gen(D_ac_fake, y_, config['hinge'])
G_aux_loss = G_ac_loss
elif config['loss'] == 'tacgan':
G_ac_loss = losses.classifier_loss_gen(D_ac_fake, y_, config['hinge'])
G_mi_loss = losses.classifier_loss_gen(D_mi_fake, y_, config['hinge'])
G_aux_loss = G_ac_loss - G_mi_loss
elif config['loss'] == 'amgan':
G_loss = G_loss.detach()
G_aux_loss = losses.classifier_loss_gen(D_am_fake, y_, config['hinge'])
elif config['loss'] == 'adcgan':
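# Mirror of D's ADC objective: G pulls its samples toward the "real, class y"
# logits (2*y) and away from the "fake, class y" logits (2*y + 1).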
G_adc_loss_pos = losses.classifier_loss_gen(D_adc_fake, y_ * 2, config['hinge'])
G_adc_loss_neg = losses.classifier_loss_gen(D_adc_fake, y_ * 2 + 1, config['hinge'])
G_aux_loss = G_adc_loss_pos - G_adc_loss_neg
G_aux_loss = config['G_lambda'] * G_aux_loss / float(config['num_G_accumulations'])
(G_loss + G_aux_loss).backward()
# Optionally apply modified ortho reg in G
if config['G_ortho'] > 0.0:
print('using modified ortho reg in G') # Debug print to indicate we're using ortho reg in G
# Don't ortho reg shared, it makes no sense. Really we should blacklist any embeddings for this
utils.ortho(G, config['G_ortho'],
blacklist=[param for param in G.shared.parameters()])
G.optim.step()
# If we have an ema, update it, regardless of if we test with it or not
if config['ema']:
ema.update(state_dict['itr'])
# out = {'G_loss': float(G_loss.item()),
# 'D_loss_real': float(D_loss_real.item()),
# 'D_loss_fake': float(D_loss_fake.item()),
# 'G_aux_loss': float(G_aux_loss.item()),
# 'D_aux_loss': float(D_aux_loss.item())}
# shorten for small screen
out = {'G': float(G_loss.item()),
'DR': float(D_loss_real.item()),
'DF': float(D_loss_fake.item()),
'GA': float(G_aux_loss.item()),
'DA': float(D_aux_loss.item())}
# Return G's loss and the components of D's loss.
return out
return train
''' This function takes in the model, saves the weights (multiple copies if
requested), and prepares sample sheets: one consisting of samples given
a fixed noise seed (to show how the model evolves throughout training),
a set of full conditional sample sheets, and a set of interp sheets. '''
def save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name):
utils.save_weights(G, D, state_dict, config['weights_root'],
experiment_name, None, G_ema if config['ema'] else None)
# Save an additional copy to mitigate accidental corruption if process
# is killed during a save (it's happened to me before -.-)
if config['num_save_copies'] > 0:
utils.save_weights(G, D, state_dict, config['weights_root'],
experiment_name,
'copy%d' % state_dict['save_num'],
G_ema if config['ema'] else None)
state_dict['save_num'] = (state_dict['save_num'] + 1 ) % config['num_save_copies']
# Use EMA G for samples or non-EMA?
which_G = G_ema if config['ema'] and config['use_ema'] else G
# Accumulate standing statistics?
if config['accumulate_stats']:
utils.accumulate_standing_stats(G_ema if config['ema'] and config['use_ema'] else G,
z_, y_, config['n_classes'],
config['num_standing_accumulations'])
# Save a random sample sheet with fixed z and y
with torch.no_grad():
if config['parallel']:
fixed_Gz = nn.parallel.data_parallel(which_G, (fixed_z, which_G.shared(fixed_y)))
else:
fixed_Gz = which_G(fixed_z, which_G.shared(fixed_y))
if not os.path.isdir('%s/%s' % (config['samples_root'], experiment_name)):
os.mkdir('%s/%s' % (config['samples_root'], experiment_name))
image_filename = '%s/%s/fixed_samples%d.jpg' % (config['samples_root'],
experiment_name,
state_dict['itr'])
torchvision.utils.save_image(fixed_Gz.float().cpu(), image_filename,
nrow=int(fixed_Gz.shape[0] **0.5), normalize=True)
# For now, every time we save, also save sample sheets
utils.sample_sheet(which_G,
classes_per_sheet=utils.classes_per_sheet_dict[config['dataset']],
num_classes=config['n_classes'],
samples_per_class=10, parallel=config['parallel'],
samples_root=config['samples_root'],
experiment_name=experiment_name,
folder_number=state_dict['itr'],
z_=z_)
# Also save interp sheets
for fix_z, fix_y in zip([False, False, True], [False, True, False]):
utils.interp_sheet(which_G,
num_per_sheet=16,
num_midpoints=8,
num_classes=config['n_classes'],
parallel=config['parallel'],
samples_root=config['samples_root'],
experiment_name=experiment_name,
folder_number=state_dict['itr'],
sheet_number=0,
fix_z=fix_z, fix_y=fix_y, device='cuda')
''' This function runs the inception metrics code, checks if the results
are an improvement over the previous best (either in IS or FID,
user-specified), logs the results, and saves a best_ copy if it's an
improvement. '''
def test(G, D, G_ema, z_, y_, state_dict, config, sample, get_inception_metrics,
experiment_name, test_log):
print('Gathering inception metrics...')
if config['accumulate_stats']:
utils.accumulate_standing_stats(G_ema if config['ema'] and config['use_ema'] else G,
z_, y_, config['n_classes'],
config['num_standing_accumulations'])
IS_mean, IS_std, FID = get_inception_metrics(sample,
config['num_inception_images'],
num_splits=10)
print('Itr %d: PYTORCH UNOFFICIAL Inception Score is %3.3f +/- %3.3f, PYTORCH UNOFFICIAL FID is %5.4f' % (state_dict['itr'], IS_mean, IS_std, FID))
# If improved over previous best metric, save appropriate copy
if ((config['which_best'] == 'IS' and IS_mean > state_dict['best_IS'])
or (config['which_best'] == 'FID' and FID < state_dict['best_FID'])):
print('%s improved over previous best, saving checkpoint...' % config['which_best'])
utils.save_weights(G, D, state_dict, config['weights_root'],
experiment_name, 'best%d' % state_dict['save_best_num'],
G_ema if config['ema'] else None)
state_dict['save_best_num'] = (state_dict['save_best_num'] + 1 ) % config['num_best_copies']
state_dict['best_IS'] = max(state_dict['best_IS'], IS_mean)
state_dict['best_FID'] = min(state_dict['best_FID'], FID)
# Log results to file
test_log.log(itr=int(state_dict['itr']), IS_mean=float(IS_mean),
IS_std=float(IS_std), FID=float(FID))
| 11,139 | 47.017241 | 181 | py |
adcgan | adcgan-main/BigGAN-PyTorch/BigGAN.py | import numpy as np
import math
import functools
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import layers
from sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d
# Architectures for G
# Attention is passed in in the format '32_64' to mean applying an attention
# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.
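# For example, G_arch(attention='32_64')[128]['attention'] evaluates to
# {8: False, 16: False, 32: True, 64: True, 128: False}.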
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
arch = {}
arch[512] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2, 1]],
'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1, 1]],
'upsample' : [True] * 7,
'resolution' : [8, 16, 32, 64, 128, 256, 512],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,10)}}
arch[256] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2]],
'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1]],
'upsample' : [True] * 6,
'resolution' : [8, 16, 32, 64, 128, 256],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,9)}}
arch[128] = {'in_channels' : [ch * item for item in [16, 16, 8, 4, 2]],
'out_channels' : [ch * item for item in [16, 8, 4, 2, 1]],
'upsample' : [True] * 5,
'resolution' : [8, 16, 32, 64, 128],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,8)}}
arch[64] = {'in_channels' : [ch * item for item in [16, 16, 8, 4]],
'out_channels' : [ch * item for item in [16, 8, 4, 2]],
'upsample' : [True] * 4,
'resolution' : [8, 16, 32, 64],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,7)}}
arch[32] = {'in_channels' : [ch * item for item in [4, 4, 4]],
'out_channels' : [ch * item for item in [4, 4, 4]],
'upsample' : [True] * 3,
'resolution' : [8, 16, 32],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,6)}}
return arch
class Generator(nn.Module):
def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,
G_kernel_size=3, G_attn='64', n_classes=1000,
num_G_SVs=1, num_G_SV_itrs=1,
G_shared=True, shared_dim=0, hier=False,
cross_replica=False, mybn=False,
G_activation=nn.ReLU(inplace=False),
G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
G_init='ortho', skip_init=False, no_optim=False,
G_param='SN', norm_style='bn',
**kwargs):
super(Generator, self).__init__()
# Channel width multiplier
self.ch = G_ch
# Dimensionality of the latent space
self.dim_z = dim_z
# The initial spatial dimensions
self.bottom_width = bottom_width
# Resolution of the output
self.resolution = resolution
# Kernel size?
self.kernel_size = G_kernel_size
# Attention?
self.attention = G_attn
# number of classes, for use in categorical conditional generation
self.n_classes = n_classes
# Use shared embeddings?
self.G_shared = G_shared
# Dimensionality of the shared embedding? Unused if not using G_shared
self.shared_dim = shared_dim if shared_dim > 0 else dim_z
# Hierarchical latent space?
self.hier = hier
# Cross replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
# nonlinearity for residual blocks
self.activation = G_activation
# Initialization style
self.init = G_init
# Parameterization style
self.G_param = G_param
# Normalization style
self.norm_style = norm_style
# Epsilon for BatchNorm?
self.BN_eps = BN_eps
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# fp16?
self.fp16 = G_fp16
# Architecture dict
self.arch = G_arch(self.ch, self.attention)[resolution]
# If using hierarchical latents, adjust z
if self.hier:
# Number of places z slots into
self.num_slots = len(self.arch['in_channels']) + 1
self.z_chunk_size = (self.dim_z // self.num_slots)
# Recalculate latent dimensionality for even splitting into chunks
self.dim_z = self.z_chunk_size * self.num_slots
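# e.g. at resolution 128 there are 5 G blocks, so num_slots = 6, and
# dim_z = 128 gives z_chunk_size = 21 with dim_z recomputed to 126.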
else:
self.num_slots = 1
self.z_chunk_size = 0
# Which convs, batchnorms, and linear layers to use
if self.G_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
else:
self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
self.which_linear = nn.Linear
# We use a non-spectral-normed embedding here regardless;
# For some reason applying SN to G's embedding seems to randomly cripple G
self.which_embedding = nn.Embedding
bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
else self.which_embedding)
self.which_bn = functools.partial(layers.ccbn,
which_linear=bn_linear,
cross_replica=self.cross_replica,
mybn=self.mybn,
input_size=(self.shared_dim + self.z_chunk_size if self.G_shared
else self.n_classes),
norm_style=self.norm_style,
eps=self.BN_eps)
# Prepare model
# If not using shared embeddings, self.shared is just a passthrough
self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared
else layers.identity())
# First linear layer
self.linear = self.which_linear(self.dim_z // self.num_slots,
self.arch['in_channels'][0] * (self.bottom_width **2))
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
# while the inner loop is over a given block
self.blocks = []
for index in range(len(self.arch['out_channels'])):
self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
which_bn=self.which_bn,
activation=self.activation,
upsample=(functools.partial(F.interpolate, scale_factor=2)
if self.arch['upsample'][index] else None))]]
# If attention on this block, attach it to the end
if self.arch['attention'][self.arch['resolution'][index]]:
print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
# output layer: batchnorm-relu-conv.
# Consider using a non-spectral conv here
self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
cross_replica=self.cross_replica,
mybn=self.mybn),
self.activation,
self.which_conv(self.arch['out_channels'][-1], 3))
# Initialize weights. Optionally skip init for testing.
if not skip_init:
self.init_weights()
# Set up optimizer
# If this is an EMA copy, no need for an optim, so just return now
if no_optim:
return
self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
if G_mixed_precision:
print('Using fp16 adam in G...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
print('Param count for G\'s initialized parameters: %d' % self.param_count)
# Note on this forward function: we pass in a y vector which has
# already been passed through G.shared to enable easy class-wise
# interpolation later. If we passed in the one-hot and then ran it through
# G.shared in this forward function, it would be harder to handle.
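# A typical call therefore looks like G(z, G.shared(y)), as in
# train_fns.save_and_sample.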
def forward(self, z, y):
# If hierarchical, concatenate zs and ys
if self.hier:
zs = torch.split(z, self.z_chunk_size, 1)
z = zs[0]
ys = [torch.cat([y, item], 1) for item in zs[1:]]
else:
ys = [y] * len(self.blocks)
# First linear layer
h = self.linear(z)
# Reshape
h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
# Loop over blocks
for index, blocklist in enumerate(self.blocks):
# Second inner loop in case block has multiple layers
for block in blocklist:
h = block(h, ys[index])
# Apply batchnorm-relu-conv-tanh at output
return torch.tanh(self.output_layer(h))
# Discriminator architecture, same paradigm as G's above
def D_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
arch = {}
arch[256] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 8, 16]],
'out_channels' : [item * ch for item in [1, 2, 4, 8, 8, 16, 16]],
'downsample' : [True] * 6 + [False],
'resolution' : [128, 64, 32, 16, 8, 4, 4 ],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,8)}}
arch[128] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 16]],
'out_channels' : [item * ch for item in [1, 2, 4, 8, 16, 16]],
'downsample' : [True] * 5 + [False],
'resolution' : [64, 32, 16, 8, 4, 4],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,8)}}
arch[64] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8]],
'out_channels' : [item * ch for item in [1, 2, 4, 8, 16]],
'downsample' : [True] * 4 + [False],
'resolution' : [32, 16, 8, 4, 4],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,7)}}
arch[32] = {'in_channels' : [3] + [item * ch for item in [4, 4, 4]],
'out_channels' : [item * ch for item in [4, 4, 4, 4]],
'downsample' : [True, True, False, False],
'resolution' : [16, 16, 16, 16],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,6)}}
return arch
class Discriminator(nn.Module):
def __init__(self, D_ch=64, D_wide=True, resolution=128,
D_kernel_size=3, D_attn='64', n_classes=1000,
num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
D_init='ortho', skip_init=False, D_param='SN', projection=False, **kwargs):
super(Discriminator, self).__init__()
# Width multiplier
self.ch = D_ch
# Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?
self.D_wide = D_wide
# Resolution
self.resolution = resolution
# Kernel size
self.kernel_size = D_kernel_size
# Attention?
self.attention = D_attn
# Number of classes
self.n_classes = n_classes
# Activation
self.activation = D_activation
# Initialization style
self.init = D_init
# Parameterization style
self.D_param = D_param
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# Fp16?
self.fp16 = D_fp16
# Architecture
self.arch = D_arch(self.ch, self.attention)[resolution]
# Projection head?
self.projection = projection
# Which convs, batchnorms, and linear layers to use
# No option to turn off SN in D right now
if self.D_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
self.which_embedding = functools.partial(layers.SNEmbedding,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
# Prepare model
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
self.blocks = []
for index in range(len(self.arch['out_channels'])):
self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
wide=self.D_wide,
activation=self.activation,
preactivation=(index > 0),
downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
# If attention on this block, attach it to the end
if self.arch['attention'][self.arch['resolution'][index]]:
print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
# Linear output layer. The output dimension is typically 1, but may be
# larger if we're e.g. turning this into a VAE with an inference output
self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)
self.adc = self.which_linear(self.arch['out_channels'][-1], n_classes * 2)
self.ac = self.which_linear(self.arch['out_channels'][-1], n_classes)
self.mi = self.which_linear(self.arch['out_channels'][-1], n_classes)
self.am = self.which_linear(self.arch['out_channels'][-1], n_classes + 1)
# Embedding for projection discrimination
self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])
# Initialize weights
if not skip_init:
self.init_weights()
# Set up optimizer
self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
if D_mixed_precision:
print('Using fp16 adam in D...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
print('Param count for D\'s initialized parameters: %d' % self.param_count)
def forward(self, x, y=None):
# Stick x into h for cleaner for loops without flow control
h = x
# Loop over blocks
for index, blocklist in enumerate(self.blocks):
for block in blocklist:
h = block(h)
# Apply global sum pooling as in SN-GAN
h = torch.sum(self.activation(h), [2, 3])
# Get initial class-unconditional output
out = self.linear(h)
adc = self.adc(h)
ac = self.ac(h)
mi = self.mi(h)
am = self.am(h)
# Get projection of final featureset onto class vectors and add to evidence
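# (i.e. the projection discriminator of Miyato & Koyama, 2018:
# out += sum over features of embed(y) * h)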
if self.projection:
out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)
return out, adc, ac, mi, am
# Parallelized G_D to minimize cross-gpu communication
# Without this, Generator outputs would get all-gathered and then rebroadcast.
class G_D(nn.Module):
def __init__(self, G, D):
super(G_D, self).__init__()
self.G = G
self.D = D
def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
split_D=False):
# If training G, enable grad tape
with torch.set_grad_enabled(train_G):
# Get Generator output given noise
G_z = self.G(z, self.G.shared(gy))
# Cast as necessary
if self.G.fp16 and not self.D.fp16:
G_z = G_z.float()
if self.D.fp16 and not self.G.fp16:
G_z = G_z.half()
# Split_D means to run D once with real data and once with fake,
# rather than concatenating along the batch dimension.
if split_D:
D_fake = self.D(G_z, gy)
if x is not None:
D_real = self.D(x, dy)
return D_fake, D_real
else:
if return_G_z:
return D_fake, G_z
else:
return D_fake
# If real data is provided, concatenate it with the Generator's output
# along the batch dimension for improved efficiency.
else:
D_input = torch.cat([G_z, x], 0) if x is not None else G_z
D_class = torch.cat([gy, dy], 0) if dy is not None else gy
# Get Discriminator output
D_out, D_adc, D_ac, D_mi, D_am = self.D(D_input, D_class)
if x is not None:
return (torch.split(D_out, [G_z.shape[0], x.shape[0]]),
torch.split(D_adc, [G_z.shape[0], x.shape[0]]),
torch.split(D_ac, [G_z.shape[0], x.shape[0]]),
torch.split(D_mi, [G_z.shape[0], x.shape[0]]),
torch.split(D_am, [G_z.shape[0], x.shape[0]])) # each is (D_fake, D_real)
else:
if return_G_z:
return D_out, G_z
else:
return D_out, D_adc, D_ac, D_mi, D_am
| 20,469 | 43.307359 | 267 | py |
adcgan | adcgan-main/BigGAN-PyTorch/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Utilities file
This file contains utility functions for bookkeeping, logging, and data loading.
Methods which directly affect training should either go in layers, the model,
or train_fns.py.
'''
from __future__ import print_function
import sys
import os
import numpy as np
import time
import datetime
import json
import pickle
from argparse import ArgumentParser
import animal_hash
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import datasets as dset
def prepare_parser():
usage = 'Parser for all scripts.'
parser = ArgumentParser(description=usage)
### Dataset/Dataloader stuff ###
parser.add_argument(
'--dataset', type=str, default='I128_hdf5',
help='Which Dataset to train on, out of I128, I256, C10, C100;'
'Append "_hdf5" to use the hdf5 version for ILSVRC '
'(default: %(default)s)')
parser.add_argument(
'--augment', action='store_true', default=False,
help='Augment with random crops and flips (default: %(default)s)')
parser.add_argument(
'--num_workers', type=int, default=8,
help='Number of dataloader workers; consider using less for HDF5 '
'(default: %(default)s)')
parser.add_argument(
'--no_pin_memory', action='store_false', dest='pin_memory', default=True,
help='Pin data into memory through dataloader? (default: %(default)s)')
parser.add_argument(
'--shuffle', action='store_true', default=False,
help='Shuffle the data (strongly recommended)? (default: %(default)s)')
parser.add_argument(
'--load_in_mem', action='store_true', default=False,
help='Load all data into memory? (default: %(default)s)')
parser.add_argument(
'--use_multiepoch_sampler', action='store_true', default=False,
help='Use the multi-epoch sampler for dataloader? (default: %(default)s)')
### Model stuff ###
parser.add_argument(
'--model', type=str, default='BigGAN',
help='Name of the model module (default: %(default)s)')
parser.add_argument(
'--G_param', type=str, default='SN',
help='Parameterization style to use for G, spectral norm (SN) or SVD (SVD)'
' or None (default: %(default)s)')
parser.add_argument(
'--D_param', type=str, default='SN',
help='Parameterization style to use for D, spectral norm (SN) or SVD (SVD)'
' or None (default: %(default)s)')
parser.add_argument(
'--G_ch', type=int, default=64,
help='Channel multiplier for G (default: %(default)s)')
parser.add_argument(
'--D_ch', type=int, default=64,
help='Channel multiplier for D (default: %(default)s)')
parser.add_argument(
'--G_depth', type=int, default=1,
help='Number of resblocks per stage in G? (default: %(default)s)')
parser.add_argument(
'--D_depth', type=int, default=1,
help='Number of resblocks per stage in D? (default: %(default)s)')
parser.add_argument(
'--D_thin', action='store_false', dest='D_wide', default=True,
help='Use the SN-GAN channel pattern for D? (default: %(default)s)')
parser.add_argument(
'--G_shared', action='store_true', default=False,
help='Use shared embeddings in G? (default: %(default)s)')
parser.add_argument(
'--shared_dim', type=int, default=0,
help='G\'s shared embedding dimensionality; if 0, will be equal to dim_z. '
'(default: %(default)s)')
parser.add_argument(
'--dim_z', type=int, default=128,
help='Noise dimensionality (default: %(default)s)')
parser.add_argument(
'--z_var', type=float, default=1.0,
help='Noise variance (default: %(default)s)')
parser.add_argument(
'--hier', action='store_true', default=False,
help='Use hierarchical z in G? (default: %(default)s)')
parser.add_argument(
'--cross_replica', action='store_true', default=False,
help='Cross-replica batchnorm in G? (default: %(default)s)')
parser.add_argument(
'--mybn', action='store_true', default=False,
help='Use my batchnorm (which supports standing stats)? (default: %(default)s)')
parser.add_argument(
'--G_nl', type=str, default='relu',
help='Activation function for G (default: %(default)s)')
parser.add_argument(
'--D_nl', type=str, default='relu',
help='Activation function for D (default: %(default)s)')
parser.add_argument(
'--G_attn', type=str, default='64',
help='What resolutions to use attention on for G (underscore separated) '
'(default: %(default)s)')
parser.add_argument(
'--D_attn', type=str, default='64',
help='What resolutions to use attention on for D (underscore separated) '
'(default: %(default)s)')
parser.add_argument(
'--norm_style', type=str, default='bn',
help='Normalizer style for G, one of bn [batchnorm], in [instancenorm], '
'ln [layernorm], gn [groupnorm] (default: %(default)s)')
### Model init stuff ###
parser.add_argument(
'--seed', type=int, default=0,
help='Random seed to use; affects both initialization and '
' dataloading. (default: %(default)s)')
parser.add_argument(
'--G_init', type=str, default='ortho',
help='Init style to use for G (default: %(default)s)')
parser.add_argument(
'--D_init', type=str, default='ortho',
help='Init style to use for D (default: %(default)s)')
parser.add_argument(
'--skip_init', action='store_true', default=False,
help='Skip initialization, ideal for testing when ortho init was used '
'(default: %(default)s)')
### Optimizer stuff ###
parser.add_argument(
'--G_lr', type=float, default=5e-5,
help='Learning rate to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_lr', type=float, default=2e-4,
help='Learning rate to use for Discriminator (default: %(default)s)')
parser.add_argument(
'--G_B1', type=float, default=0.0,
help='Beta1 to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_B1', type=float, default=0.0,
help='Beta1 to use for Discriminator (default: %(default)s)')
parser.add_argument(
'--G_B2', type=float, default=0.999,
help='Beta2 to use for Generator (default: %(default)s)')
parser.add_argument(
'--D_B2', type=float, default=0.999,
help='Beta2 to use for Discriminator (default: %(default)s)')
### Batch size, parallel, and precision stuff ###
parser.add_argument(
'--batch_size', type=int, default=64,
help='Default overall batchsize (default: %(default)s)')
parser.add_argument(
'--G_batch_size', type=int, default=0,
help='Batch size to use for G; if 0, same as D (default: %(default)s)')
parser.add_argument(
'--num_G_accumulations', type=int, default=1,
help='Number of passes to accumulate G\'s gradients over '
'(default: %(default)s)')
parser.add_argument(
'--num_D_steps', type=int, default=2,
help='Number of D steps per G step (default: %(default)s)')
parser.add_argument(
'--num_D_accumulations', type=int, default=1,
help='Number of passes to accumulate D\'s gradients over '
'(default: %(default)s)')
parser.add_argument(
'--split_D', action='store_true', default=False,
help='Run D twice rather than concatenating inputs? (default: %(default)s)')
parser.add_argument(
'--num_epochs', type=int, default=100,
help='Number of epochs to train for (default: %(default)s)')
parser.add_argument(
'--parallel', action='store_true', default=False,
help='Train with multiple GPUs (default: %(default)s)')
parser.add_argument(
'--G_fp16', action='store_true', default=False,
help='Train with half-precision in G? (default: %(default)s)')
parser.add_argument(
'--D_fp16', action='store_true', default=False,
help='Train with half-precision in D? (default: %(default)s)')
parser.add_argument(
'--D_mixed_precision', action='store_true', default=False,
help='Train with half-precision activations but fp32 params in D? '
'(default: %(default)s)')
parser.add_argument(
'--G_mixed_precision', action='store_true', default=False,
help='Train with half-precision activations but fp32 params in G? '
'(default: %(default)s)')
parser.add_argument(
'--accumulate_stats', action='store_true', default=False,
help='Accumulate "standing" batchnorm stats? (default: %(default)s)')
parser.add_argument(
'--num_standing_accumulations', type=int, default=16,
help='Number of forward passes to use in accumulating standing stats? '
'(default: %(default)s)')
### Bookkeping stuff ###
parser.add_argument(
'--G_eval_mode', action='store_true', default=False,
help='Run G in eval mode (running/standing stats?) at sample/test time? '
'(default: %(default)s)')
parser.add_argument(
'--save_every', type=int, default=2000,
help='Save every X iterations (default: %(default)s)')
parser.add_argument(
'--num_save_copies', type=int, default=2,
help='How many copies to save (default: %(default)s)')
parser.add_argument(
'--num_best_copies', type=int, default=2,
help='How many previous best checkpoints to save (default: %(default)s)')
parser.add_argument(
'--which_best', type=str, default='FID',
help='Which metric to use to determine when to save new "best"'
'checkpoints, one of IS or FID (default: %(default)s)')
parser.add_argument(
'--no_fid', action='store_true', default=False,
help='Calculate IS only, not FID? (default: %(default)s)')
parser.add_argument(
'--test_every', type=int, default=5000,
help='Test every X iterations (default: %(default)s)')
parser.add_argument(
'--num_inception_images', type=int, default=50000,
help='Number of samples to compute inception metrics with '
'(default: %(default)s)')
parser.add_argument(
'--hashname', action='store_true', default=False,
help='Use a hash of the experiment name instead of the full config '
'(default: %(default)s)')
parser.add_argument(
'--base_root', type=str, default='',
help='Default location to store all weights, samples, data, and logs '
' (default: %(default)s)')
parser.add_argument(
'--data_root', type=str, default='data',
help='Default location where data is stored (default: %(default)s)')
parser.add_argument(
'--weights_root', type=str, default='weights',
help='Default location to store weights (default: %(default)s)')
parser.add_argument(
'--logs_root', type=str, default='logs',
help='Default location to store logs (default: %(default)s)')
parser.add_argument(
'--samples_root', type=str, default='samples',
help='Default location to store samples (default: %(default)s)')
parser.add_argument(
'--pbar', type=str, default='mine',
help='Type of progressbar to use; one of "mine" or "tqdm" '
'(default: %(default)s)')
parser.add_argument(
'--name_suffix', type=str, default='',
help='Suffix for experiment name for loading weights for sampling '
'(consider "best0") (default: %(default)s)')
parser.add_argument(
'--experiment_name', type=str, default='',
help='Optionally override the automatic experiment naming with this arg. '
'(default: %(default)s)')
parser.add_argument(
'--config_from_name', action='store_true', default=False,
help='Use a hash of the experiment name instead of the full config '
'(default: %(default)s)')
### EMA Stuff ###
parser.add_argument(
'--ema', action='store_true', default=False,
help='Keep an ema of G\'s weights? (default: %(default)s)')
parser.add_argument(
'--ema_decay', type=float, default=0.9999,
help='EMA decay rate (default: %(default)s)')
parser.add_argument(
'--use_ema', action='store_true', default=False,
help='Use the EMA parameters of G for evaluation? (default: %(default)s)')
parser.add_argument(
'--ema_start', type=int, default=0,
help='When to start updating the EMA weights (default: %(default)s)')
### Numerical precision and SV stuff ###
parser.add_argument(
'--adam_eps', type=float, default=1e-8,
help='epsilon value to use for Adam (default: %(default)s)')
parser.add_argument(
'--BN_eps', type=float, default=1e-5,
help='epsilon value to use for BatchNorm (default: %(default)s)')
parser.add_argument(
'--SN_eps', type=float, default=1e-8,
help='epsilon value to use for Spectral Norm(default: %(default)s)')
parser.add_argument(
'--num_G_SVs', type=int, default=1,
help='Number of SVs to track in G (default: %(default)s)')
parser.add_argument(
'--num_D_SVs', type=int, default=1,
help='Number of SVs to track in D (default: %(default)s)')
parser.add_argument(
'--num_G_SV_itrs', type=int, default=1,
help='Number of SV itrs in G (default: %(default)s)')
parser.add_argument(
'--num_D_SV_itrs', type=int, default=1,
help='Number of SV itrs in D (default: %(default)s)')
### Ortho reg stuff ###
parser.add_argument(
'--G_ortho', type=float, default=0.0, # 1e-4 is default for BigGAN
help='Modified ortho reg coefficient in G (default: %(default)s)')
parser.add_argument(
'--D_ortho', type=float, default=0.0,
help='Modified ortho reg coefficient in D (default: %(default)s)')
parser.add_argument(
'--toggle_grads', action='store_true', default=True,
help='Toggle D and G\'s "requires_grad" settings when not training them? '
' (default: %(default)s)')
### Classification stuff ###
parser.add_argument(
'--loss', type=str, default='adcgan',
help='[adcgan, pdgan, acgan, tacgan, adcpdgan]. '
'(default: %(default)s)')
parser.add_argument(
'--hinge', action='store_true', default=False,
help='Using hinge loss for classification? (default: %(default)s)')
parser.add_argument(
'--G_lambda', type=float, default=1.0,
help='Classification task lambda coefficient for G (default: %(default)s)')
parser.add_argument(
'--D_lambda', type=float, default=1.0,
help='Classification task lambda coefficient for D (default: %(default)s)')
### Which train function ###
parser.add_argument(
'--which_train_fn', type=str, default='GAN',
help='How2trainyourbois (default: %(default)s)')
### Resume training stuff
parser.add_argument(
'--load_weights', type=str, default='',
help='Suffix for which weights to load (e.g. best0, copy0) '
'(default: %(default)s)')
parser.add_argument(
'--resume', action='store_true', default=False,
help='Resume training? (default: %(default)s)')
### Log stuff ###
parser.add_argument(
'--logstyle', type=str, default='%3.3e',
help='What style to use when logging training metrics?'
'One of: %#.#f/ %#.#e (float/exp, text),'
'pickle (python pickle),'
'npz (numpy zip),'
'mat (MATLAB .mat file) (default: %(default)s)')
parser.add_argument(
'--log_G_spectra', action='store_true', default=False,
help='Log the top 3 singular values in each SN layer in G? '
'(default: %(default)s)')
parser.add_argument(
'--log_D_spectra', action='store_true', default=False,
help='Log the top 3 singular values in each SN layer in D? '
'(default: %(default)s)')
parser.add_argument(
'--sv_log_interval', type=int, default=10,
help='Iteration interval for logging singular values '
' (default: %(default)s)')
return parser
# Arguments for sample.py; not presently used in train.py
def add_sample_parser(parser):
parser.add_argument(
'--sample_npz', action='store_true', default=False,
help='Sample "sample_num_npz" images and save to npz? '
'(default: %(default)s)')
parser.add_argument(
'--sample_num_npz', type=int, default=50000,
help='Number of images to sample when sampling NPZs '
'(default: %(default)s)')
parser.add_argument(
'--sample_sheets', action='store_true', default=False,
help='Produce class-conditional sample sheets and stick them in '
'the samples root? (default: %(default)s)')
parser.add_argument(
'--sample_interps', action='store_true', default=False,
help='Produce interpolation sheets and stick them in '
'the samples root? (default: %(default)s)')
parser.add_argument(
'--sample_sheet_folder_num', type=int, default=-1,
help='Number to use for the folder for these sample sheets '
'(default: %(default)s)')
parser.add_argument(
'--sample_random', action='store_true', default=False,
help='Produce a single random sheet? (default: %(default)s)')
parser.add_argument(
'--sample_trunc_curves', type=str, default='',
help='Get inception metrics with a range of variances? '
'To use this, specify a startpoint, step, and endpoint, e.g. '
'--sample_trunc_curves 0.2_0.1_1.0 for a startpoint of 0.2, '
'stepsize of 0.1, and endpoint of 1.0. Note that this is '
'not exactly identical to using tf.truncated_normal, but should '
'have approximately the same effect. (default: %(default)s)')
parser.add_argument(
'--sample_inception_metrics', action='store_true', default=False,
help='Calculate Inception metrics with sample.py? (default: %(default)s)')
return parser
# Convenience dicts
dset_dict = {'I32': dset.ImageFolder, 'I64': dset.ImageFolder,
'I128': dset.ImageFolder, 'I256': dset.ImageFolder,
'I32_hdf5': dset.ILSVRC_HDF5, 'I64_hdf5': dset.ILSVRC_HDF5,
'I128_hdf5': dset.ILSVRC_HDF5, 'I256_hdf5': dset.ILSVRC_HDF5,
'C10': dset.CIFAR10, 'C100': dset.CIFAR100,
'TI200': dset.ImageFolder, 'TI200_valid': dset.ImageFolder}
imsize_dict = {'I32': 32, 'I32_hdf5': 32,
'I64': 64, 'I64_hdf5': 64,
'I128': 128, 'I128_hdf5': 128,
'I256': 256, 'I256_hdf5': 256,
'C10': 32, 'C100': 32,
'TI200': 64, 'TI200_valid': 64}
root_dict = {'I32': 'ImageNet', 'I32_hdf5': 'ILSVRC32.hdf5',
'I64': 'ImageNet', 'I64_hdf5': 'ILSVRC64.hdf5',
'I128': 'ImageNet', 'I128_hdf5': 'ILSVRC128.hdf5',
'I256': 'ImageNet', 'I256_hdf5': 'ILSVRC256.hdf5',
'C10': 'cifar', 'C100': 'cifar',
'TI200': 'tiny_imagenet/train', 'TI200_valid': 'tiny_imagenet/valid'}
nclass_dict = {'I32': 1000, 'I32_hdf5': 1000,
'I64': 1000, 'I64_hdf5': 1000,
'I128': 1000, 'I128_hdf5': 1000,
'I256': 1000, 'I256_hdf5': 1000,
'C10': 10, 'C100': 100,
'TI200': 200, 'TI200_valid': 200}
# Number of classes to put per sample sheet
classes_per_sheet_dict = {'I32': 50, 'I32_hdf5': 50,
'I64': 50, 'I64_hdf5': 50,
'I128': 20, 'I128_hdf5': 20,
'I256': 20, 'I256_hdf5': 20,
'C10': 10, 'C100': 100,
'TI200': 100, 'TI200_valid': 100}
activation_dict = {'inplace_relu': nn.ReLU(inplace=True),
'relu': nn.ReLU(inplace=False),
'ir': nn.ReLU(inplace=True),}
class CenterCropLongEdge(object):
"""Crops the given PIL Image on the long edge.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
return transforms.functional.center_crop(img, min(img.size))
def __repr__(self):
return self.__class__.__name__
class RandomCropLongEdge(object):
"""Crops the given PIL Image on the long edge with a random start point.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
size = (min(img.size), min(img.size))
# Only step forward along this edge if it's the long edge
i = (0 if size[0] == img.size[0]
else np.random.randint(low=0,high=img.size[0] - size[0]))
j = (0 if size[1] == img.size[1]
else np.random.randint(low=0,high=img.size[1] - size[1]))
return transforms.functional.crop(img, i, j, size[0], size[1])
def __repr__(self):
return self.__class__.__name__
# multi-epoch Dataset sampler to avoid memory leakage and enable resumption of
# training from the same sample regardless of if we stop mid-epoch
class MultiEpochSampler(torch.utils.data.Sampler):
r"""Samples elements randomly over multiple epochs
Arguments:
data_source (Dataset): dataset to sample from
num_epochs (int) : Number of times to loop over the dataset
start_itr (int) : which iteration to begin from
"""
def __init__(self, data_source, num_epochs, start_itr=0, batch_size=128):
self.data_source = data_source
self.num_samples = len(self.data_source)
self.num_epochs = num_epochs
self.start_itr = start_itr
self.batch_size = batch_size
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
def __iter__(self):
n = len(self.data_source)
# Determine number of epochs
num_epochs = int(np.ceil((n * self.num_epochs
- (self.start_itr * self.batch_size)) / float(n)))
# Sample all the indices, and then grab the last num_epochs index sets;
# This ensures if we're starting at epoch 4, we're still grabbing epoch 4's
# indices
out = [torch.randperm(n) for epoch in range(self.num_epochs)][-num_epochs:]
# Ignore the first start_itr % n indices of the first epoch
out[0] = out[0][(self.start_itr * self.batch_size % n):]
output = torch.cat(out).tolist()
print('Length dataset output is %d' % len(output))
return iter(output)
def __len__(self):
return len(self.data_source) * self.num_epochs - self.start_itr * self.batch_size
# Convenience function to centralize all data loaders
def get_data_loaders(dataset, data_root=None, augment=False, batch_size=64,
num_workers=8, shuffle=True, load_in_mem=False, hdf5=False,
pin_memory=True, drop_last=True, start_itr=0,
num_epochs=500, use_multiepoch_sampler=False,
**kwargs):
# Append the dataset-specific subfolder (or HDF5 filename) to the root
data_root += '/%s' % root_dict[dataset]
print('Using dataset root location %s' % data_root)
which_dataset = dset_dict[dataset]
norm_mean = [0.5,0.5,0.5]
norm_std = [0.5,0.5,0.5]
image_size = imsize_dict[dataset]
# For image folder datasets, name of the file where we store the precomputed
# image locations to avoid having to walk the dirs every time we load.
dataset_kwargs = {'index_filename': '%s_imgs.npz' % dataset, 'train': kwargs.get('train', True)}
# HDF5 datasets have their own inbuilt transform, so no train_transform is needed
if 'hdf5' in dataset:
train_transform = None
else:
if augment:
print('Data will be augmented...')
if dataset in ['C10', 'C100']:
train_transform = [transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()]
else:
train_transform = [RandomCropLongEdge(),
transforms.Resize(image_size),
transforms.RandomHorizontalFlip()]
else:
print('Data will not be augmented...')
if dataset in ['C10', 'C100']:
train_transform = []
else:
train_transform = [CenterCropLongEdge(), transforms.Resize(image_size)]
# train_transform = [transforms.Resize(image_size), transforms.CenterCrop]
train_transform = transforms.Compose(train_transform + [
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)])
train_set = which_dataset(root=data_root, transform=train_transform,
load_in_mem=load_in_mem, **dataset_kwargs)
# Prepare loader; the loaders list is for forward compatibility with
# using validation / test splits.
loaders = []
if use_multiepoch_sampler:
print('Using multiepoch sampler from start_itr %d...' % start_itr)
loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory}
sampler = MultiEpochSampler(train_set, num_epochs, start_itr, batch_size)
train_loader = DataLoader(train_set, batch_size=batch_size,
sampler=sampler, **loader_kwargs)
else:
loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory,
'drop_last': drop_last} # Default, drop last incomplete batch
train_loader = DataLoader(train_set, batch_size=batch_size,
shuffle=shuffle, **loader_kwargs)
loaders.append(train_loader)
return loaders
# Utility file to seed rngs
def seed_rng(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
# Utility to peg all roots to a base root
# If a base root folder is provided, peg all other root folders to it.
def update_config_roots(config):
if config['base_root']:
print('Pegging all root folders to base root %s' % config['base_root'])
for key in ['data', 'weights', 'logs', 'samples']:
config['%s_root' % key] = '%s/%s' % (config['base_root'], key)
return config
# Utility to prepare root folders if they don't exist; parent folder must exist
def prepare_root(config):
for key in ['weights_root', 'logs_root', 'samples_root']:
if not os.path.exists(config[key]):
print('Making directory %s for %s...' % (config[key], key))
os.mkdir(config[key])
# Simple wrapper that applies EMA to a model. Could be better done in 1.0 using
# the parameters() and buffers() module functions, but for now this works
# with state_dicts using .copy_
class ema(object):
def __init__(self, source, target, decay=0.9999, start_itr=0):
self.source = source
self.target = target
self.decay = decay
# Optional parameter indicating what iteration to start the decay at
self.start_itr = start_itr
# Initialize target's params to be source's
self.source_dict = self.source.state_dict()
self.target_dict = self.target.state_dict()
print('Initializing EMA parameters to be source parameters...')
with torch.no_grad():
for key in self.source_dict:
self.target_dict[key].data.copy_(self.source_dict[key].data)
# target_dict[key].data = source_dict[key].data # Doesn't work!
def update(self, itr=None):
# If an iteration counter is provided and itr is less than the start itr,
# peg the ema weights to the underlying weights.
if itr and itr < self.start_itr:
decay = 0.0
else:
decay = self.decay
with torch.no_grad():
for key in self.source_dict:
self.target_dict[key].data.copy_(self.target_dict[key].data * decay
+ self.source_dict[key].data * (1 - decay))
# Apply modified ortho reg to a model
# This function is an optimized version that directly computes the gradient,
# instead of computing and then differentiating the loss.
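# Up to a constant factor absorbed into `strength`, the expression below is
# the gradient of ||W W^T * (1 - I)||_F^2 for each weight matrix W (rows
# flattened), i.e. BigGAN's modified ortho penalty with diagonal terms skipped.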
def ortho(model, strength=1e-4, blacklist=[]):
with torch.no_grad():
for param in model.parameters():
# Only apply this to parameters with at least 2 axes, and not in the blacklist
if len(param.shape) < 2 or any([param is item for item in blacklist]):
continue
w = param.view(param.shape[0], -1)
grad = (2 * torch.mm(torch.mm(w, w.t())
* (1. - torch.eye(w.shape[0], device=w.device)), w))
param.grad.data += strength * grad.view(param.shape)
# Default ortho reg
# This function is an optimized version that directly computes the gradient,
# instead of computing and then differentiating the loss.
def default_ortho(model, strength=1e-4, blacklist=[]):
with torch.no_grad():
for param in model.parameters():
# Only apply this to parameters with at least 2 axes & not in blacklist
if len(param.shape) < 2 or param in blacklist:
continue
w = param.view(param.shape[0], -1)
grad = (2 * torch.mm(torch.mm(w, w.t())
- torch.eye(w.shape[0], device=w.device), w))
param.grad.data += strength * grad.view(param.shape)
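# Usage sketch (assumptions: loss.backward() has already run, and G exposes a
# .shared embedding and a .optim optimizer, as in the main BigGAN module):
def _ortho_usage_sketch(G, strength=1e-4):
  # Blacklist the shared-embedding params so they are not orthogonalized
  ortho(G, strength,
        blacklist=[param for param in G.shared.parameters()])
  G.optim.step()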
# Convenience utility to switch off requires_grad
def toggle_grad(model, on_or_off):
for param in model.parameters():
param.requires_grad = on_or_off
# Function to join strings or ignore them
# Base string is the string to link "strings," while strings
# is a list of strings or Nones.
def join_strings(base_string, strings):
return base_string.join([item for item in strings if item])
# Save a model's weights, optimizer, and the state_dict
def save_weights(G, D, state_dict, weights_root, experiment_name,
name_suffix=None, G_ema=None):
root = '/'.join([weights_root, experiment_name])
if not os.path.exists(root):
os.mkdir(root)
if name_suffix:
print('Saving weights to %s/%s...' % (root, name_suffix))
else:
print('Saving weights to %s...' % root)
torch.save(G.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G', name_suffix])))
torch.save(G.optim.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix])))
torch.save(D.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['D', name_suffix])))
torch.save(D.optim.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix])))
torch.save(state_dict,
'%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))
if G_ema is not None:
torch.save(G_ema.state_dict(),
'%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix])))
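# Filename sketch: with name_suffix='copy0', the calls above produce e.g.
# <weights_root>/<experiment_name>/G_copy0.pth, G_optim_copy0.pth,
# D_copy0.pth, D_optim_copy0.pth, state_dict_copy0.pth, and G_ema_copy0.pth;
# with name_suffix=None, join_strings drops the suffix (G.pth, etc.).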
# Load a model's weights, optimizer, and the state_dict
def load_weights(G, D, state_dict, weights_root, experiment_name,
name_suffix=None, G_ema=None, strict=True, load_optim=True):
root = '/'.join([weights_root, experiment_name])
if name_suffix:
print('Loading %s weights from %s...' % (name_suffix, root))
else:
print('Loading weights from %s...' % root)
if G is not None:
G.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G', name_suffix]))),
strict=strict)
if load_optim:
G.optim.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G_optim', name_suffix]))))
if D is not None:
D.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['D', name_suffix]))),
strict=strict)
if load_optim:
D.optim.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['D_optim', name_suffix]))))
  # Load the state dict (load the file once, then copy the requested items)
  loaded_state = torch.load('%s/%s.pth' % (root, join_strings('_', ['state_dict', name_suffix])))
  for item in state_dict:
    state_dict[item] = loaded_state[item]
if G_ema is not None:
G_ema.load_state_dict(
torch.load('%s/%s.pth' % (root, join_strings('_', ['G_ema', name_suffix]))),
strict=strict)
''' MetricsLogger originally stolen from VoxNet source code.
Used for logging inception metrics'''
class MetricsLogger(object):
def __init__(self, fname, reinitialize=False):
self.fname = fname
self.reinitialize = reinitialize
if os.path.exists(self.fname):
if self.reinitialize:
print('{} exists, deleting...'.format(self.fname))
os.remove(self.fname)
def log(self, record=None, **kwargs):
"""
Assumption: no newlines in the input.
"""
if record is None:
record = {}
record.update(kwargs)
record['_stamp'] = time.time()
with open(self.fname, 'a') as f:
f.write(json.dumps(record, ensure_ascii=True) + '\n')
# Logstyle is either:
# '%#.#f' for floating point representation in text
# '%#.#e' for exponent representation in text
# 'npz' for output to npz # NOT YET SUPPORTED
# 'pickle' for output to a python pickle # NOT YET SUPPORTED
# 'mat' for output to a MATLAB .mat file # NOT YET SUPPORTED
class MyLogger(object):
def __init__(self, fname, reinitialize=False, logstyle='%3.3f'):
self.root = fname
if not os.path.exists(self.root):
os.mkdir(self.root)
self.reinitialize = reinitialize
self.metrics = []
self.logstyle = logstyle # One of '%3.3f' or like '%3.3e'
# Delete log if re-starting and log already exists
def reinit(self, item):
if os.path.exists('%s/%s.log' % (self.root, item)):
if self.reinitialize:
        # Only print the removal message once for the singular value logs
        if 'sv' in item:
if not any('sv' in item for item in self.metrics):
print('Deleting singular value logs...')
else:
          print('{} exists, deleting...'.format('%s/%s.log' % (self.root, item)))
os.remove('%s/%s.log' % (self.root, item))
# Log in plaintext; this is designed for being read in MATLAB(sorry not sorry)
def log(self, itr, **kwargs):
for arg in kwargs:
if arg not in self.metrics:
if self.reinitialize:
self.reinit(arg)
self.metrics += [arg]
if self.logstyle == 'pickle':
print('Pickle not currently supported...')
# with open('%s/%s.log' % (self.root, arg), 'a') as f:
# pickle.dump(kwargs[arg], f)
elif self.logstyle == 'mat':
print('.mat logstyle not currently supported...')
else:
with open('%s/%s.log' % (self.root, arg), 'a') as f:
f.write('%d: %s\n' % (itr, self.logstyle % kwargs[arg]))
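# Usage sketch (paths are illustrative): one plaintext file per metric is
# appended under <fname>/<metric>.log, one "itr: value" line per call.
def _mylogger_usage_sketch(logs_root, experiment_name):
  train_log = MyLogger('%s/%s' % (logs_root, experiment_name),
                       reinitialize=True, logstyle='%3.3e')
  train_log.log(itr=0, G_loss=1.234, D_loss_real=0.567)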
# Write some metadata to the logs directory
def write_metadata(logs_root, experiment_name, config, state_dict):
with open(('%s/%s/metalog.txt' %
(logs_root, experiment_name)), 'w') as writefile:
writefile.write('datetime: %s\n' % str(datetime.datetime.now()))
writefile.write('config: %s\n' % str(config))
writefile.write('state: %s\n' %str(state_dict))
"""
Very basic progress indicator to wrap an iterable in.
Author: Jan Schlüter
Andy's adds: time elapsed in addition to ETA, makes it possible to add
estimated time to 1k iters instead of estimated time to completion.
"""
def progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k'):
"""
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no length), and `min_delay` gives the minimum time in seconds between
subsequent prints. `desc` gives an optional prefix text (end with a space).
"""
total = total or len(items)
t_start = time.time()
t_last = 0
for n, item in enumerate(items):
t_now = time.time()
if t_now - t_last > min_delay:
print("\r%s%d/%d (%6.2f%%)" % (
desc, n+1, total, n / float(total) * 100), end=" ")
if n > 0:
if displaytype == 's1k': # minutes/seconds for 1000 iters
next_1000 = n + (1000 - n%1000)
t_done = t_now - t_start
t_1k = t_done / n * next_1000
outlist = list(divmod(t_done, 60)) + list(divmod(t_1k - t_done, 60))
print("(TE/ET1k: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
else:# displaytype == 'eta':
t_done = t_now - t_start
t_total = t_done / n * total
outlist = list(divmod(t_done, 60)) + list(divmod(t_total - t_done, 60))
print("(TE/ETA: %d:%02d / %d:%02d)" % tuple(outlist), end=" ")
sys.stdout.flush()
t_last = t_now
yield item
t_total = time.time() - t_start
print("\r%s%d/%d (100.00%%) (took %d:%02d)" % ((desc, total, total) +
divmod(t_total, 60)))
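# Usage sketch: wrap any sized iterable; displaytype='eta' switches the
# readout from time-to-next-1k-iters to estimated time to completion.
def _progress_usage_sketch(loader):
  for x, y in progress(loader, desc='Epoch 0, item ', displaytype='eta'):
    pass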
# Sample function for use with inception metrics
def sample(G, z_, y_, config):
with torch.no_grad():
z_.sample_()
y_.sample_()
if config['parallel']:
G_z = nn.parallel.data_parallel(G, (z_, G.shared(y_)))
else:
G_z = G(z_, G.shared(y_))
return G_z, y_
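# Usage sketch: the inception metrics helper expects a zero-argument sampler,
# so bind the arguments with functools.partial (z_ and y_ are assumed to come
# from prepare_z_y below):
def _sample_usage_sketch(G, z_, y_, config):
  import functools
  return functools.partial(sample, G=G, z_=z_, y_=y_, config=config)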
# Sample function for sample sheets
def sample_sheet(G, classes_per_sheet, num_classes, samples_per_class, parallel,
samples_root, experiment_name, folder_number, z_=None):
# Prepare sample directory
if not os.path.isdir('%s/%s' % (samples_root, experiment_name)):
os.mkdir('%s/%s' % (samples_root, experiment_name))
if not os.path.isdir('%s/%s/%d' % (samples_root, experiment_name, folder_number)):
os.mkdir('%s/%s/%d' % (samples_root, experiment_name, folder_number))
# loop over total number of sheets
for i in range(num_classes // classes_per_sheet):
ims = []
y = torch.arange(i * classes_per_sheet, (i + 1) * classes_per_sheet, device='cuda')
for j in range(samples_per_class):
if (z_ is not None) and hasattr(z_, 'sample_') and classes_per_sheet <= z_.size(0):
z_.sample_()
else:
z_ = torch.randn(classes_per_sheet, G.dim_z, device='cuda')
with torch.no_grad():
if parallel:
o = nn.parallel.data_parallel(G, (z_[:classes_per_sheet], G.shared(y)))
else:
o = G(z_[:classes_per_sheet], G.shared(y))
ims += [o.data.cpu()]
# This line should properly unroll the images
out_ims = torch.stack(ims, 1).view(-1, ims[0].shape[1], ims[0].shape[2],
ims[0].shape[3]).data.float().cpu()
# The path for the samples
image_filename = '%s/%s/%d/samples%d.jpg' % (samples_root, experiment_name,
folder_number, i)
torchvision.utils.save_image(out_ims, image_filename,
nrow=samples_per_class, normalize=True)
# Interp function; expects x0 and x1 to be of shape (shape0, 1, rest_of_shape..)
def interp(x0, x1, num_midpoints):
lerp = torch.linspace(0, 1.0, num_midpoints + 2, device='cuda').to(x0.dtype)
return ((x0 * (1 - lerp.view(1, -1, 1))) + (x1 * lerp.view(1, -1, 1)))
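# Shape sketch (assumes a CUDA device, since interp allocates lerp on 'cuda'):
# with x0, x1 of shape (N, 1, D), interp returns (N, M+2, D), where row r
# walks linearly from x0[r] to x1[r] in M+2 evenly spaced steps.
def _interp_shape_sketch(N=4, D=128, num_midpoints=8):
  x0 = torch.randn(N, 1, D, device='cuda')
  x1 = torch.randn(N, 1, D, device='cuda')
  return interp(x0, x1, num_midpoints)  # -> (N, num_midpoints + 2, D)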
# interp sheet function
# Supports full, class-wise and intra-class interpolation
def interp_sheet(G, num_per_sheet, num_midpoints, num_classes, parallel,
samples_root, experiment_name, folder_number, sheet_number=0,
fix_z=False, fix_y=False, device='cuda'):
# Prepare zs and ys
if fix_z: # If fix Z, only sample 1 z per row
zs = torch.randn(num_per_sheet, 1, G.dim_z, device=device)
zs = zs.repeat(1, num_midpoints + 2, 1).view(-1, G.dim_z)
else:
zs = interp(torch.randn(num_per_sheet, 1, G.dim_z, device=device),
torch.randn(num_per_sheet, 1, G.dim_z, device=device),
num_midpoints).view(-1, G.dim_z)
  if fix_y: # If fix y, only sample 1 y per row
ys = sample_1hot(num_per_sheet, num_classes)
ys = G.shared(ys).view(num_per_sheet, 1, -1)
ys = ys.repeat(1, num_midpoints + 2, 1).view(num_per_sheet * (num_midpoints + 2), -1)
else:
ys = interp(G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),
G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, -1),
num_midpoints).view(num_per_sheet * (num_midpoints + 2), -1)
# Run the net--note that we've already passed y through G.shared.
if G.fp16:
zs = zs.half()
with torch.no_grad():
if parallel:
out_ims = nn.parallel.data_parallel(G, (zs, ys)).data.cpu()
else:
out_ims = G(zs, ys).data.cpu()
interp_style = '' + ('Z' if not fix_z else '') + ('Y' if not fix_y else '')
image_filename = '%s/%s/%d/interp%s%d.jpg' % (samples_root, experiment_name,
folder_number, interp_style,
sheet_number)
torchvision.utils.save_image(out_ims, image_filename,
nrow=num_midpoints + 2, normalize=True)
# Convenience debugging function to print out gradnorms and shape from each layer
# May need to rewrite this so we can actually see which parameter is which
def print_grad_norms(net):
gradsums = [[float(torch.norm(param.grad).item()),
float(torch.norm(param).item()), param.shape]
for param in net.parameters()]
order = np.argsort([item[0] for item in gradsums])
print(['%3.3e,%3.3e, %s' % (gradsums[item_index][0],
gradsums[item_index][1],
str(gradsums[item_index][2]))
for item_index in order])
# Get singular values to log. This will use the state dict to find them
# and substitute underscores for dots.
def get_SVs(net, prefix):
d = net.state_dict()
return {('%s_%s' % (prefix, key)).replace('.', '_') :
float(d[key].item())
for key in d if 'sv' in key}
# Name an experiment based on its config
def name_from_config(config):
name = '_'.join([
item for item in [
'Big%s' % config['which_train_fn'],
config['dataset'],
config['model'] if config['model'] != 'BigGAN' else None,
'seed%d' % config['seed'],
'Gch%d' % config['G_ch'],
'Dch%d' % config['D_ch'],
'Gd%d' % config['G_depth'] if config['G_depth'] > 1 else None,
'Dd%d' % config['D_depth'] if config['D_depth'] > 1 else None,
'bs%d' % config['batch_size'],
'Gfp16' if config['G_fp16'] else None,
'Dfp16' if config['D_fp16'] else None,
'nDs%d' % config['num_D_steps'] if config['num_D_steps'] > 1 else None,
'nDa%d' % config['num_D_accumulations'] if config['num_D_accumulations'] > 1 else None,
'nGa%d' % config['num_G_accumulations'] if config['num_G_accumulations'] > 1 else None,
'Glr%2.1e' % config['G_lr'],
'Dlr%2.1e' % config['D_lr'],
'GB%3.3f' % config['G_B1'] if config['G_B1'] !=0.0 else None,
'GBB%3.3f' % config['G_B2'] if config['G_B2'] !=0.999 else None,
'DB%3.3f' % config['D_B1'] if config['D_B1'] !=0.0 else None,
'DBB%3.3f' % config['D_B2'] if config['D_B2'] !=0.999 else None,
'Gnl%s' % config['G_nl'],
'Dnl%s' % config['D_nl'],
'Ginit%s' % config['G_init'],
'Dinit%s' % config['D_init'],
'G%s' % config['G_param'] if config['G_param'] != 'SN' else None,
'D%s' % config['D_param'] if config['D_param'] != 'SN' else None,
'Gattn%s' % config['G_attn'] if config['G_attn'] != '0' else None,
'Dattn%s' % config['D_attn'] if config['D_attn'] != '0' else None,
'Gortho%2.1e' % config['G_ortho'] if config['G_ortho'] > 0.0 else None,
'Dortho%2.1e' % config['D_ortho'] if config['D_ortho'] > 0.0 else None,
config['norm_style'] if config['norm_style'] != 'bn' else None,
'cr' if config['cross_replica'] else None,
'Gshared' if config['G_shared'] else None,
'hier' if config['hier'] else None,
'ema' if config['ema'] else None,
config['name_suffix'] if config['name_suffix'] else None,
]
if item is not None])
# dogball
if config['hashname']:
return hashname(name)
else:
return name
# A simple function to produce a unique experiment name from the animal hashes.
def hashname(name):
h = hash(name)
a = h % len(animal_hash.a)
h = h // len(animal_hash.a)
b = h % len(animal_hash.b)
  h = h // len(animal_hash.b)
c = h % len(animal_hash.c)
return animal_hash.a[a] + animal_hash.b[b] + animal_hash.c[c]
# Get GPU memory; indices is a list of device indices to query (-i flag)
def query_gpu(indices):
  os.system('nvidia-smi -i %s --query-gpu=memory.free --format=csv'
            % ','.join(str(index) for index in indices))
# Convenience function to count the number of parameters in a module
def count_parameters(module):
print('Number of parameters: {}'.format(
sum([p.data.nelement() for p in module.parameters()])))
# Convenience function to sample an index, not actually a 1-hot
def sample_1hot(batch_size, num_classes, device='cuda'):
return torch.randint(low=0, high=num_classes, size=(batch_size,),
device=device, dtype=torch.int64, requires_grad=False)
# A highly simplified convenience class for sampling from distributions
# One could also use PyTorch's inbuilt distributions package.
# Note that this class requires initialization to proceed as
# x = Distribution(torch.randn(size))
# x.init_distribution(dist_type, **dist_kwargs)
# x = x.to(device,dtype)
# This is partially based on https://discuss.pytorch.org/t/subclassing-torch-tensor/23754/2
class Distribution(torch.Tensor):
# Init the params of the distribution
def init_distribution(self, dist_type, **kwargs):
self.dist_type = dist_type
self.dist_kwargs = kwargs
if self.dist_type == 'normal':
self.mean, self.var = kwargs['mean'], kwargs['var']
elif self.dist_type == 'categorical':
self.num_categories = kwargs['num_categories']
self.label = kwargs.get('label', None)
def sample_(self):
if self.dist_type == 'normal':
self.normal_(self.mean, self.var)
elif self.dist_type == 'categorical':
if self.label is not None:
self.random_(self.label, self.label+1)
else:
self.random_(0, self.num_categories)
# return self.variable
# Silly hack: overwrite the to() method to wrap the new object
# in a distribution as well
def to(self, *args, **kwargs):
new_obj = Distribution(self)
new_obj.init_distribution(self.dist_type, **self.dist_kwargs)
new_obj.data = super().to(*args, **kwargs)
return new_obj
# Convenience function to prepare a z and y vector
def prepare_z_y(G_batch_size, dim_z, nclasses, device='cuda',
                fp16=False, z_var=1.0, label=None):
  z_ = Distribution(torch.randn(G_batch_size, dim_z, requires_grad=False))
  z_.init_distribution('normal', mean=0, var=z_var)
  # Use the overridden .to() for the fp16 cast; a separate .half() call
  # would return a plain tensor and strip the Distribution machinery.
  z_ = z_.to(device, torch.float16 if fp16 else torch.float32)
  y_ = Distribution(torch.zeros(G_batch_size, requires_grad=False))
  y_.init_distribution('categorical', num_categories=nclasses, label=label)
  y_ = y_.to(device, torch.int64)
return z_, y_
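# Usage sketch: both returned objects are refilled in-place via .sample_().
def _prepare_z_y_usage_sketch(G_batch_size=64, dim_z=120, nclasses=1000):
  z_, y_ = prepare_z_y(G_batch_size, dim_z, nclasses)
  z_.sample_()  # refill z_ from N(0, z_var) in-place
  y_.sample_()  # refill y_ with uniform class indices in-place
  return z_, y_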
def initiate_standing_stats(net):
for module in net.modules():
if hasattr(module, 'accumulate_standing'):
module.reset_stats()
module.accumulate_standing = True
def accumulate_standing_stats(net, z, y, nclasses, num_accumulations=16):
initiate_standing_stats(net)
net.train()
for i in range(num_accumulations):
with torch.no_grad():
z.normal_()
y.random_(0, nclasses)
x = net(z, net.shared(y)) # No need to parallelize here unless using syncbn
# Set to eval mode
net.eval()
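# Usage sketch (assumptions: z and y were built with prepare_z_y, and the
# config keys below exist, as in the main training script): accumulate
# standing stats in G_ema before evaluation.
def _standing_stats_usage_sketch(G_ema, z, y, config):
  accumulate_standing_stats(G_ema, z, y, config['n_classes'],
                            config['num_standing_accumulations'])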
# This version of Adam keeps an fp32 copy of the parameters and
# does all of the parameter updates in fp32, while still doing the
# forwards and backwards passes using fp16 (i.e. fp16 copies of the
# parameters and fp16 activations).
#
# Note that it keeps the fp32 master copy of each param in the optimizer
# state ('fp32_p') rather than casting the params themselves.
import math
from torch.optim.optimizer import Optimizer
class Adam16(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
params = list(params)
super(Adam16, self).__init__(params, defaults)
# Safety modification to make sure we floatify our state
def load_state_dict(self, state_dict):
super(Adam16, self).load_state_dict(state_dict)
for group in self.param_groups:
for p in group['params']:
self.state[p]['exp_avg'] = self.state[p]['exp_avg'].float()
self.state[p]['exp_avg_sq'] = self.state[p]['exp_avg_sq'].float()
self.state[p]['fp32_p'] = self.state[p]['fp32_p'].float()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
# Fp32 copy of the weights
state['fp32_p'] = p.data.float()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
        if group['weight_decay'] != 0:
          grad = grad.add(state['fp32_p'], alpha=group['weight_decay'])
        # Decay the first and second moment running average coefficients
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        denom = exp_avg_sq.sqrt().add_(group['eps'])
        bias_correction1 = 1 - beta1 ** state['step']
        bias_correction2 = 1 - beta2 ** state['step']
        step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
        state['fp32_p'].addcdiv_(exp_avg, denom, value=-step_size)
        p.data = state['fp32_p'].half()
return loss
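# Usage sketch: drop-in replacement for torch.optim.Adam when training with
# fp16 params (the lr/betas below are illustrative, not canonical values):
def _adam16_usage_sketch(net):
  return Adam16(net.parameters(), lr=2e-4, betas=(0.0, 0.999), eps=1e-8)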
| 49,789 | 39.878489 | 109 | py |
adcgan | adcgan-main/BigGAN-PyTorch/layers.py | ''' Layers
This file contains various layers for the BigGAN models.
'''
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
from sync_batchnorm import SynchronizedBatchNorm2d as SyncBN2d
# Projection of x onto y
def proj(x, y):
return torch.mm(y, x.t()) * y / torch.mm(y, y.t())
# Orthogonalize x wrt list of vectors ys
def gram_schmidt(x, ys):
for y in ys:
x = x - proj(x, y)
return x
# Apply num_itrs steps of the power method to estimate top N singular values.
def power_iteration(W, u_, update=True, eps=1e-12):
# Lists holding singular vectors and values
us, vs, svs = [], [], []
for i, u in enumerate(u_):
# Run one step of the power iteration
with torch.no_grad():
v = torch.matmul(u, W)
# Run Gram-Schmidt to subtract components of all other singular vectors
v = F.normalize(gram_schmidt(v, vs), eps=eps)
# Add to the list
vs += [v]
# Update the other singular vector
u = torch.matmul(v, W.t())
# Run Gram-Schmidt to subtract components of all other singular vectors
u = F.normalize(gram_schmidt(u, us), eps=eps)
# Add to the list
us += [u]
if update:
u_[i][:] = u
# Compute this singular value and add it to the list
svs += [torch.squeeze(torch.matmul(torch.matmul(v, W.t()), u.t()))]
#svs += [torch.sum(F.linear(u, W.transpose(0, 1)) * v)]
return svs, us, vs
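# Sanity-check sketch (a hypothetical helper, not used elsewhere): after a
# few warm-up calls, the first estimated singular value should roughly match
# the exact top singular value from torch.svd.
def _power_iteration_check(W, num_warmup=20):
  u_ = [torch.randn(1, W.size(0), device=W.device)]
  for _ in range(num_warmup):
    svs, us, vs = power_iteration(W, u_, update=True)
  return svs[0], torch.svd(W)[1][0]  # (estimate, exact)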
# Convenience passthrough function
class identity(nn.Module):
def forward(self, input):
return input
# Spectral normalization base class
class SN(object):
def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12):
# Number of power iterations per step
self.num_itrs = num_itrs
# Number of singular values
self.num_svs = num_svs
# Transposed?
self.transpose = transpose
# Epsilon value for avoiding divide-by-0
self.eps = eps
# Register a singular vector for each sv
for i in range(self.num_svs):
self.register_buffer('u%d' % i, torch.randn(1, num_outputs))
self.register_buffer('sv%d' % i, torch.ones(1))
# Singular vectors (u side)
@property
def u(self):
return [getattr(self, 'u%d' % i) for i in range(self.num_svs)]
# Singular values;
# note that these buffers are just for logging and are not used in training.
@property
def sv(self):
return [getattr(self, 'sv%d' % i) for i in range(self.num_svs)]
# Compute the spectrally-normalized weight
def W_(self):
W_mat = self.weight.view(self.weight.size(0), -1)
if self.transpose:
W_mat = W_mat.t()
# Apply num_itrs power iterations
for _ in range(self.num_itrs):
svs, us, vs = power_iteration(W_mat, self.u, update=self.training, eps=self.eps)
# Update the svs
if self.training:
with torch.no_grad(): # Make sure to do this in a no_grad() context or you'll get memory leaks!
for i, sv in enumerate(svs):
self.sv[i][:] = sv
return self.weight / svs[0]
# 2D Conv layer with spectral norm
class SNConv2d(nn.Conv2d, SN):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
num_svs=1, num_itrs=1, eps=1e-12):
nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps)
def forward(self, x):
return F.conv2d(x, self.W_(), self.bias, self.stride,
self.padding, self.dilation, self.groups)
# Linear layer with spectral norm
class SNLinear(nn.Linear, SN):
def __init__(self, in_features, out_features, bias=True,
num_svs=1, num_itrs=1, eps=1e-12):
nn.Linear.__init__(self, in_features, out_features, bias)
SN.__init__(self, num_svs, num_itrs, out_features, eps=eps)
def forward(self, x):
return F.linear(x, self.W_(), self.bias)
# Embedding layer with spectral norm
# We use num_embeddings as the dim instead of embedding_dim here
# for convenience sake
class SNEmbedding(nn.Embedding, SN):
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
max_norm=None, norm_type=2, scale_grad_by_freq=False,
sparse=False, _weight=None,
num_svs=1, num_itrs=1, eps=1e-12):
nn.Embedding.__init__(self, num_embeddings, embedding_dim, padding_idx,
max_norm, norm_type, scale_grad_by_freq,
sparse, _weight)
SN.__init__(self, num_svs, num_itrs, num_embeddings, eps=eps)
def forward(self, x):
return F.embedding(x, self.W_())
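# Wiring sketch (mirrors how the main BigGAN module constructs these via
# functools.partial; the kwargs are illustrative defaults, not the only
# valid settings):
def _sn_layer_sketch():
  import functools
  which_conv = functools.partial(SNConv2d, kernel_size=3, padding=1,
                                 num_svs=1, num_itrs=1, eps=1e-12)
  conv = which_conv(64, 128)      # spectrally-normalized 3x3 conv
  linear = SNLinear(128, 1)       # spectrally-normalized linear head
  embed = SNEmbedding(1000, 128)  # spectrally-normalized class embedding
  return conv, linear, embed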
# A non-local block as used in SA-GAN
# Note that the implementation as described in the paper is largely incorrect;
# refer to the released code for the actual implementation.
class Attention(nn.Module):
def __init__(self, ch, which_conv=SNConv2d, name='attention'):
super(Attention, self).__init__()
# Channel multiplier
self.ch = ch
self.which_conv = which_conv
self.theta = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
self.phi = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
self.g = self.which_conv(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False)
self.o = self.which_conv(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False)
# Learnable gain parameter
self.gamma = P(torch.tensor(0.), requires_grad=True)
def forward(self, x, y=None):
# Apply convs
theta = self.theta(x)
phi = F.max_pool2d(self.phi(x), [2,2])
g = F.max_pool2d(self.g(x), [2,2])
    # Perform reshapes
    theta = theta.view(-1, self.ch // 8, x.shape[2] * x.shape[3])
    phi = phi.view(-1, self.ch // 8, x.shape[2] * x.shape[3] // 4)
    g = g.view(-1, self.ch // 2, x.shape[2] * x.shape[3] // 4)
# Matmul and softmax to get attention maps
beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
# Attention map times g path
o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3]))
return self.gamma * o + x
# Fused batchnorm op
def fused_bn(x, mean, var, gain=None, bias=None, eps=1e-5):
# Apply scale and shift--if gain and bias are provided, fuse them here
# Prepare scale
scale = torch.rsqrt(var + eps)
# If a gain is provided, use it
if gain is not None:
scale = scale * gain
# Prepare shift
shift = mean * scale
# If bias is provided, use it
if bias is not None:
shift = shift - bias
return x * scale - shift
#return ((x - mean) / ((var + eps) ** 0.5)) * gain + bias # The unfused way.
# Manual BN
# Calculate means and variances using mean-of-squares minus mean-squared
def manual_bn(x, gain=None, bias=None, return_mean_var=False, eps=1e-5):
# Cast x to float32 if necessary
float_x = x.float()
# Calculate expected value of x (m) and expected value of x**2 (m2)
# Mean of x
m = torch.mean(float_x, [0, 2, 3], keepdim=True)
# Mean of x squared
m2 = torch.mean(float_x ** 2, [0, 2, 3], keepdim=True)
# Calculate variance as mean of squared minus mean squared.
var = (m2 - m **2)
# Cast back to float 16 if necessary
var = var.type(x.type())
m = m.type(x.type())
# Return mean and variance for updating stored mean/var if requested
if return_mean_var:
return fused_bn(x, m, var, gain, bias, eps), m.squeeze(), var.squeeze()
else:
return fused_bn(x, m, var, gain, bias, eps)
# My batchnorm, supports standing stats
class myBN(nn.Module):
def __init__(self, num_channels, eps=1e-5, momentum=0.1):
super(myBN, self).__init__()
    # Momentum for updating running stats
    self.momentum = momentum
    # Epsilon to avoid dividing by 0
    self.eps = eps
# Register buffers
self.register_buffer('stored_mean', torch.zeros(num_channels))
self.register_buffer('stored_var', torch.ones(num_channels))
self.register_buffer('accumulation_counter', torch.zeros(1))
# Accumulate running means and vars
self.accumulate_standing = False
# reset standing stats
def reset_stats(self):
self.stored_mean[:] = 0
self.stored_var[:] = 0
self.accumulation_counter[:] = 0
def forward(self, x, gain, bias):
if self.training:
out, mean, var = manual_bn(x, gain, bias, return_mean_var=True, eps=self.eps)
# If accumulating standing stats, increment them
if self.accumulate_standing:
self.stored_mean[:] = self.stored_mean + mean.data
self.stored_var[:] = self.stored_var + var.data
self.accumulation_counter += 1.0
# If not accumulating standing stats, take running averages
else:
self.stored_mean[:] = self.stored_mean * (1 - self.momentum) + mean * self.momentum
self.stored_var[:] = self.stored_var * (1 - self.momentum) + var * self.momentum
return out
# If not in training mode, use the stored statistics
else:
mean = self.stored_mean.view(1, -1, 1, 1)
var = self.stored_var.view(1, -1, 1, 1)
# If using standing stats, divide them by the accumulation counter
if self.accumulate_standing:
mean = mean / self.accumulation_counter
var = var / self.accumulation_counter
return fused_bn(x, mean, var, gain, bias, self.eps)
# Simple function to handle groupnorm norm stylization
def groupnorm(x, norm_style):
# If number of channels specified in norm_style:
if 'ch' in norm_style:
ch = int(norm_style.split('_')[-1])
groups = max(int(x.shape[1]) // ch, 1)
# If number of groups specified in norm style
elif 'grp' in norm_style:
groups = int(norm_style.split('_')[-1])
# If neither, default to groups = 16
else:
groups = 16
return F.group_norm(x, groups)
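# Usage sketch (assumes x has a channel count divisible by the resulting
# group count, e.g. C = 64):
def _groupnorm_sketch(x):
  a = groupnorm(x, 'ch_16')    # 16 channels per group -> C // 16 groups
  b = groupnorm(x, 'grp_8')    # exactly 8 groups
  c = groupnorm(x, 'default')  # neither 'ch' nor 'grp' -> 16 groups
  return a, b, c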
# Class-conditional bn
# output size is the number of channels, input size is for the linear layers
# Andy's Note: this class feels messy but I'm not really sure how to clean it up
# Suggestions welcome! (By which I mean, refactor this and make a pull request
# if you want to make this more readable/usable).
class ccbn(nn.Module):
def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1,
cross_replica=False, mybn=False, norm_style='bn',):
super(ccbn, self).__init__()
self.output_size, self.input_size = output_size, input_size
# Prepare gain and bias layers
self.gain = which_linear(input_size, output_size)
self.bias = which_linear(input_size, output_size)
# epsilon to avoid dividing by 0
self.eps = eps
# Momentum
self.momentum = momentum
# Use cross-replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
# Norm style?
self.norm_style = norm_style
if self.cross_replica:
self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
elif self.mybn:
self.bn = myBN(output_size, self.eps, self.momentum)
elif self.norm_style in ['bn', 'in']:
self.register_buffer('stored_mean', torch.zeros(output_size))
self.register_buffer('stored_var', torch.ones(output_size))
def forward(self, x, y):
# Calculate class-conditional gains and biases
gain = (1 + self.gain(y)).view(y.size(0), -1, 1, 1)
bias = self.bias(y).view(y.size(0), -1, 1, 1)
# If using my batchnorm
if self.mybn or self.cross_replica:
return self.bn(x, gain=gain, bias=bias)
# else:
else:
      if self.norm_style == 'bn':
        out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None,
                          self.training, self.momentum, self.eps)
      elif self.norm_style == 'in':
        out = F.instance_norm(x, self.stored_mean, self.stored_var, None, None,
                          self.training, self.momentum, self.eps)
elif self.norm_style == 'gn':
        out = groupnorm(x, self.norm_style)
elif self.norm_style == 'nonorm':
out = x
return out * gain + bias
def extra_repr(self):
s = 'out: {output_size}, in: {input_size},'
s +=' cross_replica={cross_replica}'
return s.format(**self.__dict__)
# Normal, non-class-conditional BN
class bn(nn.Module):
def __init__(self, output_size, eps=1e-5, momentum=0.1,
cross_replica=False, mybn=False):
super(bn, self).__init__()
self.output_size= output_size
# Prepare gain and bias layers
self.gain = P(torch.ones(output_size), requires_grad=True)
self.bias = P(torch.zeros(output_size), requires_grad=True)
# epsilon to avoid dividing by 0
self.eps = eps
# Momentum
self.momentum = momentum
# Use cross-replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
if self.cross_replica:
self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)
elif mybn:
self.bn = myBN(output_size, self.eps, self.momentum)
# Register buffers if neither of the above
else:
self.register_buffer('stored_mean', torch.zeros(output_size))
self.register_buffer('stored_var', torch.ones(output_size))
def forward(self, x, y=None):
if self.cross_replica or self.mybn:
gain = self.gain.view(1,-1,1,1)
bias = self.bias.view(1,-1,1,1)
return self.bn(x, gain=gain, bias=bias)
else:
return F.batch_norm(x, self.stored_mean, self.stored_var, self.gain,
self.bias, self.training, self.momentum, self.eps)
# Generator blocks
# Note that this class assumes the kernel size and padding (and any other
# settings) have been selected in the main generator module and passed in
# through the which_conv arg. Similar rules apply with which_bn (the input
# size [which is actually the number of channels of the conditional info] must
# be preselected)
class GBlock(nn.Module):
def __init__(self, in_channels, out_channels,
which_conv=nn.Conv2d, which_bn=bn, activation=None,
upsample=None):
super(GBlock, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
self.which_conv, self.which_bn = which_conv, which_bn
self.activation = activation
self.upsample = upsample
# Conv layers
self.conv1 = self.which_conv(self.in_channels, self.out_channels)
self.conv2 = self.which_conv(self.out_channels, self.out_channels)
self.learnable_sc = in_channels != out_channels or upsample
if self.learnable_sc:
self.conv_sc = self.which_conv(in_channels, out_channels,
kernel_size=1, padding=0)
# Batchnorm layers
self.bn1 = self.which_bn(in_channels)
self.bn2 = self.which_bn(out_channels)
def forward(self, x, y):
h = self.activation(self.bn1(x, y))
if self.upsample:
h = self.upsample(h)
x = self.upsample(x)
h = self.conv1(h)
h = self.activation(self.bn2(h, y))
h = self.conv2(h)
if self.learnable_sc:
x = self.conv_sc(x)
return h + x
# Residual block for the discriminator
class DBlock(nn.Module):
def __init__(self, in_channels, out_channels, which_conv=SNConv2d, wide=True,
preactivation=False, activation=None, downsample=None,):
super(DBlock, self).__init__()
self.in_channels, self.out_channels = in_channels, out_channels
# If using wide D (as in SA-GAN and BigGAN), change the channel pattern
self.hidden_channels = self.out_channels if wide else self.in_channels
self.which_conv = which_conv
self.preactivation = preactivation
self.activation = activation
self.downsample = downsample
# Conv layers
self.conv1 = self.which_conv(self.in_channels, self.hidden_channels)
self.conv2 = self.which_conv(self.hidden_channels, self.out_channels)
self.learnable_sc = True if (in_channels != out_channels) or downsample else False
if self.learnable_sc:
self.conv_sc = self.which_conv(in_channels, out_channels,
kernel_size=1, padding=0)
def shortcut(self, x):
if self.preactivation:
if self.learnable_sc:
x = self.conv_sc(x)
if self.downsample:
x = self.downsample(x)
else:
if self.downsample:
x = self.downsample(x)
if self.learnable_sc:
x = self.conv_sc(x)
return x
def forward(self, x):
if self.preactivation:
# h = self.activation(x) # NOT TODAY SATAN
# Andy's note: This line *must* be an out-of-place ReLU or it
# will negatively affect the shortcut connection.
h = F.relu(x)
else:
h = x
h = self.conv1(h)
h = self.conv2(self.activation(h))
if self.downsample:
h = self.downsample(h)
return h + self.shortcut(x)
# dogball | 17,130 | 36.32244 | 101 | py |
adcgan | adcgan-main/BigGAN-PyTorch/datasets.py | ''' Datasets
This file contains definitions for our CIFAR, ImageFolder, and HDF5 datasets
'''
import os
import os.path
import sys
from PIL import Image
import numpy as np
from tqdm import tqdm, trange
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.datasets.utils import download_url, check_integrity
import torch.utils.data as data
from torch.utils.data import DataLoader
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def is_image_file(filename):
"""Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
def find_classes(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(dir, class_to_idx):
images = []
dir = os.path.expanduser(dir)
for target in tqdm(sorted(os.listdir(dir))):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if is_image_file(fname):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder(data.Dataset):
"""A generic data loader where the images are arranged in this way: ::
root/dogball/xxx.png
root/dogball/xxy.png
root/dogball/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, load_in_mem=False,
index_filename='imagenet_imgs.npz', **kwargs):
classes, class_to_idx = find_classes(root)
# Load pre-computed image directory walk
if os.path.exists(index_filename):
print('Loading pre-saved Index file %s...' % index_filename)
imgs = np.load(index_filename)['imgs']
# If first time, walk the folder directory and save the
# results to a pre-computed file.
else:
print('Generating Index file %s...' % index_filename)
imgs = make_dataset(root, class_to_idx)
np.savez_compressed(index_filename, **{'imgs' : imgs})
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.loader = loader
self.load_in_mem = load_in_mem
if self.load_in_mem:
print('Loading all images into memory...')
self.data, self.labels = [], []
for index in tqdm(range(len(self.imgs))):
path, target = imgs[index][0], imgs[index][1]
self.data.append(self.transform(self.loader(path)))
self.labels.append(target)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
else:
path, target = self.imgs[index]
img = self.loader(str(path))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, int(target)
def __len__(self):
return len(self.imgs)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
''' ILSVRC_HDF5: A dataset to support I/O from an HDF5 to avoid
having to load individual images all the time. '''
import h5py as h5
import torch
class ILSVRC_HDF5(data.Dataset):
def __init__(self, root, transform=None, target_transform=None,
load_in_mem=False, train=True,download=False, validate_seed=0,
val_split=0, **kwargs): # last four are dummies
self.root = root
    with h5.File(root, 'r') as f:
      self.num_imgs = len(f['labels'])
    self.transform = transform
    self.target_transform = target_transform
# load the entire dataset into memory?
self.load_in_mem = load_in_mem
# If loading into memory, do so now
if self.load_in_mem:
print('Loading %s into memory...' % root)
with h5.File(root,'r') as f:
self.data = f['imgs'][:]
self.labels = f['labels'][:]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# If loaded the entire dataset in RAM, get image from memory
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
# Else load it from disk
else:
with h5.File(self.root,'r') as f:
img = f['imgs'][index]
target = f['labels'][index]
# if self.transform is not None:
# img = self.transform(img)
# Apply my own transform
img = ((torch.from_numpy(img).float() / 255) - 0.5) * 2
if self.target_transform is not None:
target = self.target_transform(target)
return img, int(target)
def __len__(self):
return self.num_imgs
# return len(self.f['imgs'])
import pickle
class CIFAR10(dset.CIFAR10):
def __init__(self, root, train=True,
transform=None, target_transform=None,
download=True, validate_seed=0,
val_split=0, load_in_mem=True, **kwargs):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.val_split = val_split
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
    # now load the pickled numpy arrays
self.data = []
self.labels= []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.labels += entry['labels']
else:
self.labels += entry['fine_labels']
fo.close()
self.data = np.concatenate(self.data)
# Randomly select indices for validation
if self.val_split > 0:
label_indices = [[] for _ in range(max(self.labels)+1)]
for i,l in enumerate(self.labels):
label_indices[l] += [i]
label_indices = np.asarray(label_indices)
# randomly grab 500 elements of each class
np.random.seed(validate_seed)
self.val_indices = []
      for l_i in label_indices:
        num_val = int(len(self.data) * val_split) // (max(self.labels) + 1)
        self.val_indices += list(l_i[np.random.choice(len(l_i), num_val,
                                                      replace=False)])
    if self.train == 'validate':
self.data = self.data[self.val_indices]
self.labels = list(np.asarray(self.labels)[self.val_indices])
self.data = self.data.reshape((int(50e3 * self.val_split), 3, 32, 32))
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
elif self.train:
print(np.shape(self.data))
if self.val_split > 0:
self.data = np.delete(self.data,self.val_indices,axis=0)
self.labels = list(np.delete(np.asarray(self.labels),self.val_indices,axis=0))
self.data = self.data.reshape((int(50e3 * (1.-self.val_split)), 3, 32, 32))
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
else:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.data = entry['data']
if 'labels' in entry:
self.labels = entry['labels']
else:
self.labels = entry['fine_labels']
fo.close()
self.data = self.data.reshape((10000, 3, 32, 32))
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.data)
class CIFAR100(CIFAR10):
base_folder = 'cifar-100-python'
url = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
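# Usage sketch (values are assumptions): these classes mirror torchvision's
# CIFAR signatures plus the extra val_split / validate_seed knobs; passing
# train='validate' returns the held-out split carved from the train set.
def _cifar_split_sketch(root):
  train_set = CIFAR10(root, train=True, download=True, val_split=0.1,
                      transform=transforms.ToTensor())
  val_set = CIFAR10(root, train='validate', download=True, val_split=0.1,
                    transform=transforms.ToTensor())
  return train_set, val_set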
| 11,416 | 30.451791 | 139 | py |
adcgan | adcgan-main/BigGAN-PyTorch/inception_utils.py | ''' Inception utilities
This file contains methods for calculating IS and FID, using either
the original numpy code or an accelerated fully-pytorch version that
uses a fast newton-schulz approximation for the matrix sqrt. There are also
methods for acquiring a desired number of samples from the Generator,
and parallelizing the inbuilt PyTorch inception network.
NOTE that Inception Scores and FIDs calculated using these methods will
*not* be directly comparable to values calculated using the original TF
IS/FID code. You *must* use the TF model if you wish to report and compare
numbers. This code tends to produce IS values that are 5-10% lower than
those obtained through TF.
'''
import numpy as np
from scipy import linalg # For numpy FID
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter as P
from torchvision.models.inception import inception_v3
# Module that wraps the inception network to enable use with dataparallel and
# returning pool features and logits.
class WrapInception(nn.Module):
def __init__(self, net):
super(WrapInception,self).__init__()
self.net = net
self.mean = P(torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1),
requires_grad=False)
self.std = P(torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1),
requires_grad=False)
def forward(self, x):
# Normalize x
x = (x + 1.) / 2.0
x = (x - self.mean) / self.std
# Upsample if necessary
if x.shape[2] != 299 or x.shape[3] != 299:
x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
# 299 x 299 x 3
x = self.net.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.net.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.net.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.net.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.net.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.net.Mixed_5b(x)
# 35 x 35 x 256
x = self.net.Mixed_5c(x)
# 35 x 35 x 288
x = self.net.Mixed_5d(x)
# 35 x 35 x 288
x = self.net.Mixed_6a(x)
# 17 x 17 x 768
x = self.net.Mixed_6b(x)
# 17 x 17 x 768
x = self.net.Mixed_6c(x)
# 17 x 17 x 768
x = self.net.Mixed_6d(x)
# 17 x 17 x 768
x = self.net.Mixed_6e(x)
    # 17 x 17 x 768
x = self.net.Mixed_7a(x)
# 8 x 8 x 1280
x = self.net.Mixed_7b(x)
# 8 x 8 x 2048
x = self.net.Mixed_7c(x)
# 8 x 8 x 2048
pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
# 1 x 1 x 2048
logits = self.net.fc(F.dropout(pool, training=False).view(pool.size(0), -1))
# 1000 (num_classes)
return pool, logits
# A pytorch implementation of cov, from Modar M. Alfadly
# https://discuss.pytorch.org/t/covariance-and-gradient-support/16217/2
def torch_cov(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
'''
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
# m = m.type(torch.double) # uncomment this line if desired
fact = 1.0 / (m.size(1) - 1)
m -= torch.mean(m, dim=1, keepdim=True)
mt = m.t() # if complex: mt = m.t().conj()
return fact * m.matmul(mt).squeeze()
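# Sanity-check sketch: torch_cov should agree with np.cov on the same data
# (clone the input first, since torch_cov centers its argument in-place).
def _torch_cov_check(n=1000, d=8):
  x = torch.randn(n, d)
  c_torch = torch_cov(x.clone(), rowvar=False)
  c_numpy = np.cov(x.numpy(), rowvar=False)
  return np.allclose(c_torch.numpy(), c_numpy, atol=1e-4)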
# Pytorch implementation of matrix sqrt, from Tsung-Yu Lin, and Subhransu Maji
# https://github.com/msubhransu/matrix-sqrt
def sqrt_newton_schulz(A, numIters, dtype=None):
with torch.no_grad():
if dtype is None:
dtype = A.type()
batchSize = A.shape[0]
dim = A.shape[1]
normA = A.mul(A).sum(dim=1).sum(dim=1).sqrt()
    Y = A.div(normA.view(batchSize, 1, 1).expand_as(A))
I = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
Z = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
for i in range(numIters):
T = 0.5*(3.0*I - Z.bmm(Y))
Y = Y.bmm(T)
Z = T.bmm(Z)
sA = Y*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(A)
return sA
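# Sanity-check sketch: for a batch of symmetric positive-definite matrices
# A, the returned sA should satisfy sA @ sA ~ A.
def _sqrt_newton_schulz_check(batch=2, dim=16, num_iters=50):
  m = torch.randn(batch, dim, dim)
  A = m.bmm(m.transpose(1, 2)) + dim * torch.eye(dim).unsqueeze(0)
  sA = sqrt_newton_schulz(A, num_iters)
  return (sA.bmm(sA) - A).abs().max()  # should be close to 0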
# FID calculator from TTUR--consider replacing this with GPU-accelerated cov
# calculations using torch?
def numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
  -- mu2   : The sample mean over activations, precalculated on a
             representative data set.
  -- sigma1: The covariance matrix over activations for generated samples.
  -- sigma2: The covariance matrix over activations, precalculated on a
             representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
  # Numerical error might give a slight imaginary component
  if np.iscomplexobj(covmean):
    print('Complex covmean encountered; taking the real part.')
    if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
      m = np.max(np.abs(covmean.imag))
      raise ValueError('Imaginary component {}'.format(m))
    covmean = covmean.real
tr_covmean = np.trace(covmean)
out = diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
return out
def torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
  -- mu2   : The sample mean over activations, precalculated on a
             representative data set.
  -- sigma1: The covariance matrix over activations for generated samples.
  -- sigma2: The covariance matrix over activations, precalculated on a
             representative data set.
Returns:
-- : The Frechet Distance.
"""
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Run 50 itrs of newton-schulz to get the matrix sqrt of sigma1 dot sigma2
covmean = sqrt_newton_schulz(sigma1.mm(sigma2).unsqueeze(0), 50).squeeze()
out = (diff.dot(diff) + torch.trace(sigma1) + torch.trace(sigma2)
- 2 * torch.trace(covmean))
return out
# Calculate Inception Score mean + std given softmax'd logits and number of splits
def calculate_inception_score(pred, num_splits=10):
scores = []
for index in range(num_splits):
pred_chunk = pred[index * (pred.shape[0] // num_splits): (index + 1) * (pred.shape[0] // num_splits), :]
kl_inception = pred_chunk * (np.log(pred_chunk) - np.log(np.expand_dims(np.mean(pred_chunk, 0), 0)))
kl_inception = np.mean(np.sum(kl_inception, 1))
scores.append(np.exp(kl_inception))
return np.mean(scores), np.std(scores)
# Loop and run the sampler and the net until it accumulates num_inception_images
# activations. Return the pool, the logits, and the labels (if one wants
# Inception Accuracy the labels of the generated class will be needed)
def accumulate_inception_activations(sample, net, num_inception_images=50000):
pool, logits, labels = [], [], []
while (torch.cat(logits, 0).shape[0] if len(logits) else 0) < num_inception_images:
with torch.no_grad():
images, labels_val = sample()
pool_val, logits_val = net(images.float())
pool += [pool_val]
logits += [F.softmax(logits_val, 1)]
labels += [labels_val]
return torch.cat(pool, 0), torch.cat(logits, 0), torch.cat(labels, 0)
# Load and wrap the Inception model
def load_inception_net(parallel=False):
inception_model = inception_v3(pretrained=True, transform_input=False)
inception_model = WrapInception(inception_model.eval()).cuda()
if parallel:
print('Parallelizing Inception module...')
inception_model = nn.DataParallel(inception_model)
return inception_model
# This produces a function which takes in an iterator which returns a set number of samples
# and iterates until it accumulates config['num_inception_images'] images.
# The iterator can return samples with a different batch size than used in
# training, using the setting config['inception_batchsize']
def prepare_inception_metrics(dataset, parallel, no_fid=False, no_is=False, label=None):
# Load metrics; this is intentionally not in a try-except loop so that
# the script will crash here if it cannot find the Inception moments.
  # By default, remove the "_hdf5" suffix from the dataset name
  # (str.strip removes a character set, not a suffix, so check explicitly)
  if dataset.endswith('_hdf5'):
    dataset = dataset[:-len('_hdf5')]
if type(label) == int:
data_mu = np.load(dataset+'_{:03d}_inception_moments.npz'.format(label))['mu']
data_sigma = np.load(dataset+'_{:03d}_inception_moments.npz'.format(label))['sigma']
else:
data_mu = np.load(dataset+'_inception_moments.npz')['mu']
data_sigma = np.load(dataset+'_inception_moments.npz')['sigma']
# Load network
net = load_inception_net(parallel)
def get_inception_metrics(sample, num_inception_images, num_splits=10,
prints=True, use_torch=False):
if prints:
print('Gathering activations...')
pool, logits, labels = accumulate_inception_activations(sample, net, num_inception_images)
if prints:
print('Calculating Inception Score...')
if no_is:
IS_mean, IS_std = 0, 0
else:
IS_mean, IS_std = calculate_inception_score(logits.cpu().numpy(), num_splits)
if no_fid:
FID = 9999.0
else:
if prints:
print('Calculating means and covariances...')
if use_torch:
mu, sigma = torch.mean(pool, 0), torch_cov(pool, rowvar=False)
else:
mu, sigma = np.mean(pool.cpu().numpy(), axis=0), np.cov(pool.cpu().numpy(), rowvar=False)
if prints:
print('Covariances calculated, getting FID...')
if use_torch:
FID = torch_calculate_frechet_distance(mu, sigma, torch.tensor(data_mu).float().cuda(), torch.tensor(data_sigma).float().cuda())
FID = float(FID.cpu().numpy())
else:
FID = numpy_calculate_frechet_distance(mu, sigma, data_mu, data_sigma)
# Delete mu, sigma, pool, logits, and labels, just in case
del mu, sigma, pool, logits, labels
return IS_mean, IS_std, FID
return get_inception_metrics | 12,572 | 38.662461 | 136 | py |
adcgan | adcgan-main/BigGAN-PyTorch/animal_hash.py | c = ['Aardvark', 'Abyssinian', 'Affenpinscher', 'Akbash', 'Akita', 'Albatross',
'Alligator', 'Alpaca', 'Angelfish', 'Ant', 'Anteater', 'Antelope', 'Ape',
'Armadillo', 'Ass', 'Avocet', 'Axolotl', 'Baboon', 'Badger', 'Balinese',
'Bandicoot', 'Barb', 'Barnacle', 'Barracuda', 'Bat', 'Beagle', 'Bear',
'Beaver', 'Bee', 'Beetle', 'Binturong', 'Bird', 'Birman', 'Bison',
'Bloodhound', 'Boar', 'Bobcat', 'Bombay', 'Bongo', 'Bonobo', 'Booby',
'Budgerigar', 'Buffalo', 'Bulldog', 'Bullfrog', 'Burmese', 'Butterfly',
'Caiman', 'Camel', 'Capybara', 'Caracal', 'Caribou', 'Cassowary', 'Cat',
'Caterpillar', 'Catfish', 'Cattle', 'Centipede', 'Chameleon', 'Chamois',
'Cheetah', 'Chicken', 'Chihuahua', 'Chimpanzee', 'Chinchilla', 'Chinook',
'Chipmunk', 'Chough', 'Cichlid', 'Clam', 'Coati', 'Cobra', 'Cockroach',
'Cod', 'Collie', 'Coral', 'Cormorant', 'Cougar', 'Cow', 'Coyote',
'Crab', 'Crane', 'Crocodile', 'Crow', 'Curlew', 'Cuscus', 'Cuttlefish',
'Dachshund', 'Dalmatian', 'Deer', 'Dhole', 'Dingo', 'Dinosaur', 'Discus',
'Dodo', 'Dog', 'Dogball', 'Dogfish', 'Dolphin', 'Donkey', 'Dormouse',
'Dove', 'Dragonfly', 'Drever', 'Duck', 'Dugong', 'Dunker', 'Dunlin',
'Eagle', 'Earwig', 'Echidna', 'Eel', 'Eland', 'Elephant', 'ElephantSeal',
'Elk', 'Emu', 'Falcon', 'Ferret', 'Finch', 'Fish', 'Flamingo', 'Flounder',
'Fly', 'Fossa', 'Fox', 'Frigatebird', 'Frog', 'Galago', 'Gar', 'Gaur',
'Gazelle', 'Gecko', 'Gerbil', 'Gharial', 'GiantPanda', 'Gibbon', 'Giraffe',
'Gnat', 'Gnu', 'Goat', 'Goldfinch', 'Goldfish', 'Goose', 'Gopher',
'Gorilla', 'Goshawk', 'Grasshopper', 'Greyhound', 'Grouse', 'Guanaco',
'GuineaFowl', 'GuineaPig', 'Gull', 'Guppy', 'Hamster', 'Hare', 'Harrier',
'Havanese', 'Hawk', 'Hedgehog', 'Heron', 'Herring', 'Himalayan',
'Hippopotamus', 'Hornet', 'Horse', 'Human', 'Hummingbird', 'Hyena',
'Ibis', 'Iguana', 'Impala', 'Indri', 'Insect', 'Jackal', 'Jaguar',
'Javanese', 'Jay', 'Jellyfish', 'Kakapo', 'Kangaroo', 'Kingfisher',
'Kiwi', 'Koala', 'KomodoDragon', 'Kouprey', 'Kudu', 'Labradoodle',
'Ladybird', 'Lapwing', 'Lark', 'Lemming', 'Lemur', 'Leopard', 'Liger',
'Lion', 'Lionfish', 'Lizard', 'Llama', 'Lobster', 'Locust', 'Loris',
'Louse', 'Lynx', 'Lyrebird', 'Macaw', 'Magpie', 'Mallard', 'Maltese',
'Manatee', 'Mandrill', 'Markhor', 'Marten', 'Mastiff', 'Mayfly', 'Meerkat',
'Millipede', 'Mink', 'Mole', 'Molly', 'Mongoose', 'Mongrel', 'Monkey',
'Moorhen', 'Moose', 'Mosquito', 'Moth', 'Mouse', 'Mule', 'Narwhal',
'Neanderthal', 'Newfoundland', 'Newt', 'Nightingale', 'Numbat', 'Ocelot',
'Octopus', 'Okapi', 'Olm', 'Opossum', 'Orang-utan', 'Oryx', 'Ostrich',
'Otter', 'Owl', 'Ox', 'Oyster', 'Pademelon', 'Panther', 'Parrot',
'Partridge', 'Peacock', 'Peafowl', 'Pekingese', 'Pelican', 'Penguin',
'Persian', 'Pheasant', 'Pig', 'Pigeon', 'Pika', 'Pike', 'Piranha',
'Platypus', 'Pointer', 'Pony', 'Poodle', 'Porcupine', 'Porpoise',
'Possum', 'PrairieDog', 'Prawn', 'Puffin', 'Pug', 'Puma', 'Quail',
'Quelea', 'Quetzal', 'Quokka', 'Quoll', 'Rabbit', 'Raccoon', 'Ragdoll',
'Rail', 'Ram', 'Rat', 'Rattlesnake', 'Raven', 'RedDeer', 'RedPanda',
'Reindeer', 'Rhinoceros', 'Robin', 'Rook', 'Rottweiler', 'Ruff',
'Salamander', 'Salmon', 'SandDollar', 'Sandpiper', 'Saola',
'Sardine', 'Scorpion', 'SeaLion', 'SeaUrchin', 'Seahorse',
'Seal', 'Serval', 'Shark', 'Sheep', 'Shrew', 'Shrimp', 'Siamese',
'Siberian', 'Skunk', 'Sloth', 'Snail', 'Snake', 'Snowshoe', 'Somali',
'Sparrow', 'Spider', 'Sponge', 'Squid', 'Squirrel', 'Starfish', 'Starling',
'Stingray', 'Stinkbug', 'Stoat', 'Stork', 'Swallow', 'Swan', 'Tang',
'Tapir', 'Tarsier', 'Termite', 'Tetra', 'Tiffany', 'Tiger', 'Toad',
'Tortoise', 'Toucan', 'Tropicbird', 'Trout', 'Tuatara', 'Turkey',
'Turtle', 'Uakari', 'Uguisu', 'Umbrellabird', 'Viper', 'Vulture',
'Wallaby', 'Walrus', 'Warthog', 'Wasp', 'WaterBuffalo', 'Weasel',
'Whale', 'Whippet', 'Wildebeest', 'Wolf', 'Wolverine', 'Wombat',
'Woodcock', 'Woodlouse', 'Woodpecker', 'Worm', 'Wrasse', 'Wren',
'Yak', 'Zebra', 'Zebu', 'Zonkey']
a = ['able', 'above', 'absent', 'absolute', 'abstract', 'abundant', 'academic',
'acceptable', 'accepted', 'accessible', 'accurate', 'accused', 'active',
'actual', 'acute', 'added', 'additional', 'adequate', 'adjacent',
'administrative', 'adorable', 'advanced', 'adverse', 'advisory',
'aesthetic', 'afraid', 'african', 'aggregate', 'aggressive', 'agreeable',
'agreed', 'agricultural', 'alert', 'alive', 'alleged', 'allied', 'alone',
'alright', 'alternative', 'amateur', 'amazing', 'ambitious', 'american',
'amused', 'ancient', 'angry', 'annoyed', 'annual', 'anonymous', 'anxious',
'appalling', 'apparent', 'applicable', 'appropriate', 'arab', 'arbitrary',
'architectural', 'armed', 'arrogant', 'artificial', 'artistic', 'ashamed',
'asian', 'asleep', 'assistant', 'associated', 'atomic', 'attractive',
'australian', 'automatic', 'autonomous', 'available', 'average',
'awake', 'aware', 'awful', 'awkward', 'back', 'bad', 'balanced', 'bare',
'basic', 'beautiful', 'beneficial', 'better', 'bewildered', 'big',
'binding', 'biological', 'bitter', 'bizarre', 'black', 'blank', 'blind',
'blonde', 'bloody', 'blue', 'blushing', 'boiling', 'bold', 'bored',
'boring', 'bottom', 'brainy', 'brave', 'breakable', 'breezy', 'brief',
'bright', 'brilliant', 'british', 'broad', 'broken', 'brown', 'bumpy',
'burning', 'busy', 'calm', 'canadian', 'capable', 'capitalist', 'careful',
'casual', 'catholic', 'causal', 'cautious', 'central', 'certain',
'changing', 'characteristic', 'charming', 'cheap', 'cheerful', 'chemical',
'chief', 'chilly', 'chinese', 'chosen', 'christian', 'chronic', 'chubby',
'circular', 'civic', 'civil', 'civilian', 'classic', 'classical', 'clean',
'clear', 'clever', 'clinical', 'close', 'closed', 'cloudy', 'clumsy',
'coastal', 'cognitive', 'coherent', 'cold', 'collective', 'colonial',
'colorful', 'colossal', 'coloured', 'colourful', 'combative', 'combined',
'comfortable', 'coming', 'commercial', 'common', 'communist', 'compact',
'comparable', 'comparative', 'compatible', 'competent', 'competitive',
'complete', 'complex', 'complicated', 'comprehensive', 'compulsory',
'conceptual', 'concerned', 'concrete', 'condemned', 'confident',
'confidential', 'confused', 'conscious', 'conservation', 'conservative',
'considerable', 'consistent', 'constant', 'constitutional',
'contemporary', 'content', 'continental', 'continued', 'continuing',
'continuous', 'controlled', 'controversial', 'convenient', 'conventional',
'convinced', 'convincing', 'cooing', 'cool', 'cooperative', 'corporate',
'correct', 'corresponding', 'costly', 'courageous', 'crazy', 'creative',
'creepy', 'criminal', 'critical', 'crooked', 'crowded', 'crucial',
'crude', 'cruel', 'cuddly', 'cultural', 'curious', 'curly', 'current',
'curved', 'cute', 'daily', 'damaged', 'damp', 'dangerous', 'dark', 'dead',
'deaf', 'deafening', 'dear', 'decent', 'decisive', 'deep', 'defeated',
'defensive', 'defiant', 'definite', 'deliberate', 'delicate', 'delicious',
'delighted', 'delightful', 'democratic', 'dependent', 'depressed',
'desirable', 'desperate', 'detailed', 'determined', 'developed',
'developing', 'devoted', 'different', 'difficult', 'digital', 'diplomatic',
'direct', 'dirty', 'disabled', 'disappointed', 'disastrous',
'disciplinary', 'disgusted', 'distant', 'distinct', 'distinctive',
'distinguished', 'disturbed', 'disturbing', 'diverse', 'divine', 'dizzy',
'domestic', 'dominant', 'double', 'doubtful', 'drab', 'dramatic',
'dreadful', 'driving', 'drunk', 'dry', 'dual', 'due', 'dull', 'dusty',
'dutch', 'dying', 'dynamic', 'eager', 'early', 'eastern', 'easy',
'economic', 'educational', 'eerie', 'effective', 'efficient',
'elaborate', 'elated', 'elderly', 'eldest', 'electoral', 'electric',
'electrical', 'electronic', 'elegant', 'eligible', 'embarrassed',
'embarrassing', 'emotional', 'empirical', 'empty', 'enchanting',
'encouraging', 'endless', 'energetic', 'english', 'enormous',
'enthusiastic', 'entire', 'entitled', 'envious', 'environmental', 'equal',
'equivalent', 'essential', 'established', 'estimated', 'ethical',
'ethnic', 'european', 'eventual', 'everyday', 'evident', 'evil',
'evolutionary', 'exact', 'excellent', 'exceptional', 'excess',
'excessive', 'excited', 'exciting', 'exclusive', 'existing', 'exotic',
'expected', 'expensive', 'experienced', 'experimental', 'explicit',
'extended', 'extensive', 'external', 'extra', 'extraordinary', 'extreme',
'exuberant', 'faint', 'fair', 'faithful', 'familiar', 'famous', 'fancy',
'fantastic', 'far', 'fascinating', 'fashionable', 'fast', 'fat', 'fatal',
'favourable', 'favourite', 'federal', 'fellow', 'female', 'feminist',
'few', 'fierce', 'filthy', 'final', 'financial', 'fine', 'firm', 'fiscal',
'fit', 'fixed', 'flaky', 'flat', 'flexible', 'fluffy', 'fluttering',
'flying', 'following', 'fond', 'foolish', 'foreign', 'formal',
'formidable', 'forthcoming', 'fortunate', 'forward', 'fragile',
'frail', 'frantic', 'free', 'french', 'frequent', 'fresh', 'friendly',
'frightened', 'front', 'frozen', 'fucking', 'full', 'full-time', 'fun',
'functional', 'fundamental', 'funny', 'furious', 'future', 'fuzzy',
'gastric', 'gay', 'general', 'generous', 'genetic', 'gentle', 'genuine',
'geographical', 'german', 'giant', 'gigantic', 'given', 'glad',
'glamorous', 'gleaming', 'global', 'glorious', 'golden', 'good',
'gorgeous', 'gothic', 'governing', 'graceful', 'gradual', 'grand',
'grateful', 'greasy', 'great', 'greek', 'green', 'grey', 'grieving',
'grim', 'gross', 'grotesque', 'growing', 'grubby', 'grumpy', 'guilty',
'handicapped', 'handsome', 'happy', 'hard', 'harsh', 'head', 'healthy',
'heavy', 'helpful', 'helpless', 'hidden', 'high', 'high-pitched',
'hilarious', 'hissing', 'historic', 'historical', 'hollow', 'holy',
'homeless', 'homely', 'hon', 'honest', 'horizontal', 'horrible',
'hostile', 'hot', 'huge', 'human', 'hungry', 'hurt', 'hushed', 'husky',
'icy', 'ideal', 'identical', 'ideological', 'ill', 'illegal',
'imaginative', 'immediate', 'immense', 'imperial', 'implicit',
'important', 'impossible', 'impressed', 'impressive', 'improved',
'inadequate', 'inappropriate', 'inc', 'inclined', 'increased',
'increasing', 'incredible', 'independent', 'indian', 'indirect',
'individual', 'industrial', 'inevitable', 'influential', 'informal',
'inherent', 'initial', 'injured', 'inland', 'inner', 'innocent',
'innovative', 'inquisitive', 'instant', 'institutional', 'insufficient',
'intact', 'integral', 'integrated', 'intellectual', 'intelligent',
'intense', 'intensive', 'interested', 'interesting', 'interim',
'interior', 'intermediate', 'internal', 'international', 'intimate',
'invisible', 'involved', 'iraqi', 'irish', 'irrelevant', 'islamic',
'isolated', 'israeli', 'italian', 'itchy', 'japanese', 'jealous',
'jewish', 'jittery', 'joint', 'jolly', 'joyous', 'judicial', 'juicy',
'junior', 'just', 'keen', 'key', 'kind', 'known', 'korean', 'labour',
'large', 'large-scale', 'late', 'latin', 'lazy', 'leading', 'left',
'legal', 'legislative', 'legitimate', 'lengthy', 'lesser', 'level',
'lexical', 'liable', 'liberal', 'light', 'like', 'likely', 'limited',
'linear', 'linguistic', 'liquid', 'literary', 'little', 'live', 'lively',
'living', 'local', 'logical', 'lonely', 'long', 'long-term', 'loose',
'lost', 'loud', 'lovely', 'low', 'loyal', 'ltd', 'lucky', 'mad',
'magenta', 'magic', 'magnetic', 'magnificent', 'main', 'major', 'male',
'mammoth', 'managerial', 'managing', 'manual', 'many', 'marginal',
'marine', 'marked', 'married', 'marvellous', 'marxist', 'mass', 'massive',
'mathematical', 'mature', 'maximum', 'mean', 'meaningful', 'mechanical',
'medical', 'medieval', 'melodic', 'melted', 'mental', 'mere',
'metropolitan', 'mid', 'middle', 'middle-class', 'mighty', 'mild',
'military', 'miniature', 'minimal', 'minimum', 'ministerial', 'minor',
'miserable', 'misleading', 'missing', 'misty', 'mixed', 'moaning',
'mobile', 'moderate', 'modern', 'modest', 'molecular', 'monetary',
'monthly', 'moral', 'motionless', 'muddy', 'multiple', 'mushy',
'musical', 'mute', 'mutual', 'mysterious', 'naked', 'narrow', 'nasty',
'national', 'native', 'natural', 'naughty', 'naval', 'near', 'nearby',
'neat', 'necessary', 'negative', 'neighbouring', 'nervous', 'net',
'neutral', 'new', 'nice', 'nineteenth-century', 'noble', 'noisy',
'normal', 'northern', 'nosy', 'notable', 'novel', 'nuclear', 'numerous',
'nursing', 'nutritious', 'nutty', 'obedient', 'objective', 'obliged',
'obnoxious', 'obvious', 'occasional', 'occupational', 'odd', 'official',
'ok', 'okay', 'old', 'old-fashioned', 'olympic', 'only', 'open',
'operational', 'opposite', 'optimistic', 'oral', 'orange', 'ordinary',
'organic', 'organisational', 'original', 'orthodox', 'other', 'outdoor',
'outer', 'outrageous', 'outside', 'outstanding', 'overall', 'overseas',
'overwhelming', 'painful', 'pale', 'palestinian', 'panicky', 'parallel',
'parental', 'parliamentary', 'part-time', 'partial', 'particular',
'passing', 'passive', 'past', 'patient', 'payable', 'peaceful',
'peculiar', 'perfect', 'permanent', 'persistent', 'personal', 'petite',
'philosophical', 'physical', 'pink', 'plain', 'planned', 'plastic',
'pleasant', 'pleased', 'poised', 'polish', 'polite', 'political', 'poor',
'popular', 'positive', 'possible', 'post-war', 'potential', 'powerful',
'practical', 'precious', 'precise', 'preferred', 'pregnant',
'preliminary', 'premier', 'prepared', 'present', 'presidential',
'pretty', 'previous', 'prickly', 'primary', 'prime', 'primitive',
'principal', 'printed', 'prior', 'private', 'probable', 'productive',
'professional', 'profitable', 'profound', 'progressive', 'prominent',
'promising', 'proper', 'proposed', 'prospective', 'protective',
'protestant', 'proud', 'provincial', 'psychiatric', 'psychological',
'public', 'puny', 'pure', 'purple', 'purring', 'puzzled', 'quaint',
'qualified', 'quick', 'quickest', 'quiet', 'racial', 'radical', 'rainy',
'random', 'rapid', 'rare', 'raspy', 'rational', 'ratty', 'raw', 'ready',
'real', 'realistic', 'rear', 'reasonable', 'recent', 'red', 'reduced',
'redundant', 'regional', 'registered', 'regular', 'regulatory', 'related',
'relative', 'relaxed', 'relevant', 'reliable', 'relieved', 'religious',
'reluctant', 'remaining', 'remarkable', 'remote', 'renewed',
'representative', 'repulsive', 'required', 'resident', 'residential',
'resonant', 'respectable', 'respective', 'responsible', 'resulting',
'retail', 'retired', 'revolutionary', 'rich', 'ridiculous', 'right',
'rigid', 'ripe', 'rising', 'rival', 'roasted', 'robust', 'rolling',
'roman', 'romantic', 'rotten', 'rough', 'round', 'royal', 'rubber',
'rude', 'ruling', 'running', 'rural', 'russian', 'sacred', 'sad', 'safe',
'salty', 'satisfactory', 'satisfied', 'scared', 'scary', 'scattered',
'scientific', 'scornful', 'scottish', 'scrawny', 'screeching',
'secondary', 'secret', 'secure', 'select', 'selected', 'selective',
'selfish', 'semantic', 'senior', 'sensible', 'sensitive', 'separate',
'serious', 'severe', 'sexual', 'shaggy', 'shaky', 'shallow', 'shared',
'sharp', 'sheer', 'shiny', 'shivering', 'shocked', 'short', 'short-term',
'shrill', 'shy', 'sick', 'significant', 'silent', 'silky', 'silly',
'similar', 'simple', 'single', 'skilled', 'skinny', 'sleepy', 'slight',
'slim', 'slimy', 'slippery', 'slow', 'small', 'smart', 'smiling',
'smoggy', 'smooth', 'so-called', 'social', 'socialist', 'soft', 'solar',
'sole', 'solid', 'sophisticated', 'sore', 'sorry', 'sound', 'sour',
'southern', 'soviet', 'spanish', 'spare', 'sparkling', 'spatial',
'special', 'specific', 'specified', 'spectacular', 'spicy', 'spiritual',
'splendid', 'spontaneous', 'sporting', 'spotless', 'spotty', 'square',
'squealing', 'stable', 'stale', 'standard', 'static', 'statistical',
'statutory', 'steady', 'steep', 'sticky', 'stiff', 'still', 'stingy',
'stormy', 'straight', 'straightforward', 'strange', 'strategic',
'strict', 'striking', 'striped', 'strong', 'structural', 'stuck',
'stupid', 'subjective', 'subsequent', 'substantial', 'subtle',
'successful', 'successive', 'sudden', 'sufficient', 'suitable',
'sunny', 'super', 'superb', 'superior', 'supporting', 'supposed',
'supreme', 'sure', 'surprised', 'surprising', 'surrounding',
'surviving', 'suspicious', 'sweet', 'swift', 'swiss', 'symbolic',
'sympathetic', 'systematic', 'tall', 'tame', 'tan', 'tart',
'tasteless', 'tasty', 'technical', 'technological', 'teenage',
'temporary', 'tender', 'tense', 'terrible', 'territorial', 'testy',
'then', 'theoretical', 'thick', 'thin', 'thirsty', 'thorough',
'thoughtful', 'thoughtless', 'thundering', 'tight', 'tiny', 'tired',
'top', 'tory', 'total', 'tough', 'toxic', 'traditional', 'tragic',
'tremendous', 'tricky', 'tropical', 'troubled', 'turkish', 'typical',
'ugliest', 'ugly', 'ultimate', 'unable', 'unacceptable', 'unaware',
'uncertain', 'unchanged', 'uncomfortable', 'unconscious', 'underground',
'underlying', 'unemployed', 'uneven', 'unexpected', 'unfair',
'unfortunate', 'unhappy', 'uniform', 'uninterested', 'unique', 'united',
'universal', 'unknown', 'unlikely', 'unnecessary', 'unpleasant',
'unsightly', 'unusual', 'unwilling', 'upper', 'upset', 'uptight',
'urban', 'urgent', 'used', 'useful', 'useless', 'usual', 'vague',
'valid', 'valuable', 'variable', 'varied', 'various', 'varying', 'vast',
'verbal', 'vertical', 'very', 'victorian', 'victorious', 'video-taped',
'violent', 'visible', 'visiting', 'visual', 'vital', 'vivacious',
'vivid', 'vocational', 'voiceless', 'voluntary', 'vulnerable',
'wandering', 'warm', 'wasteful', 'watery', 'weak', 'wealthy', 'weary',
'wee', 'weekly', 'weird', 'welcome', 'well', 'well-known', 'welsh',
'western', 'wet', 'whispering', 'white', 'whole', 'wicked', 'wide',
'wide-eyed', 'widespread', 'wild', 'willing', 'wise', 'witty',
'wonderful', 'wooden', 'working', 'working-class', 'worldwide',
'worried', 'worrying', 'worthwhile', 'worthy', 'written', 'wrong',
'yellow', 'young', 'yummy', 'zany', 'zealous']
b = ['abiding', 'accelerating', 'accepting', 'accomplishing', 'achieving',
'acquiring', 'acteding', 'activating', 'adapting', 'adding', 'addressing',
'administering', 'admiring', 'admiting', 'adopting', 'advising', 'affording',
'agreeing', 'alerting', 'alighting', 'allowing', 'altereding', 'amusing',
'analyzing', 'announcing', 'annoying', 'answering', 'anticipating',
'apologizing', 'appearing', 'applauding', 'applieding', 'appointing',
'appraising', 'appreciating', 'approving', 'arbitrating', 'arguing',
'arising', 'arranging', 'arresting', 'arriving', 'ascertaining', 'asking',
'assembling', 'assessing', 'assisting', 'assuring', 'attaching', 'attacking',
'attaining', 'attempting', 'attending', 'attracting', 'auditeding', 'avoiding',
'awaking', 'backing', 'baking', 'balancing', 'baning', 'banging', 'baring',
'bating', 'bathing', 'battling', 'bing', 'beaming', 'bearing', 'beating',
'becoming', 'beging', 'begining', 'behaving', 'beholding', 'belonging',
'bending', 'beseting', 'beting', 'biding', 'binding', 'biting', 'bleaching',
'bleeding', 'blessing', 'blinding', 'blinking', 'bloting', 'blowing',
'blushing', 'boasting', 'boiling', 'bolting', 'bombing', 'booking',
'boring', 'borrowing', 'bouncing', 'bowing', 'boxing', 'braking',
'branching', 'breaking', 'breathing', 'breeding', 'briefing', 'bringing',
'broadcasting', 'bruising', 'brushing', 'bubbling', 'budgeting', 'building',
'bumping', 'burning', 'bursting', 'burying', 'busting', 'buying', 'buzing',
'calculating', 'calling', 'camping', 'caring', 'carrying', 'carving',
'casting', 'cataloging', 'catching', 'causing', 'challenging', 'changing',
'charging', 'charting', 'chasing', 'cheating', 'checking', 'cheering',
'chewing', 'choking', 'choosing', 'choping', 'claiming', 'claping',
'clarifying', 'classifying', 'cleaning', 'clearing', 'clinging', 'cliping',
'closing', 'clothing', 'coaching', 'coiling', 'collecting', 'coloring',
'combing', 'coming', 'commanding', 'communicating', 'comparing', 'competing',
'compiling', 'complaining', 'completing', 'composing', 'computing',
'conceiving', 'concentrating', 'conceptualizing', 'concerning', 'concluding',
'conducting', 'confessing', 'confronting', 'confusing', 'connecting',
'conserving', 'considering', 'consisting', 'consolidating', 'constructing',
'consulting', 'containing', 'continuing', 'contracting', 'controling',
'converting', 'coordinating', 'copying', 'correcting', 'correlating',
'costing', 'coughing', 'counseling', 'counting', 'covering', 'cracking',
'crashing', 'crawling', 'creating', 'creeping', 'critiquing', 'crossing',
'crushing', 'crying', 'curing', 'curling', 'curving', 'cuting', 'cycling',
'daming', 'damaging', 'dancing', 'daring', 'dealing', 'decaying', 'deceiving',
'deciding', 'decorating', 'defining', 'delaying', 'delegating', 'delighting',
'delivering', 'demonstrating', 'depending', 'describing', 'deserting',
'deserving', 'designing', 'destroying', 'detailing', 'detecting',
'determining', 'developing', 'devising', 'diagnosing', 'diging',
'directing', 'disagreing', 'disappearing', 'disapproving', 'disarming',
'discovering', 'disliking', 'dispensing', 'displaying', 'disproving',
'dissecting', 'distributing', 'diving', 'diverting', 'dividing', 'doing',
'doubling', 'doubting', 'drafting', 'draging', 'draining', 'dramatizing',
'drawing', 'dreaming', 'dressing', 'drinking', 'driping', 'driving',
'dropping', 'drowning', 'druming', 'drying', 'dusting', 'dwelling',
'earning', 'eating', 'editeding', 'educating', 'eliminating',
'embarrassing', 'employing', 'emptying', 'enacteding', 'encouraging',
'ending', 'enduring', 'enforcing', 'engineering', 'enhancing',
'enjoying', 'enlisting', 'ensuring', 'entering', 'entertaining',
'escaping', 'establishing', 'estimating', 'evaluating', 'examining',
'exceeding', 'exciting', 'excusing', 'executing', 'exercising', 'exhibiting',
'existing', 'expanding', 'expecting', 'expediting', 'experimenting',
'explaining', 'exploding', 'expressing', 'extending', 'extracting',
'facing', 'facilitating', 'fading', 'failing', 'fancying', 'fastening',
'faxing', 'fearing', 'feeding', 'feeling', 'fencing', 'fetching', 'fighting',
'filing', 'filling', 'filming', 'finalizing', 'financing', 'finding',
'firing', 'fiting', 'fixing', 'flaping', 'flashing', 'fleing', 'flinging',
'floating', 'flooding', 'flowing', 'flowering', 'flying', 'folding',
'following', 'fooling', 'forbiding', 'forcing', 'forecasting', 'foregoing',
'foreseing', 'foretelling', 'forgeting', 'forgiving', 'forming',
'formulating', 'forsaking', 'framing', 'freezing', 'frightening', 'frying',
'gathering', 'gazing', 'generating', 'geting', 'giving', 'glowing', 'gluing',
'going', 'governing', 'grabing', 'graduating', 'grating', 'greasing', 'greeting',
'grinning', 'grinding', 'griping', 'groaning', 'growing', 'guaranteeing',
'guarding', 'guessing', 'guiding', 'hammering', 'handing', 'handling',
'handwriting', 'hanging', 'happening', 'harassing', 'harming', 'hating',
'haunting', 'heading', 'healing', 'heaping', 'hearing', 'heating', 'helping',
'hiding', 'hitting', 'holding', 'hooking', 'hoping', 'hopping', 'hovering',
'hugging', 'hmuming', 'hunting', 'hurrying', 'hurting', 'hypothesizing',
'identifying', 'ignoring', 'illustrating', 'imagining', 'implementing',
'impressing', 'improving', 'improvising', 'including', 'increasing',
'inducing', 'influencing', 'informing', 'initiating', 'injecting',
'injuring', 'inlaying', 'innovating', 'inputing', 'inspecting',
'inspiring', 'installing', 'instituting', 'instructing', 'insuring',
'integrating', 'intending', 'intensifying', 'interesting',
'interfering', 'interlaying', 'interpreting', 'interrupting',
'interviewing', 'introducing', 'inventing', 'inventorying',
'investigating', 'inviting', 'irritating', 'itching', 'jailing',
'jamming', 'jogging', 'joining', 'joking', 'judging', 'juggling', 'jumping',
'justifying', 'keeping', 'kepting', 'kicking', 'killing', 'kissing', 'kneeling',
'kniting', 'knocking', 'knotting', 'knowing', 'labeling', 'landing', 'lasting',
'laughing', 'launching', 'laying', 'leading', 'leaning', 'leaping', 'learning',
'leaving', 'lecturing', 'leding', 'lending', 'leting', 'leveling',
'licensing', 'licking', 'lying', 'lifteding', 'lighting', 'lightening',
'liking', 'listing', 'listening', 'living', 'loading', 'locating',
'locking', 'loging', 'longing', 'looking', 'losing', 'loving',
'maintaining', 'making', 'maning', 'managing', 'manipulating',
'manufacturing', 'mapping', 'marching', 'marking', 'marketing',
'marrying', 'matching', 'mating', 'mattering', 'meaning', 'measuring',
     'meddling', 'mediating', 'meeting', 'melting', 'memorizing',
'mending', 'mentoring', 'milking', 'mining', 'misleading', 'missing',
'misspelling', 'mistaking', 'misunderstanding', 'mixing', 'moaning',
'modeling', 'modifying', 'monitoring', 'mooring', 'motivating',
'mourning', 'moving', 'mowing', 'muddling', 'muging', 'multiplying',
'murdering', 'nailing', 'naming', 'navigating', 'needing', 'negotiating',
'nesting', 'noding', 'nominating', 'normalizing', 'noting', 'noticing',
'numbering', 'obeying', 'objecting', 'observing', 'obtaining', 'occuring',
'offending', 'offering', 'officiating', 'opening', 'operating', 'ordering',
'organizing', 'orienteding', 'originating', 'overcoming', 'overdoing',
'overdrawing', 'overflowing', 'overhearing', 'overtaking', 'overthrowing',
'owing', 'owning', 'packing', 'paddling', 'painting', 'parking', 'parting',
'participating', 'passing', 'pasting', 'pating', 'pausing', 'paying',
'pecking', 'pedaling', 'peeling', 'peeping', 'perceiving', 'perfecting',
'performing', 'permiting', 'persuading', 'phoning', 'photographing',
'picking', 'piloting', 'pinching', 'pining', 'pinpointing', 'pioneering',
'placing', 'planing', 'planting', 'playing', 'pleading', 'pleasing',
'plugging', 'pointing', 'poking', 'polishing', 'poping', 'possessing',
'posting', 'pouring', 'practicing', 'praiseding', 'praying', 'preaching',
'preceding', 'predicting', 'prefering', 'preparing', 'prescribing',
'presenting', 'preserving', 'preseting', 'presiding', 'pressing',
'pretending', 'preventing', 'pricking', 'printing', 'processing',
'procuring', 'producing', 'professing', 'programing', 'progressing',
'projecting', 'promising', 'promoting', 'proofreading', 'proposing',
'protecting', 'proving', 'providing', 'publicizing', 'pulling', 'pumping',
'punching', 'puncturing', 'punishing', 'purchasing', 'pushing', 'puting',
'qualifying', 'questioning', 'queuing', 'quiting', 'racing', 'radiating',
'raining', 'raising', 'ranking', 'rating', 'reaching', 'reading',
'realigning', 'realizing', 'reasoning', 'receiving', 'recognizing',
'recommending', 'reconciling', 'recording', 'recruiting', 'reducing',
'referring', 'reflecting', 'refusing', 'regreting', 'regulating',
'rehabilitating', 'reigning', 'reinforcing', 'rejecting', 'rejoicing',
'relating', 'relaxing', 'releasing', 'relying', 'remaining', 'remembering',
'reminding', 'removing', 'rendering', 'reorganizing', 'repairing',
'repeating', 'replacing', 'replying', 'reporting', 'representing',
'reproducing', 'requesting', 'rescuing', 'researching', 'resolving',
'responding', 'restoreding', 'restructuring', 'retiring', 'retrieving',
     'returning', 'reviewing', 'revising', 'rhyming', 'riding',
'ringing', 'rinsing', 'rising', 'risking', 'robing', 'rocking', 'rolling',
'roting', 'rubing', 'ruining', 'ruling', 'runing', 'rushing', 'sacking',
'sailing', 'satisfying', 'saving', 'sawing', 'saying', 'scaring',
'scattering', 'scheduling', 'scolding', 'scorching', 'scraping',
'scratching', 'screaming', 'screwing', 'scribbling', 'scrubing',
'sealing', 'searching', 'securing', 'seing', 'seeking', 'selecting',
'selling', 'sending', 'sensing', 'separating', 'serving', 'servicing',
'seting', 'settling', 'sewing', 'shading', 'shaking', 'shaping',
'sharing', 'shaving', 'shearing', 'sheding', 'sheltering', 'shining',
'shivering', 'shocking', 'shoing', 'shooting', 'shoping', 'showing',
'shrinking', 'shruging', 'shuting', 'sighing', 'signing', 'signaling',
'simplifying', 'sining', 'singing', 'sinking', 'siping', 'siting',
'sketching', 'skiing', 'skiping', 'slaping', 'slaying', 'sleeping',
'sliding', 'slinging', 'slinking', 'sliping', 'sliting', 'slowing',
'smashing', 'smelling', 'smiling', 'smiting', 'smoking', 'snatching',
'sneaking', 'sneezing', 'sniffing', 'snoring', 'snowing', 'soaking',
'solving', 'soothing', 'soothsaying', 'sorting', 'sounding', 'sowing',
'sparing', 'sparking', 'sparkling', 'speaking', 'specifying', 'speeding',
'spelling', 'spending', 'spilling', 'spining', 'spiting', 'spliting',
'spoiling', 'spoting', 'spraying', 'spreading', 'springing', 'sprouting',
'squashing', 'squeaking', 'squealing', 'squeezing', 'staining', 'stamping',
'standing', 'staring', 'starting', 'staying', 'stealing', 'steering',
'stepping', 'sticking', 'stimulating', 'stinging', 'stinking', 'stirring',
'stitching', 'stoping', 'storing', 'straping', 'streamlining',
'strengthening', 'stretching', 'striding', 'striking', 'stringing',
'stripping', 'striving', 'stroking', 'structuring', 'studying',
'stuffing', 'subleting', 'subtracting', 'succeeding', 'sucking',
'suffering', 'suggesting', 'suiting', 'summarizing', 'supervising',
'supplying', 'supporting', 'supposing', 'surprising', 'surrounding',
'suspecting', 'suspending', 'swearing', 'sweating', 'sweeping', 'swelling',
'swimming', 'swinging', 'switching', 'symbolizing', 'synthesizing',
'systemizing', 'tabulating', 'taking', 'talking', 'taming', 'taping',
'targeting', 'tasting', 'teaching', 'tearing', 'teasing', 'telephoning',
'telling', 'tempting', 'terrifying', 'testing', 'thanking', 'thawing',
'thinking', 'thriving', 'throwing', 'thrusting', 'ticking', 'tickling',
'tying', 'timing', 'tiping', 'tiring', 'touching', 'touring', 'towing',
'tracing', 'trading', 'training', 'transcribing', 'transfering',
'transforming', 'translating', 'transporting', 'traping', 'traveling',
'treading', 'treating', 'trembling', 'tricking', 'triping', 'troting',
'troubling', 'troubleshooting', 'trusting', 'trying', 'tuging', 'tumbling',
'turning', 'tutoring', 'twisting', 'typing', 'undergoing', 'understanding',
'undertaking', 'undressing', 'unfastening', 'unifying', 'uniting',
'unlocking', 'unpacking', 'untidying', 'updating', 'upgrading',
'upholding', 'upseting', 'using', 'utilizing', 'vanishing', 'verbalizing',
'verifying', 'vexing', 'visiting', 'wailing', 'waiting', 'waking',
'walking', 'wandering', 'wanting', 'warming', 'warning', 'washing',
'wasting', 'watching', 'watering', 'waving', 'wearing', 'weaving',
'wedding', 'weeping', 'weighing', 'welcoming', 'wending', 'weting',
'whining', 'whiping', 'whirling', 'whispering', 'whistling', 'wining',
'winding', 'winking', 'wiping', 'wishing', 'withdrawing', 'withholding',
'withstanding', 'wobbling', 'wondering', 'working', 'worrying', 'wrapping',
'wrecking', 'wrestling', 'wriggling', 'wringing', 'writing', 'x-raying',
'yawning', 'yelling', 'zipping', 'zooming'] | 32,285 | 72.544419 | 82 | py |
adcgan | adcgan-main/BigGAN-PyTorch/calculate_inception_moments.py | ''' Calculate Inception Moments
This script iterates over the dataset and calculates the moments of the
activations of the Inception net (needed for FID), and also returns
the Inception Score of the training data.
Note that if you don't shuffle the data, the IS of true data will be under-
estimated as it is label-ordered. By default, the data is not shuffled
so as to reduce non-determinism. '''
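# Example invocation (a sketch; the flags are defined in prepare_parser below):
#   python calculate_inception_moments.py --dataset C10 --data_root data --batch_size 64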
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
import inception_utils
from tqdm import tqdm, trange
from argparse import ArgumentParser
def prepare_parser():
usage = 'Calculate and store inception metrics.'
parser = ArgumentParser(description=usage)
parser.add_argument(
'--dataset', type=str, default='I128_hdf5',
    help='Which dataset to train on, out of I128, I256, C10, C100... '
    'Append _hdf5 to use the hdf5 version of the dataset. (default: %(default)s)')
parser.add_argument(
'--data_root', type=str, default='data',
help='Default location where data is stored (default: %(default)s)')
parser.add_argument(
'--batch_size', type=int, default=64,
help='Default overall batchsize (default: %(default)s)')
parser.add_argument(
'--parallel', action='store_true', default=False,
help='Train with multiple GPUs (default: %(default)s)')
parser.add_argument(
'--augment', action='store_true', default=False,
help='Augment with random crops and flips (default: %(default)s)')
parser.add_argument(
'--num_workers', type=int, default=8,
help='Number of dataloader workers (default: %(default)s)')
parser.add_argument(
'--shuffle', action='store_true', default=False,
help='Shuffle the data? (default: %(default)s)')
parser.add_argument(
'--seed', type=int, default=0,
help='Random seed to use.')
return parser
def run(config):
# Get loader
config['drop_last'] = False
loaders = utils.get_data_loaders(**config)
# Load inception net
net = inception_utils.load_inception_net(parallel=config['parallel'])
pool, logits, labels = [], [], []
device = 'cuda'
for i, (x, y) in enumerate(tqdm(loaders[0])):
x = x.to(device)
with torch.no_grad():
pool_val, logits_val = net(x)
pool += [np.asarray(pool_val.cpu())]
logits += [np.asarray(F.softmax(logits_val, 1).cpu())]
labels += [np.asarray(y.cpu())]
pool, logits, labels = [np.concatenate(item, 0) for item in [pool, logits, labels]]
# uncomment to save pool, logits, and labels to disk
# print('Saving pool, logits, and labels to disk...')
# np.savez(config['dataset']+'_inception_activations.npz',
# {'pool': pool, 'logits': logits, 'labels': labels})
# Calculate inception metrics and report them
print('Calculating inception metrics...')
IS_mean, IS_std = inception_utils.calculate_inception_score(logits)
print('Training data from dataset %s has IS of %5.5f +/- %5.5f' % (config['dataset'], IS_mean, IS_std))
# Prepare mu and sigma, save to disk. Remove "hdf5" by default
# (the FID code also knows to strip "hdf5")
print('Calculating means and covariances...')
mu, sigma = np.mean(pool, axis=0), np.cov(pool, rowvar=False)
print('Saving calculated means and covariances to disk...')
  # str.strip removes a *set of characters* from both ends rather than a suffix,
  # so use replace() to drop the '_hdf5' tag safely.
  np.savez(config['dataset'].replace('_hdf5', '')+'_inception_moments.npz', **{'mu' : mu, 'sigma' : sigma})
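# The saved moments can later be loaded as, e.g. (illustrative sketch):
#   moments = np.load('C10_inception_moments.npz')
#   data_mu, data_sigma = moments['mu'], moments['sigma']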
def run_intra(config):
from utils import nclass_dict
# Get loader
config['drop_last'] = False
loaders = utils.get_data_loaders(**config)
# Load inception net
net = inception_utils.load_inception_net(parallel=config['parallel'])
pool, logits, labels = [], [], []
device = 'cuda'
for i, (x, y) in enumerate(tqdm(loaders[0])):
x = x.to(device)
with torch.no_grad():
pool_val, logits_val = net(x)
pool += [np.asarray(pool_val.cpu())]
logits += [np.asarray(F.softmax(logits_val, 1).cpu())]
labels += [np.asarray(y.cpu())]
pool, logits, labels = [np.concatenate(item, 0) for item in [pool, logits, labels]]
# uncomment to save pool, logits, and labels to disk
# print('Saving pool, logits, and labels to disk...')
# np.savez(config['dataset']+'_inception_activations.npz',
# {'pool': pool, 'logits': logits, 'labels': labels})
for cls in range(nclass_dict[config['dataset']]):
# Calculate inception metrics and report them
print('Calculating {:03d} inception metrics...'.format(cls))
    IS_mean, IS_std = inception_utils.calculate_inception_score(logits[labels == cls])
    print('Training data for class %03d of dataset %s has IS of %5.5f +/- %5.5f' % (cls, config['dataset'], IS_mean, IS_std))
    # Prepare per-class mu and sigma, save to disk. Remove "hdf5" by default
    # (the FID code also knows to strip "hdf5")
    print('Calculating means and covariances...')
    # Restrict the pooled activations to the current class so each saved file
    # holds class-conditional moments (required for intra-class FID).
    mu, sigma = np.mean(pool[labels == cls], axis=0), np.cov(pool[labels == cls], rowvar=False)
    print('Saving calculated means and covariances to disk...')
    np.savez(config['dataset'].replace('_hdf5', '')+'_{:03d}_inception_moments.npz'.format(cls), **{'mu' : mu, 'sigma' : sigma})
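# Per-class files follow the '<dataset>_<class:03d>_inception_moments.npz' pattern
# consumed by the intra-FID path in inception_utils, e.g. (sketch):
#   moments = np.load('C10_007_inception_moments.npz')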
def main():
# parse command line
parser = prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
run_intra(config)
if __name__ == '__main__':
main()
| 5,247 | 40 | 122 | py |
adcgan | adcgan-main/BigGAN-PyTorch/train.py | """ BigGAN: The Authorized Unofficial PyTorch release
Code by A. Brock and A. Andonian
This code is an unofficial reimplementation of
"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,"
by A. Brock, J. Donahue, and K. Simonyan (arXiv 1809.11096).
Let's go.
"""
import os
import functools
import math
import numpy as np
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import torchvision
# Import my stuff
import inception_utils
import utils
import losses
import train_fns
from sync_batchnorm import patch_replication_callback
# The main training file. Config is a dictionary specifying the configuration
# of this training run.
def run(config):
# Update the config dict as necessary
# This is for convenience, to add settings derived from the user-specified
# configuration into the config-dict (e.g. inferring the number of classes
# and size of the images from the dataset, passing in a pytorch object
# for the activation specified as a string)
config['resolution'] = utils.imsize_dict[config['dataset']]
config['n_classes'] = utils.nclass_dict[config['dataset']]
config['G_activation'] = utils.activation_dict[config['G_nl']]
config['D_activation'] = utils.activation_dict[config['D_nl']]
config['projection'] = 'pd' in config['loss']
print('Using projection discriminator?: ', config['projection'])
# By default, skip init if resuming training.
if config['resume']:
print('Skipping initialization for training resumption...')
config['skip_init'] = True
config = utils.update_config_roots(config)
device = 'cuda'
# Seed RNG
utils.seed_rng(config['seed'])
# Prepare root folders if necessary
utils.prepare_root(config)
# Setup cudnn.benchmark for free speed
torch.backends.cudnn.benchmark = True
# Import the model--this line allows us to dynamically select different files.
model = __import__(config['model'])
experiment_name = (config['experiment_name'] if config['experiment_name']
else utils.name_from_config(config))
print('Experiment name is %s' % experiment_name)
# Next, build the model
G = model.Generator(**config).to(device)
D = model.Discriminator(**config).to(device)
# If using EMA, prepare it
if config['ema']:
print('Preparing EMA for G with decay of {}'.format(config['ema_decay']))
G_ema = model.Generator(**{**config, 'skip_init':True,
'no_optim': True}).to(device)
ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
else:
G_ema, ema = None, None
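  # (Sketch of the EMA update assumed here: once state_dict['itr'] passes
  #  ema_start, utils.ema applies w_ema <- decay * w_ema + (1 - decay) * w to
  #  each parameter of G, so G_ema tracks a smoothed copy of G for sampling.)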
# FP16?
if config['G_fp16']:
print('Casting G to float16...')
G = G.half()
if config['ema']:
G_ema = G_ema.half()
if config['D_fp16']:
print('Casting D to fp16...')
D = D.half()
# Consider automatically reducing SN_eps?
GD = model.G_D(G, D)
print(G)
print(D)
print('Number of params in G: {} D: {}'.format(
*[sum([p.data.nelement() for p in net.parameters()]) for net in [G,D]]))
# Prepare state dict, which holds things like epoch # and itr #
state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0,
'best_IS': 0, 'best_FID': 999999, 'config': config}
# If loading from a pre-trained model, load weights
if config['resume']:
print('Loading weights...')
utils.load_weights(G, D, state_dict,
config['weights_root'], experiment_name,
config['load_weights'] if config['load_weights'] else None,
G_ema if config['ema'] else None)
# If parallel, parallelize the GD module
if config['parallel']:
GD = nn.DataParallel(GD)
if config['cross_replica']:
patch_replication_callback(GD)
# Prepare loggers for stats; metrics holds test metrics,
# lmetrics holds any desired training metrics.
test_metrics_fname = '%s/%s_log.jsonl' % (config['logs_root'],
experiment_name)
train_metrics_fname = '%s/%s' % (config['logs_root'], experiment_name)
print('Inception Metrics will be saved to {}'.format(test_metrics_fname))
test_log = utils.MetricsLogger(test_metrics_fname,
reinitialize=(not config['resume']))
print('Training Metrics will be saved to {}'.format(train_metrics_fname))
train_log = utils.MyLogger(train_metrics_fname,
reinitialize=(not config['resume']),
logstyle=config['logstyle'])
# Write metadata
utils.write_metadata(config['logs_root'], experiment_name, config, state_dict)
# Prepare data; the Discriminator's batch size is all that needs to be passed
# to the dataloader, as G doesn't require dataloading.
# Note that at every loader iteration we pass in enough data to complete
# a full D iteration (regardless of number of D steps and accumulations)
D_batch_size = (config['batch_size'] * config['num_D_steps']
* config['num_D_accumulations'])
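  # e.g. batch_size=64, num_D_steps=2, num_D_accumulations=8 gives a loader
  # batch of 64 * 2 * 8 = 1024 samples per iteration (illustrative numbers).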
loaders = utils.get_data_loaders(**{**config, 'batch_size': D_batch_size,
'start_itr': state_dict['itr']})
# Prepare inception metrics: FID and IS
get_inception_metrics = inception_utils.prepare_inception_metrics(config['dataset'], config['parallel'], config['no_fid'])
# Prepare noise and randomly sampled label arrays
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
z_, y_ = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'],
device=device, fp16=config['G_fp16'])
  # Prepare a fixed z & y to see individual sample evolution throughout training
fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
# Loaders are loaded, prepare the training function
if config['which_train_fn'] == 'GAN':
train = train_fns.GAN_training_function(G, D, GD, z_, y_,
ema, state_dict, config)
# Else, assume debugging and use the dummy train fn
else:
train = train_fns.dummy_training_function()
# Prepare Sample function for use with inception metrics
sample = functools.partial(utils.sample,
G=(G_ema if config['ema'] and config['use_ema']
else G),
z_=z_, y_=y_, config=config)
print('Beginning training at epoch %d...' % state_dict['epoch'])
# Train for specified number of epochs, although we mostly track G iterations.
for epoch in range(state_dict['epoch'], config['num_epochs']):
# Which progressbar to use? TQDM or my own?
if config['pbar'] == 'mine':
      pbar = utils.progress(loaders[0], displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')
else:
pbar = tqdm(loaders[0])
for i, (x, y) in enumerate(pbar):
# Increment the iteration counter
state_dict['itr'] += 1
# Make sure G and D are in training mode, just in case they got set to eval
# For D, which typically doesn't have BN, this shouldn't matter much.
G.train()
D.train()
if config['ema']:
G_ema.train()
if config['D_fp16']:
x, y = x.to(device).half(), y.to(device)
else:
x, y = x.to(device), y.to(device)
metrics = train(x, y)
train_log.log(itr=int(state_dict['itr']), **metrics)
# Every sv_log_interval, log singular values
if (config['sv_log_interval'] > 0) and (not (state_dict['itr'] % config['sv_log_interval'])):
train_log.log(itr=int(state_dict['itr']),
**{**utils.get_SVs(G, 'G'), **utils.get_SVs(D, 'D')})
# If using my progbar, print metrics.
if config['pbar'] == 'mine':
print(', '.join(['itr: %d' % state_dict['itr']]
+ ['%s : %+4.3f' % (key, metrics[key])
for key in metrics]), end=' ')
# Save weights and copies as configured at specified interval
if not (state_dict['itr'] % config['save_every']):
if config['G_eval_mode']:
          print('Switching G to eval mode...')
G.eval()
if config['ema']:
G_ema.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name)
# Test every specified interval
if not (state_dict['itr'] % config['test_every']):
if config['G_eval_mode']:
          print('Switching G to eval mode...')
G.eval()
train_fns.test(G, D, G_ema, z_, y_, state_dict, config, sample,
get_inception_metrics, experiment_name, test_log)
# Increment epoch counter at end of epoch
state_dict['epoch'] += 1
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main() | 9,268 | 39.475983 | 124 | py |
adcgan | adcgan-main/BigGAN-PyTorch/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
  Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
  The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`.
  Note that, as all replicated modules are isomorphic, we assign each sub-module a context
  (shared among the copies of that module on different devices).
  Through this context, different copies can share information.
  We guarantee that the callback on the master copy (the first copy) is called
  before the callbacks of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
  The replication callback `__data_parallel_replicate__` of each module will be invoked after the replicas
  are created by the original `replicate` function.
  The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
  Useful when you have a customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
| 3,226 | 32.968421 | 115 | py |
adcgan | adcgan-main/BigGAN-PyTorch/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import torch
class TorchTestCase(unittest.TestCase):
def assertTensorClose(self, x, y):
adiff = float((x - y).abs().max())
if (y == 0).all():
rdiff = 'NaN'
else:
rdiff = float((adiff / y).abs().max())
message = (
'Tensor close check failed\n'
'adiff={}\n'
'rdiff={}\n'
).format(adiff, rdiff)
self.assertTrue(torch.allclose(x, y), message)
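  # Usage sketch (illustrative): subclass and call inside a test, e.g.
  #   class MyTest(TorchTestCase):
  #     def test_close(self):
  #       self.assertTensorClose(torch.ones(3), torch.ones(3))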
| 746 | 23.9 | 59 | py |
adcgan | adcgan-main/BigGAN-PyTorch/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
from .comm import SyncMaster
__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dementions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
# _MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'ssum', 'sum_size'])
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
self._sync_master = SyncMaster(self._data_parallel_master)
self._is_parallel = False
self._parallel_id = None
self._slave_pipe = None
def forward(self, input, gain=None, bias=None):
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
if not (self._is_parallel and self.training):
out = F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
if gain is not None:
        out = out * gain  # multiplicative gain, matching the parallel branch below
if bias is not None:
out = out + bias
return out
# Resize the input to (B, C, -1).
input_shape = input.size()
# print(input_shape)
input = input.view(input.size(0), input.size(1), -1)
# Compute the sum and square-sum.
sum_size = input.size(0) * input.size(2)
input_sum = _sum_ft(input)
input_ssum = _sum_ft(input ** 2)
# Reduce-and-broadcast the statistics.
# print('it begins')
if self._parallel_id == 0:
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
else:
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# if self._parallel_id == 0:
# # print('here')
# sum, ssum, num = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
# else:
# # print('there')
# sum, ssum, num = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# print('how2')
# num = sum_size
# print('Sum: %f, ssum: %f, sumsize: %f, insum: %f' %(float(sum.sum().cpu()), float(ssum.sum().cpu()), float(sum_size), float(input_sum.sum().cpu())))
# Fix the graph
# sum = (sum.detach() - input_sum.detach()) + input_sum
# ssum = (ssum.detach() - input_ssum.detach()) + input_ssum
# mean = sum / num
# var = ssum / num - mean ** 2
# # var = (ssum - mean * sum) / num
# inv_std = torch.rsqrt(var + self.eps)
# Compute the output.
if gain is not None:
# print('gaining')
# scale = _unsqueeze_ft(inv_std) * gain.squeeze(-1)
# shift = _unsqueeze_ft(mean) * scale - bias.squeeze(-1)
# output = input * scale - shift
output = (input - _unsqueeze_ft(mean)) * (_unsqueeze_ft(inv_std) * gain.squeeze(-1)) + bias.squeeze(-1)
elif self.affine:
# MJY:: Fuse the multiplication for speed.
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
else:
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
# Reshape it.
return output.view(input_shape)
def __data_parallel_replicate__(self, ctx, copy_id):
self._is_parallel = True
self._parallel_id = copy_id
# parallel_id == 0 means master device.
if self._parallel_id == 0:
ctx.sync_master = self._sync_master
else:
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
# print('a')
# print(type(sum_), type(ssum), type(sum_size), sum_.shape, ssum.shape, sum_size)
# broadcasted = Broadcast.apply(target_gpus, sum_, ssum, torch.tensor(sum_size).float().to(sum_.device))
# print('b')
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
# outputs.append((rec[0], _MasterMessage(*broadcasted[i*3:i*3+3])))
return outputs
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
return mean, torch.rsqrt(bias_var + self.eps)
# return mean, bias_var.clamp(self.eps) ** -0.5
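  # (Worked sketch of the identities above: with s = sum(x) and ss = sum(x**2)
  #  over n elements, mean = s / n and sumvar = ss - s * mean = sum((x - mean)**2),
  #  hence bias_var = sumvar / n, unbias_var = sumvar / (n - 1), and the returned
  #  inverse std is 1 / sqrt(bias_var + eps).)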
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm1d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
  training, PyTorch's implementation normalizes the tensor on each device using
  the statistics only on that device, which accelerates the computation and
  is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
  Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
  as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
of 3d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm2d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
  training, PyTorch's implementation normalizes the tensor on each device using
  the statistics only on that device, which accelerates the computation and
  is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
  Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
  as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
of 4d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm3d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
  training, PyTorch's implementation normalizes the tensor on each device using
  the statistics only on that device, which accelerates the computation and
  is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
  Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
  as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
    and variance. The running estimates are kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
or Spatio-temporal BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x depth x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm3d, self)._check_input_dim(input) | 14,882 | 41.644699 | 159 | py |
adcgan | adcgan-main/BigGAN-PyTorch/sync_batchnorm/batchnorm_reimpl.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNorm2dReimpl']
class BatchNorm2dReimpl(nn.Module):
"""
A re-implementation of batch normalization, used for testing the numerical
stability.
Author: acgtyrant
See also:
https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = nn.Parameter(torch.empty(num_features))
self.bias = nn.Parameter(torch.empty(num_features))
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.fill_(1)
def reset_parameters(self):
self.reset_running_stats()
init.uniform_(self.weight)
init.zeros_(self.bias)
def forward(self, input_):
batchsize, channels, height, width = input_.size()
numel = batchsize * height * width
input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel)
sum_ = input_.sum(1)
sum_of_square = input_.pow(2).sum(1)
mean = sum_ / numel
sumvar = sum_of_square - sum_ * mean
self.running_mean = (
(1 - self.momentum) * self.running_mean
+ self.momentum * mean.detach()
)
unbias_var = sumvar / (numel - 1)
self.running_var = (
(1 - self.momentum) * self.running_var
+ self.momentum * unbias_var.detach()
)
bias_var = sumvar / numel
inv_std = 1 / (bias_var + self.eps).pow(0.5)
output = (
(input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous()
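def _batchnorm2d_reimpl_demo():
    # Hedged sanity-check sketch (not part of the original file): compare the
    # re-implementation above against torch.nn.BatchNorm2d on random data.
    # Both modules default to training mode, so both normalize with biased
    # batch statistics; the affine parameters are copied so the outputs are
    # directly comparable.
    torch.manual_seed(0)
    x = torch.randn(8, 4, 5, 5)
    reimpl = BatchNorm2dReimpl(4)
    reference = nn.BatchNorm2d(4)
    with torch.no_grad():
        reference.weight.copy_(reimpl.weight)
        reference.bias.copy_(reimpl.bias)
    return (reimpl(x) - reference(x)).abs().max()  # expected: ~1e-6 or below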
| 2,383 | 30.786667 | 95 | py |
adcgan | adcgan-main/BigGAN-PyTorch/sync_batchnorm/comm.py | # -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
"""A thread-safe future implementation. Used only as one-to-one pipe."""
def __init__(self):
self._result = None
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
def put(self, result):
with self._lock:
            assert self._result is None, 'Previous result hasn\'t been fetched.'
self._result = result
self._cond.notify()
def get(self):
with self._lock:
if self._result is None:
self._cond.wait()
res = self._result
self._result = None
return res
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
"""Pipe for master-slave communication."""
def run_slave(self, msg):
self.queue.put((self.identifier, msg))
ret = self.result.get()
self.queue.put(True)
return ret
class SyncMaster(object):
"""An abstract `SyncMaster` object.
- During the replication, as the data parallel will trigger an callback of each module, all slave devices should
call `register(id)` and obtain an `SlavePipe` to communicate with the master.
- During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected,
and passed to a registered callback.
- After receiving the messages, the master device should gather the information and determine to message passed
back to each slave devices.
"""
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False
def __getstate__(self):
return {'master_callback': self._master_callback}
def __setstate__(self, state):
self.__init__(state['master_callback'])
def register_slave(self, identifier):
"""
        Register a slave device.
        Args:
            identifier: an identifier, usually the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future)
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
        Messages are first collected from each device (including the master
        device), and then a callback is invoked to compute the message to be
        sent back to each device (including the master device).
Args:
            master_msg: the message that the master wants to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belong to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1]
@property
def nr_slaves(self):
return len(self._registry)
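def _sync_master_demo():
    # Hedged protocol sketch (not part of the original file): one master and
    # one slave thread exchange messages through a SyncMaster. The callback
    # sums every collected message and broadcasts the total back to all
    # registered devices.
    def _sum_callback(intermediates):
        total = sum(msg for _, msg in intermediates)
        return [(identifier, total) for identifier, _ in intermediates]
    master = SyncMaster(_sum_callback)
    pipe = master.register_slave(identifier=1)
    results = {}
    def _slave():
        # run_slave blocks until the master has collected all messages and replied.
        results['slave'] = pipe.run_slave(2)
    worker = threading.Thread(target=_slave)
    worker.start()
    results['master'] = master.run_master(40)
    worker.join()
    return results  # {'master': 42, 'slave': 42}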
| 4,449 | 31.246377 | 117 | py |
adcgan | adcgan-main/BigGAN-PyTorch/sync_batchnorm/__init__.py | # -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
from .replicate import DataParallelWithCallback, patch_replication_callback
| 449 | 33.615385 | 96 | py |
adcgan | adcgan-main/BigGAN-PyTorch/TFHub/biggan_v1.py | # BigGAN V1:
# This is now deprecated code used for porting the TFHub modules to pytorch,
# included here for reference only.
import numpy as np
import torch
from scipy.stats import truncnorm
from torch import nn
from torch.nn import Parameter
from torch.nn import functional as F
def l2normalize(v, eps=1e-4):
return v / (v.norm() + eps)
def truncated_z_sample(batch_size, z_dim, truncation=0.5, seed=None):
state = None if seed is None else np.random.RandomState(seed)
values = truncnorm.rvs(-2, 2, size=(batch_size, z_dim), random_state=state)
return truncation * values
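def _truncated_z_demo():
  # Hedged usage sketch (not part of the original port): the truncation trick
  # draws z from a normal truncated to [-2, 2] and rescales it by
  # `truncation`, trading sample diversity for fidelity.
  z = truncated_z_sample(batch_size=4, z_dim=120, truncation=0.5, seed=0)
  return z.shape  # (4, 120); every entry lies within [-1, 1]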
def denorm(x):
out = (x + 1) / 2
return out.clamp_(0, 1)
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
_w = w.view(height, -1)
for _ in range(self.power_iterations):
v = l2normalize(torch.matmul(_w.t(), u))
u = l2normalize(torch.matmul(_w, v))
sigma = u.dot((_w).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
getattr(self.module, self.name + "_u")
getattr(self.module, self.name + "_v")
getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + "_u", u)
self.module.register_parameter(self.name + "_v", v)
self.module.register_parameter(self.name + "_bar", w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
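def _spectral_norm_demo():
  # Hedged usage sketch (not part of the original port): wrapping a module in
  # SpectralNorm runs one power-iteration step per forward pass and rescales
  # the wrapped weight by its estimated largest singular value.
  conv = SpectralNorm(nn.Conv2d(3, 8, kernel_size=3, padding=1))
  x = torch.randn(2, 3, 16, 16)
  return conv(x).shape  # torch.Size([2, 8, 16, 16])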
class SelfAttention(nn.Module):
""" Self Attention Layer"""
def __init__(self, in_dim, activation=F.relu):
super().__init__()
    self.channel_in = in_dim
self.activation = activation
self.theta = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1, bias=False))
self.phi = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1, bias=False))
self.pool = nn.MaxPool2d(2, 2)
self.g = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 2, kernel_size=1, bias=False))
self.o_conv = SpectralNorm(nn.Conv2d(in_channels=in_dim // 2, out_channels=in_dim, kernel_size=1, bias=False))
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
m_batchsize, C, width, height = x.size()
N = height * width
theta = self.theta(x)
phi = self.phi(x)
phi = self.pool(phi)
phi = phi.view(m_batchsize, -1, N // 4)
theta = theta.view(m_batchsize, -1, N)
theta = theta.permute(0, 2, 1)
attention = self.softmax(torch.bmm(theta, phi))
g = self.pool(self.g(x)).view(m_batchsize, -1, N // 4)
attn_g = torch.bmm(g, attention.permute(0, 2, 1)).view(m_batchsize, -1, width, height)
out = self.o_conv(attn_g)
return self.gamma * out + x
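def _self_attention_demo():
  # Hedged usage sketch (not part of the original port): the attention block
  # is shape-preserving; `in_dim` must be divisible by 8 for the theta/phi
  # bottleneck, and the spatial dims should be even because of the pooling.
  attn = SelfAttention(16)
  x = torch.randn(2, 16, 8, 8)
  return attn(x).shape  # torch.Size([2, 16, 8, 8])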
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, num_classes, eps=1e-4, momentum=0.1):
super().__init__()
self.num_features = num_features
self.bn = nn.BatchNorm2d(num_features, affine=False, eps=eps, momentum=momentum)
self.gamma_embed = SpectralNorm(nn.Linear(num_classes, num_features, bias=False))
self.beta_embed = SpectralNorm(nn.Linear(num_classes, num_features, bias=False))
def forward(self, x, y):
out = self.bn(x)
gamma = self.gamma_embed(y) + 1
beta = self.beta_embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
return out
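def _conditional_bn_demo():
  # Hedged usage sketch (not part of the original port): the second argument
  # is a dense conditioning vector (e.g. a class embedding concatenated with
  # a noise chunk), not an integer label; it predicts per-channel gamma/beta
  # offsets around the identity transform.
  cbn = ConditionalBatchNorm2d(num_features=16, num_classes=148)
  x = torch.randn(4, 16, 8, 8)
  y = torch.randn(4, 148)
  return cbn(x, y).shape  # torch.Size([4, 16, 8, 8])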
class GBlock(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size=[3, 3],
padding=1,
stride=1,
n_class=None,
bn=True,
activation=F.relu,
upsample=True,
downsample=False,
z_dim=148,
):
super().__init__()
    self.conv0 = SpectralNorm(
      nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, bias=True)
    )
    self.conv1 = SpectralNorm(
      nn.Conv2d(out_channel, out_channel, kernel_size, stride, padding, bias=True)
    )
self.skip_proj = False
if in_channel != out_channel or upsample or downsample:
self.conv_sc = SpectralNorm(nn.Conv2d(in_channel, out_channel, 1, 1, 0))
self.skip_proj = True
self.upsample = upsample
self.downsample = downsample
self.activation = activation
self.bn = bn
if bn:
self.HyperBN = ConditionalBatchNorm2d(in_channel, z_dim)
self.HyperBN_1 = ConditionalBatchNorm2d(out_channel, z_dim)
def forward(self, input, condition=None):
out = input
if self.bn:
out = self.HyperBN(out, condition)
out = self.activation(out)
if self.upsample:
out = F.interpolate(out, scale_factor=2)
out = self.conv0(out)
if self.bn:
out = self.HyperBN_1(out, condition)
out = self.activation(out)
out = self.conv1(out)
if self.downsample:
out = F.avg_pool2d(out, 2)
if self.skip_proj:
skip = input
if self.upsample:
skip = F.interpolate(skip, scale_factor=2)
skip = self.conv_sc(skip)
if self.downsample:
skip = F.avg_pool2d(skip, 2)
else:
skip = input
return out + skip
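def _gblock_demo():
  # Hedged usage sketch (not part of the original port): with the default
  # settings a generator block upsamples by two and applies two
  # conditional-BN/conv stages plus a projected skip connection.
  block = GBlock(in_channel=8, out_channel=4, z_dim=20)
  x = torch.randn(2, 8, 4, 4)
  condition = torch.randn(2, 20)
  return block(x, condition).shape  # torch.Size([2, 4, 8, 8])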
class Generator128(nn.Module):
def __init__(self, code_dim=120, n_class=1000, chn=96, debug=False):
super().__init__()
self.linear = nn.Linear(n_class, 128, bias=False)
if debug:
chn = 8
self.first_view = 16 * chn
self.G_linear = SpectralNorm(nn.Linear(20, 4 * 4 * 16 * chn))
z_dim = code_dim + 28
self.GBlock = nn.ModuleList([
GBlock(16 * chn, 16 * chn, n_class=n_class, z_dim=z_dim),
GBlock(16 * chn, 8 * chn, n_class=n_class, z_dim=z_dim),
GBlock(8 * chn, 4 * chn, n_class=n_class, z_dim=z_dim),
GBlock(4 * chn, 2 * chn, n_class=n_class, z_dim=z_dim),
GBlock(2 * chn, 1 * chn, n_class=n_class, z_dim=z_dim),
])
self.sa_id = 4
self.num_split = len(self.GBlock) + 1
self.attention = SelfAttention(2 * chn)
self.ScaledCrossReplicaBN = nn.BatchNorm2d(1 * chn, eps=1e-4)
self.colorize = SpectralNorm(nn.Conv2d(1 * chn, 3, [3, 3], padding=1))
def forward(self, input, class_id):
codes = torch.chunk(input, self.num_split, 1)
class_emb = self.linear(class_id) # 128
out = self.G_linear(codes[0])
out = out.view(-1, 4, 4, self.first_view).permute(0, 3, 1, 2)
for i, (code, GBlock) in enumerate(zip(codes[1:], self.GBlock)):
if i == self.sa_id:
out = self.attention(out)
condition = torch.cat([code, class_emb], 1)
out = GBlock(out, condition)
out = self.ScaledCrossReplicaBN(out)
out = F.relu(out)
out = self.colorize(out)
return torch.tanh(out)
class Generator256(nn.Module):
def __init__(self, code_dim=140, n_class=1000, chn=96, debug=False):
super().__init__()
self.linear = nn.Linear(n_class, 128, bias=False)
if debug:
chn = 8
self.first_view = 16 * chn
self.G_linear = SpectralNorm(nn.Linear(20, 4 * 4 * 16 * chn))
self.GBlock = nn.ModuleList([
GBlock(16 * chn, 16 * chn, n_class=n_class),
GBlock(16 * chn, 8 * chn, n_class=n_class),
GBlock(8 * chn, 8 * chn, n_class=n_class),
GBlock(8 * chn, 4 * chn, n_class=n_class),
GBlock(4 * chn, 2 * chn, n_class=n_class),
GBlock(2 * chn, 1 * chn, n_class=n_class),
])
self.sa_id = 5
self.num_split = len(self.GBlock) + 1
self.attention = SelfAttention(2 * chn)
self.ScaledCrossReplicaBN = nn.BatchNorm2d(1 * chn, eps=1e-4)
self.colorize = SpectralNorm(nn.Conv2d(1 * chn, 3, [3, 3], padding=1))
def forward(self, input, class_id):
codes = torch.chunk(input, self.num_split, 1)
class_emb = self.linear(class_id) # 128
out = self.G_linear(codes[0])
out = out.view(-1, 4, 4, self.first_view).permute(0, 3, 1, 2)
for i, (code, GBlock) in enumerate(zip(codes[1:], self.GBlock)):
if i == self.sa_id:
out = self.attention(out)
condition = torch.cat([code, class_emb], 1)
out = GBlock(out, condition)
out = self.ScaledCrossReplicaBN(out)
out = F.relu(out)
out = self.colorize(out)
return torch.tanh(out)
class Generator512(nn.Module):
def __init__(self, code_dim=128, n_class=1000, chn=96, debug=False):
super().__init__()
self.linear = nn.Linear(n_class, 128, bias=False)
if debug:
chn = 8
self.first_view = 16 * chn
self.G_linear = SpectralNorm(nn.Linear(16, 4 * 4 * 16 * chn))
z_dim = code_dim + 16
self.GBlock = nn.ModuleList([
GBlock(16 * chn, 16 * chn, n_class=n_class, z_dim=z_dim),
GBlock(16 * chn, 8 * chn, n_class=n_class, z_dim=z_dim),
GBlock(8 * chn, 8 * chn, n_class=n_class, z_dim=z_dim),
GBlock(8 * chn, 4 * chn, n_class=n_class, z_dim=z_dim),
GBlock(4 * chn, 2 * chn, n_class=n_class, z_dim=z_dim),
GBlock(2 * chn, 1 * chn, n_class=n_class, z_dim=z_dim),
GBlock(1 * chn, 1 * chn, n_class=n_class, z_dim=z_dim),
])
self.sa_id = 4
self.num_split = len(self.GBlock) + 1
self.attention = SelfAttention(4 * chn)
self.ScaledCrossReplicaBN = nn.BatchNorm2d(1 * chn)
self.colorize = SpectralNorm(nn.Conv2d(1 * chn, 3, [3, 3], padding=1))
def forward(self, input, class_id):
codes = torch.chunk(input, self.num_split, 1)
class_emb = self.linear(class_id) # 128
out = self.G_linear(codes[0])
out = out.view(-1, 4, 4, self.first_view).permute(0, 3, 1, 2)
for i, (code, GBlock) in enumerate(zip(codes[1:], self.GBlock)):
if i == self.sa_id:
out = self.attention(out)
condition = torch.cat([code, class_emb], 1)
out = GBlock(out, condition)
out = self.ScaledCrossReplicaBN(out)
out = F.relu(out)
out = self.colorize(out)
return torch.tanh(out)
class Discriminator(nn.Module):
def __init__(self, n_class=1000, chn=96, debug=False):
super().__init__()
def conv(in_channel, out_channel, downsample=True):
return GBlock(in_channel, out_channel, bn=False, upsample=False, downsample=downsample)
if debug:
chn = 8
self.debug = debug
self.pre_conv = nn.Sequential(
SpectralNorm(nn.Conv2d(3, 1 * chn, 3, padding=1)),
nn.ReLU(),
SpectralNorm(nn.Conv2d(1 * chn, 1 * chn, 3, padding=1)),
nn.AvgPool2d(2),
)
self.pre_skip = SpectralNorm(nn.Conv2d(3, 1 * chn, 1))
self.conv = nn.Sequential(
conv(1 * chn, 1 * chn, downsample=True),
conv(1 * chn, 2 * chn, downsample=True),
SelfAttention(2 * chn),
conv(2 * chn, 2 * chn, downsample=True),
conv(2 * chn, 4 * chn, downsample=True),
conv(4 * chn, 8 * chn, downsample=True),
conv(8 * chn, 8 * chn, downsample=True),
conv(8 * chn, 16 * chn, downsample=True),
conv(16 * chn, 16 * chn, downsample=False),
)
self.linear = SpectralNorm(nn.Linear(16 * chn, 1))
self.embed = nn.Embedding(n_class, 16 * chn)
self.embed.weight.data.uniform_(-0.1, 0.1)
self.embed = SpectralNorm(self.embed)
def forward(self, input, class_id):
out = self.pre_conv(input)
out += self.pre_skip(F.avg_pool2d(input, 2))
out = self.conv(out)
out = F.relu(out)
out = out.view(out.size(0), out.size(1), -1)
out = out.sum(2)
out_linear = self.linear(out).squeeze(1)
embed = self.embed(class_id)
prod = (out * embed).sum(1)
return out_linear + prod | 12,173 | 30.29563 | 114 | py |
adcgan | adcgan-main/BigGAN-PyTorch/TFHub/converter.py | """Utilities for converting TFHub BigGAN generator weights to PyTorch.
Recommended usage:
To convert all BigGAN variants and generate test samples, use:
```bash
CUDA_VISIBLE_DEVICES=0 python converter.py --generate_samples
```
See `parse_args` for additional options.
"""
import argparse
import os
import sys
import h5py
import torch
import torch.nn as nn
from torchvision.utils import save_image
import tensorflow as tf
import tensorflow_hub as hub
import parse
# import reference biggan from this folder
import biggan_v1 as biggan_for_conversion
# Import model from main folder
sys.path.append('..')
import BigGAN
DEVICE = 'cuda'
HDF5_TMPL = 'biggan-{}.h5'
PTH_TMPL = 'biggan-{}.pth'
MODULE_PATH_TMPL = 'https://tfhub.dev/deepmind/biggan-{}/2'
Z_DIMS = {
128: 120,
256: 140,
512: 128}
RESOLUTIONS = list(Z_DIMS)
def dump_tfhub_to_hdf5(module_path, hdf5_path, redownload=False):
"""Loads TFHub weights and saves them to intermediate HDF5 file.
Args:
module_path ([Path-like]): Path to TFHub module.
hdf5_path ([Path-like]): Path to output HDF5 file.
Returns:
[h5py.File]: Loaded hdf5 file containing module weights.
"""
if os.path.exists(hdf5_path) and (not redownload):
print('Loading BigGAN hdf5 file from:', hdf5_path)
return h5py.File(hdf5_path, 'r')
print('Loading BigGAN module from:', module_path)
tf.reset_default_graph()
hub.Module(module_path)
print('Loaded BigGAN module from:', module_path)
initializer = tf.global_variables_initializer()
sess = tf.Session()
sess.run(initializer)
print('Saving BigGAN weights to :', hdf5_path)
h5f = h5py.File(hdf5_path, 'w')
for var in tf.global_variables():
val = sess.run(var)
h5f.create_dataset(var.name, data=val)
print(f'Saving {var.name} with shape {val.shape}')
h5f.close()
return h5py.File(hdf5_path, 'r')
class TFHub2Pytorch(object):
TF_ROOT = 'module'
NUM_GBLOCK = {
128: 5,
256: 6,
512: 7
}
w = 'w'
b = 'b'
u = 'u0'
v = 'u1'
gamma = 'gamma'
beta = 'beta'
def __init__(self, state_dict, tf_weights, resolution=256, load_ema=True, verbose=False):
self.state_dict = state_dict
self.tf_weights = tf_weights
self.resolution = resolution
self.verbose = verbose
if load_ema:
for name in ['w', 'b', 'gamma', 'beta']:
setattr(self, name, getattr(self, name) + '/ema_b999900')
def load(self):
self.load_generator()
return self.state_dict
def load_generator(self):
GENERATOR_ROOT = os.path.join(self.TF_ROOT, 'Generator')
for i in range(self.NUM_GBLOCK[self.resolution]):
name_tf = os.path.join(GENERATOR_ROOT, 'GBlock')
name_tf += f'_{i}' if i != 0 else ''
self.load_GBlock(f'GBlock.{i}.', name_tf)
self.load_attention('attention.', os.path.join(GENERATOR_ROOT, 'attention'))
self.load_linear('linear', os.path.join(self.TF_ROOT, 'linear'), bias=False)
self.load_snlinear('G_linear', os.path.join(GENERATOR_ROOT, 'G_Z', 'G_linear'))
self.load_colorize('colorize', os.path.join(GENERATOR_ROOT, 'conv_2d'))
self.load_ScaledCrossReplicaBNs('ScaledCrossReplicaBN',
os.path.join(GENERATOR_ROOT, 'ScaledCrossReplicaBN'))
def load_linear(self, name_pth, name_tf, bias=True):
self.state_dict[name_pth + '.weight'] = self.load_tf_tensor(name_tf, self.w).permute(1, 0)
if bias:
self.state_dict[name_pth + '.bias'] = self.load_tf_tensor(name_tf, self.b)
def load_snlinear(self, name_pth, name_tf, bias=True):
self.state_dict[name_pth + '.module.weight_u'] = self.load_tf_tensor(name_tf, self.u).squeeze()
self.state_dict[name_pth + '.module.weight_v'] = self.load_tf_tensor(name_tf, self.v).squeeze()
self.state_dict[name_pth + '.module.weight_bar'] = self.load_tf_tensor(name_tf, self.w).permute(1, 0)
if bias:
self.state_dict[name_pth + '.module.bias'] = self.load_tf_tensor(name_tf, self.b)
def load_colorize(self, name_pth, name_tf):
self.load_snconv(name_pth, name_tf)
def load_GBlock(self, name_pth, name_tf):
self.load_convs(name_pth, name_tf)
self.load_HyperBNs(name_pth, name_tf)
def load_convs(self, name_pth, name_tf):
self.load_snconv(name_pth + 'conv0', os.path.join(name_tf, 'conv0'))
self.load_snconv(name_pth + 'conv1', os.path.join(name_tf, 'conv1'))
self.load_snconv(name_pth + 'conv_sc', os.path.join(name_tf, 'conv_sc'))
def load_snconv(self, name_pth, name_tf, bias=True):
if self.verbose:
print(f'loading: {name_pth} from {name_tf}')
self.state_dict[name_pth + '.module.weight_u'] = self.load_tf_tensor(name_tf, self.u).squeeze()
self.state_dict[name_pth + '.module.weight_v'] = self.load_tf_tensor(name_tf, self.v).squeeze()
self.state_dict[name_pth + '.module.weight_bar'] = self.load_tf_tensor(name_tf, self.w).permute(3, 2, 0, 1)
if bias:
self.state_dict[name_pth + '.module.bias'] = self.load_tf_tensor(name_tf, self.b).squeeze()
def load_conv(self, name_pth, name_tf, bias=True):
self.state_dict[name_pth + '.weight_u'] = self.load_tf_tensor(name_tf, self.u).squeeze()
self.state_dict[name_pth + '.weight_v'] = self.load_tf_tensor(name_tf, self.v).squeeze()
self.state_dict[name_pth + '.weight_bar'] = self.load_tf_tensor(name_tf, self.w).permute(3, 2, 0, 1)
if bias:
self.state_dict[name_pth + '.bias'] = self.load_tf_tensor(name_tf, self.b)
def load_HyperBNs(self, name_pth, name_tf):
self.load_HyperBN(name_pth + 'HyperBN', os.path.join(name_tf, 'HyperBN'))
self.load_HyperBN(name_pth + 'HyperBN_1', os.path.join(name_tf, 'HyperBN_1'))
def load_ScaledCrossReplicaBNs(self, name_pth, name_tf):
self.state_dict[name_pth + '.bias'] = self.load_tf_tensor(name_tf, self.beta).squeeze()
self.state_dict[name_pth + '.weight'] = self.load_tf_tensor(name_tf, self.gamma).squeeze()
self.state_dict[name_pth + '.running_mean'] = self.load_tf_tensor(name_tf + 'bn', 'accumulated_mean')
self.state_dict[name_pth + '.running_var'] = self.load_tf_tensor(name_tf + 'bn', 'accumulated_var')
self.state_dict[name_pth + '.num_batches_tracked'] = torch.tensor(
self.tf_weights[os.path.join(name_tf + 'bn', 'accumulation_counter:0')][()], dtype=torch.float32)
def load_HyperBN(self, name_pth, name_tf):
if self.verbose:
print(f'loading: {name_pth} from {name_tf}')
beta = name_pth + '.beta_embed.module'
gamma = name_pth + '.gamma_embed.module'
self.state_dict[beta + '.weight_u'] = self.load_tf_tensor(os.path.join(name_tf, 'beta'), self.u).squeeze()
self.state_dict[gamma + '.weight_u'] = self.load_tf_tensor(os.path.join(name_tf, 'gamma'), self.u).squeeze()
self.state_dict[beta + '.weight_v'] = self.load_tf_tensor(os.path.join(name_tf, 'beta'), self.v).squeeze()
self.state_dict[gamma + '.weight_v'] = self.load_tf_tensor(os.path.join(name_tf, 'gamma'), self.v).squeeze()
self.state_dict[beta + '.weight_bar'] = self.load_tf_tensor(os.path.join(name_tf, 'beta'), self.w).permute(1, 0)
self.state_dict[gamma +
'.weight_bar'] = self.load_tf_tensor(os.path.join(name_tf, 'gamma'), self.w).permute(1, 0)
cr_bn_name = name_tf.replace('HyperBN', 'CrossReplicaBN')
self.state_dict[name_pth + '.bn.running_mean'] = self.load_tf_tensor(cr_bn_name, 'accumulated_mean')
self.state_dict[name_pth + '.bn.running_var'] = self.load_tf_tensor(cr_bn_name, 'accumulated_var')
self.state_dict[name_pth + '.bn.num_batches_tracked'] = torch.tensor(
self.tf_weights[os.path.join(cr_bn_name, 'accumulation_counter:0')][()], dtype=torch.float32)
def load_attention(self, name_pth, name_tf):
self.load_snconv(name_pth + 'theta', os.path.join(name_tf, 'theta'), bias=False)
self.load_snconv(name_pth + 'phi', os.path.join(name_tf, 'phi'), bias=False)
self.load_snconv(name_pth + 'g', os.path.join(name_tf, 'g'), bias=False)
self.load_snconv(name_pth + 'o_conv', os.path.join(name_tf, 'o_conv'), bias=False)
self.state_dict[name_pth + 'gamma'] = self.load_tf_tensor(name_tf, self.gamma)
def load_tf_tensor(self, prefix, var, device='0'):
name = os.path.join(prefix, var) + f':{device}'
return torch.from_numpy(self.tf_weights[name][:])
# Convert from v1: this function maps the v1 (TFHub-port) parameter names to
# the corresponding keys in the BigGAN-PyTorch state dict.
def convert_from_v1(hub_dict, resolution=128):
weightname_dict = {'weight_u': 'u0', 'weight_bar': 'weight', 'bias': 'bias'}
convnum_dict = {'conv0': 'conv1', 'conv1': 'conv2', 'conv_sc': 'conv_sc'}
attention_blocknum = {128: 3, 256: 4, 512: 3}[resolution]
hub2me = {'linear.weight': 'shared.weight', # This is actually the shared weight
# Linear stuff
'G_linear.module.weight_bar': 'linear.weight',
'G_linear.module.bias': 'linear.bias',
'G_linear.module.weight_u': 'linear.u0',
# output layer stuff
'ScaledCrossReplicaBN.weight': 'output_layer.0.gain',
'ScaledCrossReplicaBN.bias': 'output_layer.0.bias',
'ScaledCrossReplicaBN.running_mean': 'output_layer.0.stored_mean',
'ScaledCrossReplicaBN.running_var': 'output_layer.0.stored_var',
'colorize.module.weight_bar': 'output_layer.2.weight',
'colorize.module.bias': 'output_layer.2.bias',
'colorize.module.weight_u': 'output_layer.2.u0',
# Attention stuff
'attention.gamma': 'blocks.%d.1.gamma' % attention_blocknum,
'attention.theta.module.weight_u': 'blocks.%d.1.theta.u0' % attention_blocknum,
'attention.theta.module.weight_bar': 'blocks.%d.1.theta.weight' % attention_blocknum,
'attention.phi.module.weight_u': 'blocks.%d.1.phi.u0' % attention_blocknum,
'attention.phi.module.weight_bar': 'blocks.%d.1.phi.weight' % attention_blocknum,
'attention.g.module.weight_u': 'blocks.%d.1.g.u0' % attention_blocknum,
'attention.g.module.weight_bar': 'blocks.%d.1.g.weight' % attention_blocknum,
'attention.o_conv.module.weight_u': 'blocks.%d.1.o.u0' % attention_blocknum,
'attention.o_conv.module.weight_bar':'blocks.%d.1.o.weight' % attention_blocknum,
}
# Loop over the hub dict and build the hub2me map
for name in hub_dict.keys():
if 'GBlock' in name:
if 'HyperBN' not in name: # it's a conv
out = parse.parse('GBlock.{:d}.{}.module.{}',name)
blocknum, convnum, weightname = out
if weightname not in weightname_dict:
continue # else hyperBN in
out_name = 'blocks.%d.0.%s.%s' % (blocknum, convnum_dict[convnum], weightname_dict[weightname]) # Increment conv number by 1
else: # hyperbn not conv
BNnum = 2 if 'HyperBN_1' in name else 1
if 'embed' in name:
out = parse.parse('GBlock.{:d}.{}.module.{}',name)
blocknum, gamma_or_beta, weightname = out
if weightname not in weightname_dict: # Ignore weight_v
continue
out_name = 'blocks.%d.0.bn%d.%s.%s' % (blocknum, BNnum, 'gain' if 'gamma' in gamma_or_beta else 'bias', weightname_dict[weightname])
else:
out = parse.parse('GBlock.{:d}.{}.bn.{}',name)
blocknum, dummy, mean_or_var = out
if 'num_batches_tracked' in mean_or_var:
continue
out_name = 'blocks.%d.0.bn%d.%s' % (blocknum, BNnum, 'stored_mean' if 'mean' in mean_or_var else 'stored_var')
hub2me[name] = out_name
# Invert the hub2me map
me2hub = {hub2me[item]: item for item in hub2me}
new_dict = {}
dimz_dict = {128: 20, 256: 20, 512:16}
for item in me2hub:
# Swap input dim ordering on batchnorm bois to account for my arbitrary change of ordering when concatenating Ys and Zs
if ('bn' in item and 'weight' in item) and ('gain' in item or 'bias' in item) and ('output_layer' not in item):
new_dict[item] = torch.cat([hub_dict[me2hub[item]][:, -128:], hub_dict[me2hub[item]][:, :dimz_dict[resolution]]], 1)
# Reshape the first linear weight, bias, and u0
elif item == 'linear.weight':
new_dict[item] = hub_dict[me2hub[item]].contiguous().view(4, 4, 96 * 16, -1).permute(2,0,1,3).contiguous().view(-1,dimz_dict[resolution])
elif item == 'linear.bias':
new_dict[item] = hub_dict[me2hub[item]].view(4, 4, 96 * 16).permute(2,0,1).contiguous().view(-1)
elif item == 'linear.u0':
new_dict[item] = hub_dict[me2hub[item]].view(4, 4, 96 * 16).permute(2,0,1).contiguous().view(1, -1)
elif me2hub[item] == 'linear.weight': # THIS IS THE SHARED WEIGHT NOT THE FIRST LINEAR LAYER
# Transpose shared weight so that it's an embedding
new_dict[item] = hub_dict[me2hub[item]].t()
elif 'weight_u' in me2hub[item]: # Unsqueeze u0s
new_dict[item] = hub_dict[me2hub[item]].unsqueeze(0)
else:
new_dict[item] = hub_dict[me2hub[item]]
return new_dict
def get_config(resolution):
attn_dict = {128: '64', 256: '128', 512: '64'}
dim_z_dict = {128: 120, 256: 140, 512: 128}
config = {'G_param': 'SN', 'D_param': 'SN',
'G_ch': 96, 'D_ch': 96,
'D_wide': True, 'G_shared': True,
'shared_dim': 128, 'dim_z': dim_z_dict[resolution],
'hier': True, 'cross_replica': False,
'mybn': False, 'G_activation': nn.ReLU(inplace=True),
'G_attn': attn_dict[resolution],
'norm_style': 'bn',
'G_init': 'ortho', 'skip_init': True, 'no_optim': True,
'G_fp16': False, 'G_mixed_precision': False,
'accumulate_stats': False, 'num_standing_accumulations': 16,
'G_eval_mode': True,
'BN_eps': 1e-04, 'SN_eps': 1e-04,
'num_G_SVs': 1, 'num_G_SV_itrs': 1, 'resolution': resolution,
'n_classes': 1000}
return config
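def _get_config_demo():
  # Hedged usage sketch (not part of the original script): build an untrained
  # BigGAN generator skeleton from the conversion config for one resolution;
  # the converted weights still have to be loaded on top (see convert_biggan).
  config = get_config(128)
  return BigGAN.Generator(**config)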
def convert_biggan(resolution, weight_dir, redownload=False, no_ema=False, verbose=False):
module_path = MODULE_PATH_TMPL.format(resolution)
hdf5_path = os.path.join(weight_dir, HDF5_TMPL.format(resolution))
pth_path = os.path.join(weight_dir, PTH_TMPL.format(resolution))
tf_weights = dump_tfhub_to_hdf5(module_path, hdf5_path, redownload=redownload)
G_temp = getattr(biggan_for_conversion, f'Generator{resolution}')()
state_dict_temp = G_temp.state_dict()
converter = TFHub2Pytorch(state_dict_temp, tf_weights, resolution=resolution,
load_ema=(not no_ema), verbose=verbose)
state_dict_v1 = converter.load()
state_dict = convert_from_v1(state_dict_v1, resolution)
# Get the config, build the model
config = get_config(resolution)
G = BigGAN.Generator(**config)
G.load_state_dict(state_dict, strict=False) # Ignore missing sv0 entries
torch.save(state_dict, pth_path)
# output_location ='pretrained_weights/TFHub-PyTorch-128.pth'
return G
def generate_sample(G, z_dim, batch_size, filename, parallel=False):
G.eval()
G.to(DEVICE)
with torch.no_grad():
z = torch.randn(batch_size, G.dim_z).to(DEVICE)
y = torch.randint(low=0, high=1000, size=(batch_size,),
device=DEVICE, dtype=torch.int64, requires_grad=False)
if parallel:
images = nn.parallel.data_parallel(G, (z, G.shared(y)))
else:
images = G(z, G.shared(y))
save_image(images, filename, scale_each=True, normalize=True)
def parse_args():
usage = 'Parser for conversion script.'
parser = argparse.ArgumentParser(description=usage)
parser.add_argument(
'--resolution', '-r', type=int, default=None, choices=[128, 256, 512],
help='Resolution of TFHub module to convert. Converts all resolutions if None.')
parser.add_argument(
'--redownload', action='store_true', default=False,
help='Redownload weights and overwrite current hdf5 file, if present.')
parser.add_argument(
'--weights_dir', type=str, default='pretrained_weights')
parser.add_argument(
'--samples_dir', type=str, default='pretrained_samples')
parser.add_argument(
'--no_ema', action='store_true', default=False,
help='Do not load ema weights.')
parser.add_argument(
'--verbose', action='store_true', default=False,
    help='Additional logging.')
parser.add_argument(
'--generate_samples', action='store_true', default=False,
help='Generate test sample with pretrained model.')
parser.add_argument(
'--batch_size', type=int, default=64,
help='Batch size used for test sample.')
parser.add_argument(
'--parallel', action='store_true', default=False,
help='Parallelize G?')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
os.makedirs(args.weights_dir, exist_ok=True)
os.makedirs(args.samples_dir, exist_ok=True)
if args.resolution is not None:
G = convert_biggan(args.resolution, args.weights_dir,
redownload=args.redownload,
no_ema=args.no_ema, verbose=args.verbose)
if args.generate_samples:
filename = os.path.join(args.samples_dir, f'biggan{args.resolution}_samples.jpg')
print('Generating samples...')
generate_sample(G, Z_DIMS[args.resolution], args.batch_size, filename, args.parallel)
else:
for res in RESOLUTIONS:
G = convert_biggan(res, args.weights_dir,
redownload=args.redownload,
no_ema=args.no_ema, verbose=args.verbose)
if args.generate_samples:
filename = os.path.join(args.samples_dir, f'biggan{res}_samples.jpg')
print('Generating samples...')
generate_sample(G, Z_DIMS[res], args.batch_size, filename, args.parallel) | 17,428 | 42.355721 | 143 | py |
DeepSpectrum | DeepSpectrum-master/setup.py | #!/usr/bin/env python
import re
import sys
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
from setuptools import setup, find_packages
from subprocess import CalledProcessError, check_output
PROJECT = "DeepSpectrum"
VERSION = "0.6.9"
LICENSE = "GPLv3+"
AUTHOR = "Maurice Gerczuk"
AUTHOR_EMAIL = "[email protected]"
URL = 'https://github.com/DeepSpectrum/DeepSpectrum'
with open("DESCRIPTION.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
install_requires = [
"audeep>=0.9.4",
"imread>=0.7.0",
"tqdm>=4.30.0",
"matplotlib>=3.3",
"numba==0.48.0",
"librosa>=0.7.0, <0.8.0",
"click>=7.0",
"Pillow >=6.0.0",
"tensorflow-gpu>=1.15.2, <2",
"opencv-python>=4.0.0.21",
"torch>=1.2.0",
"torchvision>=0.5.0"
]
tests_require = ['pytest>=4.4.1', 'pytest-cov>=2.7.1']
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
setup_requires = ['pytest-runner'] if needs_pytest else []
packages = find_packages('src')
setup(
name=PROJECT,
version=VERSION,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
    description="DeepSpectrum is a Python toolkit for feature extraction from audio data with pre-trained Image Convolutional Neural Networks (CNNs).",
platforms=["Any"],
scripts=[],
provides=[],
python_requires="~=3.7.0",
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
namespace_packages=[],
packages=packages,
package_dir={'': 'src'},
#'audeep': 'auDeep/audeep'},
include_package_data=True,
entry_points={
"console_scripts": [
"deepspectrum = deepspectrum.__main__:cli",
]
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Environment :: GPU :: NVIDIA CUDA :: 10.0',
# Indicate who your project is intended for
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3.7',
],
keywords='machine-learning audio-analysis science research',
project_urls={
'Source': 'https://github.com/DeepSpectrum/DeepSpectrum/',
'Tracker': 'https://github.com/DeepSpectrum/DeepSpectrum/issues',
},
url=URL,
zip_safe=False,
)
| 2,795 | 29.064516 | 150 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/__main__.py | import sys, os
# sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'auDeep'))
import warnings
# from numba.errors import NumbaDeprecationWarning, NumbaPendingDeprecationWarning
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
# warnings.filterwarnings('ignore', category=NumbaDeprecationWarning)
# warnings.filterwarnings('ignore', category=NumbaPendingDeprecationWarning)
import click
import logging
import logging.config
import pkg_resources
from deepspectrum.cli.features import features
from deepspectrum.cli.image_features import image_features
from deepspectrum.cli.features_with_parser import features_with_parser
from deepspectrum.cli.plot import plot
from deepspectrum.cli.utils import add_options
from deepspectrum import __version__ as VERSION
_global_options = [
click.option('-v', '--verbose', count=True),
]
version_str = "DeepSpectrum %(version)s\nCopyright (C) 2017-2020 Shahin Amiriparian, Maurice Gerczuk, Sandra Ottl, " \
"Bjoern Schuller\n" \
"License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.\n" \
"This is free software: you are free to change and redistribute it.\n" \
"There is NO WARRANTY, to the extent permitted by law."
@click.group()
@add_options(_global_options)
@click.version_option(VERSION, message=version_str)
def cli(verbose):
click.echo('Verbosity: %s' % verbose)
log_levels = ['ERROR', 'INFO', 'DEBUG']
verbose = min(2, verbose)
logging.config.dictConfig({
'version': 1,
        'disable_existing_loggers': False,  # keep loggers created at import time active
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level': log_levels[verbose],
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_levels[verbose],
'propagate': True
}
}
})
cli.add_command(features)
cli.add_command(features_with_parser)
cli.add_command(plot)
cli.add_command(image_features)
| 2,397 | 32.305556 | 119 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/__init__.py | import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
__version__ = '0.6.9'
| 108 | 17.166667 | 58 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/cli/features.py | import logging
import click
from deepspectrum.cli.configuration import Configuration, GENERAL_OPTIONS,\
PLOTTING_OPTIONS, EXTRACTION_OPTIONS, LABEL_OPTIONS, WRITER_OPTIONS, Filetypes
from os import environ
from .utils import add_options
log = logging.getLogger(__name__)
DESCRIPTION_EXTRACT = 'Extract deep spectrum features from wav files.'
environ['GLOG_minloglevel'] = '2'
environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
@click.command(help=DESCRIPTION_EXTRACT)
@add_options(GENERAL_OPTIONS)
@add_options(PLOTTING_OPTIONS)
@add_options(EXTRACTION_OPTIONS)
@add_options(LABEL_OPTIONS)
@add_options(WRITER_OPTIONS)
def features(**kwargs):
# set up the configuration object and parse commandline arguments
from ..backend.plotting import PlotGenerator
from ..tools.feature_writer import get_writer
configuration = Configuration(plotting=True,
extraction=True,
writer=True,
file_type=Filetypes.AUDIO,
**kwargs)
plots = PlotGenerator(
files=configuration.files,
number_of_processes=configuration.number_of_processes,
**configuration.plotting_args)
log.info('Loading model and weights...')
extractor = configuration.extractor(images=plots,
**configuration.extraction_args)
writer = get_writer(**configuration.writer_args)
writer.write_features(configuration.files, extractor, hide_progress=False)
log.info('Done extracting features.')
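# Example invocation (illustrative paths and values):
#   deepspectrum features /path/to/audio -en vgg16 -fl fc2 -o features.csv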
| 1,569 | 34.681818 | 78 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/cli/utils.py | def add_options(options):
def _add_options(func):
for option in reversed(options):
func = option(func)
return func
return _add_options
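# Hedged usage sketch (illustrative only): bundle shared click options once
# and apply them to several commands with a single decorator, e.g.
#   _shared_options = [click.option('--foo', default=1)]
#   @click.command()
#   @add_options(_shared_options)
#   def cmd(foo):
#       ...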
| 172 | 20.625 | 40 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/cli/configuration.py | import logging
import click
import configparser
import fnmatch
import re
import decimal
from enum import Enum
from multiprocessing import cpu_count
from os import makedirs, walk
from matplotlib import cm
from os.path import abspath, join, isfile, basename, dirname, realpath, splitext
mpl_cmaps = list(cm.cmaps_listed)+list(cm.datad)
cmaps = mpl_cmaps
cmaps += [cmap+'_r' for cmap in mpl_cmaps]
from deepspectrum.backend.plotting import PLOTTING_FUNCTIONS
from deepspectrum.tools.label_parser import LabelParser
from deepspectrum.tools.path import get_relative_path
max_np = cpu_count()
decimal.getcontext().prec = 6
log = logging.getLogger(__name__)
def _check_positive(ctx, param, value):
if value is None:
return value
ivalue = int(value)
if ivalue <= 0:
raise click.BadParameter("%s is an invalid positive int value" % value)
return ivalue
class Filetypes(Enum):
AUDIO = ['wav', 'ogg', 'flac', 'mp3']
IMAGE = ['png', 'jpg']
GENERAL_OPTIONS = [
click.argument(
"input",
type=click.Path(dir_okay=True,
file_okay=True,
exists=True,
readable=True),
),
click.option(
"-c",
"--config",
type=click.Path(readable=True, dir_okay=False),
help="Path to configuration file which specifies available extraction networks. If this file does not exist a new one is created and filled with the standard settings.",
default=join(dirname(realpath(__file__)), "deep.conf"), show_default=True
),
click.option(
"-np",
"--number-of-processes",
type=click.IntRange(1, max_np, clamp=True),
help="Define the number of processes used in parallel for the extraction. If None defaults to cpu-count",
default=max_np, show_default=True
),
]
PARSER_OPTIONS = [
click.option(
"-p",
"--parser",
type=click.Path(readable=True, dir_okay=False),
help="Path to auDeep parser file.",
default=None, show_default=True)
]
PLOTTING_OPTIONS = [
click.option(
"-s",
"--start",
help="Set a start time from which features should be extracted from the audio files.",
type=decimal.Decimal,
default=0, show_default=True
),
click.option(
"-e",
"--end",
help="Set a end time until which features should be extracted from the audio files.",
type=decimal.Decimal,
default=None, show_default=True
),
click.option(
"-t",
"--window-size-and-hop",
help="Extract deep spectrum features from windows with specified length and hopsize in seconds.",
nargs=2,
type=decimal.Decimal,
default=[None, None], show_default=True
),
click.option(
"-nfft",
default=None,
help="specify the size for the FFT window in number of samples",
type=int, show_default=True
),
click.option(
"-cm",
"--colour-map",
default="viridis",
help="define the matplotlib colour map to use for the spectrograms",
show_default=True,
type=click.Choice(cmaps)), # ,
# choices=sorted([m for m in cm.cmap_d]))
click.option(
"-fql",
"--frequency-limit",
type=int,
help="define a limit for the frequency axis for plotting the spectrograms",
default=None, show_default=True
),
click.option(
"-sr",
"--sample-rate",
type=int,
help="define a target sample rate for reading the audio files. Audio files will be resampled to this rate before spectrograms are extracted.",
default=None, show_default=True
),
click.option(
"-so",
"--spectrogram-out",
help="define an existing folder where spectrogram plots should be saved during feature extraction. By default, spectrograms are not saved on disk to speed up extraction.",
default=None, show_default=True
),
click.option(
"-wo",
"--wav-out",
help="Convenience function to write the chunks of audio data used in the extraction to the specified folder.",
default=None, show_default=True
),
click.option(
"-m",
"--mode",
help="Type of plot to use in the system.",
default="spectrogram",
show_default=True,
type=click.Choice(PLOTTING_FUNCTIONS.keys()),
),
click.option(
"-nm",
"--number-of-melbands",
type=int,
callback=_check_positive,
help="Number of melbands used for computing the melspectrogram.",
default=128,
show_default=True
),
click.option(
"-fs",
"--frequency-scale",
help="Scale for the y-axis of the plots used by the system. Defaults to 'chroma' in chroma mode.",
default="linear",
show_default=True,
type=click.Choice(["linear", "log", "mel"]),
),
click.option(
"-d",
"--delta",
callback=_check_positive,
help="If given, derivatives of the given order of the selected features are displayed in the plots used by the system.",
default=None,
show_default=True
),
click.option(
"-ppdfs",
"--pretty_pdfs",
is_flag=True,
help="Add if you want to create nice pdf plots of the spectrograms the system uses. For figures in your papers ^.^",
),
]
EXTRACTION_OPTIONS = [
click.option(
"-en",
"--extraction-network",
help="specify the CNN that will be used for the feature extraction. You need to specify a valid weight file in .npy format in your configuration file for this network.",
default="vgg16",
show_default=True
),
click.option(
"-fl",
"--feature-layer",
default="fc2",
help="name of CNN layer from which to extract the features.",
show_default=True
),
click.option(
"-bs",
"--batch-size",
type=int,
help="Maximum batch size for feature extraction. Adjust according to your gpu memory size.",
default=128,
show_default=True
),
]
WRITER_OPTIONS = [
click.option(
"-o",
"--output",
help="The file which the features are written to. Supports csv and arff formats",
required=True,
type=click.Path(writable=True, dir_okay=False),
),
click.option(
"-nl",
"--no-labels",
is_flag=True,
help="Do not write class labels to the output.",
),
click.option(
"-nts",
"--no-timestamps",
is_flag=True,
help="Remove timestamps from the output.",
),
click.option(
"-tc",
"--time-continuous",
is_flag=True,
help='Set labelling of features to timecontinuous mode. Only works in conjunction with "-t" and a label file with a matching timestamp column.',
),
]
LABEL_OPTIONS = [
click.option(
"-lf",
"--label-file",
help="csv file with the labels for the files in the form: 'filename, label'. If nothing is specified here or under -labels, the name(s) of the directory/directories are used as labels.",
default=None,
show_default=True,
type=click.Path(exists=True, dir_okay=False, readable=True),
),
click.option(
"-el",
"--explicit-label",
type=str,
nargs=1,
help="Define an explicit label for the input files.",
default=None,
show_default=True
),
]
class Configuration:
"""
This class handles the configuration of the deep spectrum extractor by reading commandline options and the
    configuration file. It then parses the labels for the audio files and configures the CNN backend (Keras or
    PyTorch) used for extraction.
"""
def __init__(
self,
plotting=True,
extraction=True,
writer=True,
parser=False,
file_type=Filetypes.AUDIO,
input=None,
config="deep.conf",
number_of_processes=max_np,
colour_map="viridis",
mode="mel",
frequency_scale="linear",
delta=None,
frequency_limit=None,
nfft=None,
start=0,
end=None,
window_size_and_hop=None,
number_of_melbands=128,
spectrogram_out=None,
wav_out=None,
pretty_pdfs=False,
extraction_network="vgg16",
feature_layer="fc7",
batch_size=128,
output=None,
time_continuous=False,
label_file=None,
explicit_label=None,
no_timestamps=False,
no_labels=False,
sample_rate=None,
label_dict=None,
labels=None,
):
self.input_folder = input if not isfile(input) else dirname(input)
self.config = config
self.number_of_processes = number_of_processes
self.model_weights = "imagenet"
self.file_type = file_type
self.plotting = plotting
self.plotting_args = {}
self.extraction = extraction
self.extraction_args = {}
self.writer = writer
self.writer_args = {}
self.backend = "keras"
self.parser = parser
if self.plotting:
self.plotting_args["cmap"] = colour_map
self.plotting_args["mode"] = mode
self.plotting_args["scale"] = frequency_scale
self.plotting_args["delta"] = delta
self.plotting_args["ylim"] = frequency_limit
self.plotting_args["nfft"] = nfft
self.plotting_args["start"] = start
self.plotting_args["end"] = end
self.plotting_args["window"] = (window_size_and_hop[0]
if window_size_and_hop else None)
self.plotting_args["hop"] = (window_size_and_hop[1]
if window_size_and_hop else None)
self.plotting_args["resample"] = sample_rate
self.plotting_args["base_path"] = self.input_folder
if self.plotting_args["mode"] == "mel":
self.plotting_args["melbands"] = number_of_melbands
if self.plotting_args["mode"] == "chroma":
self.plotting_args["scale"] = "chroma"
self.plotting_args["output_spectrograms"] = (
abspath(spectrogram_out)
if spectrogram_out is not None else None)
self.plotting_args["output_wavs"] = (abspath(wav_out) if
wav_out is not None else None)
if pretty_pdfs:
self.plotting_args["file_type"] = "pdf"
self.plotting_args["labelling"] = True
if self.extraction:
self.net = extraction_network
self.extraction_args["layer"] = feature_layer
self.extraction_args["batch_size"] = batch_size
self._load_config()
self.files = self._find_files(input)
if not self.files:
log.error(
f"No files were found under the path {input}. Check the specified input path."
)
exit(1)
if self.writer:
self.label_file = label_file
self.writer_args["output"] = output
makedirs(dirname(abspath(self.writer_args["output"])),
exist_ok=True)
self.writer_args["continuous_labels"] = (
("window" in self.plotting_args) and time_continuous
and self.label_file)
self.writer_args["labels"] = explicit_label
self.writer_args["write_timestamps"] = (
window_size_and_hop !=
(None, None)) and not no_timestamps and self.plotting
self.writer_args["no_labels"] = no_labels
log.info("Parsing labels...")
if self.parser:
self.writer_args["label_dict"] = label_dict
self.writer_args["labels"] = labels
self._files_to_extract(relative_paths_in_label_dict=False)
elif self.label_file is not None:
self._read_label_file()
else:
self._create_labels_from_folder_structure()
def _find_files(self, folder):
log.debug(f'Input file types are "{self.file_type.value}".')
if isfile(folder) and splitext(folder)[1][1:] in self.file_type.value:
log.debug(f"{folder} is a single {self.file_type.value}-file.")
return [folder]
input_files = []
for file_type in self.file_type.value:
globexpression = "*." + file_type
reg_expr = re.compile(fnmatch.translate(globexpression),
re.IGNORECASE)
log.debug(f"Searching {folder} for {file_type}-files.")
for root, dirs, files in walk(folder, topdown=True):
new_files = [
join(root, j) for j in files if re.match(reg_expr, j)
]
log.debug(
f"Found {len(new_files)} {file_type}-files in {root}.")
input_files += new_files
log.debug(
f"Found a total of {len(input_files)} {self.file_type.value}-files."
)
return sorted(input_files)
def _files_to_extract(self, relative_paths_in_label_dict=True):
file_names = set(
map(
lambda f: get_relative_path(
f, prefix=self.input_folder), self.files))
if not relative_paths_in_label_dict:
self.writer_args["label_dict"] = {get_relative_path(
key, prefix=self.input_folder): value for key, value in self.writer_args["label_dict"].items()}
# check if labels are missing for specific files
missing_labels = file_names.difference(self.writer_args["label_dict"])
if missing_labels:
log.info(
f"No labels for: {len(missing_labels)} files. Only processing files with labels."
)
self.files = [
file for file in self.files
if get_relative_path(
file, prefix=self.input_folder) in self.writer_args["label_dict"]
]
log.info(f'Extracting features for {len(self.files)} files.')
def _read_label_file(self):
"""
Read labels from either .csv or .tsv files
:return: Nothing
"""
if self.label_file.endswith(".tsv"):
parser = LabelParser(
self.label_file,
delimiter="\t",
timecontinuous=self.writer_args["continuous_labels"],
)
else:
parser = LabelParser(
self.label_file,
delimiter=",",
timecontinuous=self.writer_args["continuous_labels"],
)
parser.parse_labels()
self.writer_args["label_dict"] = parser.label_dict
self.writer_args["labels"] = parser.labels
self._files_to_extract()
def _create_labels_from_folder_structure(self):
"""
If no label file is given, either explicit labels or the folder structure is used as class values for the input.
:return: Nothing
"""
if self.writer_args["labels"] is None:
self.writer_args["label_dict"] = {
get_relative_path(
f, prefix=self.input_folder): [basename(dirname(f))]
for f in self.files
}
else:
# map the labels given on the commandline to all files in a given folder to all input files
self.writer_args["label_dict"] = {
get_relative_path(f, prefix=self.input_folder):
[str(self.writer_args["labels"])]
for f in self.files
}
labels = sorted(
list(map(lambda x: x[0], self.writer_args["label_dict"].values())))
self.writer_args["labels"] = [("class", set(labels))]
def _load_config(self):
"""
        Parses the configuration file given on the commandline (self.config).
        If it does not exist yet, creates a new one containing standard settings.
:return: Nothing
"""
conf_parser = configparser.ConfigParser()
# check if the file exists and parse it
if isfile(self.config):
log.info("Found config file " + self.config)
conf_parser.read(self.config)
main_conf = conf_parser["main"]
self.plotting_args["size"] = int(main_conf["size"])
self.backend = main_conf["backend"]
filetypes = Enum(
'ConfigurationFiletypes', {
'AUDIO': main_conf['audioFormats'].split(','),
'IMAGE': main_conf['imageFormats'].split(',')
})
self.file_type = filetypes[self.file_type.name]
if self.extraction:
# only import here for performance reasons
from deepspectrum.backend.extractor import KerasExtractor, PytorchExtractor
keras_net_conf = conf_parser["keras-nets"]
pytorch_net_conf = conf_parser["pytorch-nets"]
if self.net in keras_net_conf:
self.extractor = KerasExtractor
self.extraction_args["weights_path"] = keras_net_conf[
self.net]
self.extraction_args["model_key"] = self.net
elif self.net in pytorch_net_conf:
self.extractor = PytorchExtractor
self.extraction_args["model_key"] = self.net
else:
log.error(
f"No model weights defined for {self.net} in {self.config}"
)
exit(1)
# if not, create it with standard settings
else:
log.info("Writing standard config to " + self.config)
makedirs(dirname(abspath(self.config)), exist_ok=True)
            # Read the default config file included in the package
conf_parser.read(join(dirname(realpath(__file__)), "deep.conf"))
with open(self.config, "w") as configfile:
conf_parser.write(configfile)
log.error(
f"Please initialize your configuration file in {self.config}"
)
exit(1)
| 18,723 | 34.732824 | 194 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/cli/plot.py | import click
import logging
from os import environ
from tqdm import tqdm
from deepspectrum.cli.configuration import Configuration, GENERAL_OPTIONS, PLOTTING_OPTIONS
from .utils import add_options
from ..backend.plotting import PlotGenerator
environ['GLOG_minloglevel'] = '2'
environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
log = logging.getLogger(__name__)
DESCRIPTION_PLOT = 'Create plots from wav files.'
@click.command(help=DESCRIPTION_PLOT)
@add_options(GENERAL_OPTIONS)
@add_options(PLOTTING_OPTIONS)
def plot(**kwargs):
# set up the configuration object and parse commandline arguments
configuration = Configuration(extraction=False, writer=False, **kwargs)
plots = PlotGenerator(
files=configuration.files,
number_of_processes=configuration.number_of_processes,
**configuration.plotting_args)
current_name = None
with tqdm(total=len(plots),
desc='Plotting wavs...',
disable=(log.getEffectiveLevel() >= logging.ERROR)) as pbar:
for plot_tuple in plots:
if current_name is None:
current_name = plot_tuple.name
elif current_name != plot_tuple.name:
pbar.update()
current_name = plot_tuple.name
log.info('Done plotting wavs.')
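# Example invocation (illustrative paths and values):
#   deepspectrum plot /path/to/audio -m mel -so /path/to/spectrograms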
| 1,284 | 30.341463 | 91 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/cli/__init__.py | 0 | 0 | 0 | py |
|
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/cli/features_with_parser.py | from os import environ
from .utils import add_options
import logging
import click
from deepspectrum.cli.configuration import Configuration, GENERAL_OPTIONS, PLOTTING_OPTIONS, EXTRACTION_OPTIONS, PARSER_OPTIONS, WRITER_OPTIONS, Filetypes
log = logging.getLogger(__name__)
DESCRIPTION_EXTRACT = 'Extract deep spectrum features from wav files.'
environ['GLOG_minloglevel'] = '2'
environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
@click.command(help=DESCRIPTION_EXTRACT)
@add_options(GENERAL_OPTIONS)
@add_options(PLOTTING_OPTIONS)
@add_options(EXTRACTION_OPTIONS)
@add_options(PARSER_OPTIONS)
@add_options(WRITER_OPTIONS)
def features_with_parser(**kwargs):
import importlib
from deepspectrum.backend.extractor import _batch_images
from ..backend.plotting import PlotGenerator
from ..tools.feature_writer import get_writer
from pathlib import Path
from os.path import splitext
from audeep.backend.parsers.meta import MetaParser
from audeep.backend.parsers.no_metadata import NoMetadataParser
from audeep.backend.data.data_set import Partition, Split
# set up the configuration object and parse commandline arguments
parser = kwargs.pop('parser')
if parser is not None:
module_name, class_name = parser.rsplit(".", 1)
parser_class = getattr(
importlib.import_module(module_name), class_name)
if not parser_class(basedir=Path(kwargs['input'])).can_parse():
log.error(
f'Cannot parse dataset at {kwargs["input"]} using {parser}.')
exit()
else:
parser_class = MetaParser
if not parser_class(basedir=Path(kwargs['input'])).can_parse():
parser_class = NoMetadataParser
parser = parser_class(basedir=Path(kwargs['input']))
instances = parser.parse()
num_folds = parser.num_folds
partitions = set()
if num_folds > 0:
        # one independent dict per fold ([{}] * n would alias a single dict)
        label_dicts = [{} for _ in range(num_folds)]
for i in instances:
nominal = i.label_nominal is not None
fold = i.cv_folds.index(Split.VALID)
label_dicts[fold][str(i.path)] = [i.label_nominal] if nominal else [
i.label_numeric]
else:
label_dicts = {'None': {}}
for i in instances:
nominal = i.label_nominal is not None
if i.partition is None:
label_dicts['None'][str(i.path)] = [i.label_nominal] if nominal else [
i.label_numeric]
else:
if i.partition not in label_dicts:
partitions.add(i.partition)
label_dicts[i.partition] = {}
label_dicts[i.partition][str(i.path)] = [i.label_nominal] if nominal else [
i.label_numeric]
use_folds = num_folds > 1
use_partitions = len(partitions) > 1
if nominal:
labels = [("class", set(parser.label_map().keys()))]
else:
labels = [("label", "NUMERIC")]
base_output = kwargs['output']
extractor = None
if use_partitions:
for p in partitions:
log_str = f"Extracting features for audio files in {kwargs['input']} using {parser.__class__.__name__}"
output = base_output
log_str += f" for partition {p.name.lower()}"
output = splitext(output)[0] + \
f'.{p.name.lower()}' + splitext(output)[-1]
kwargs['output'] = output
log.info(log_str)
label_dict = label_dicts[p]
configuration = Configuration(plotting=True,
extraction=True,
writer=True,
parser=True,
label_dict=label_dict,
labels=labels,
file_type=Filetypes.AUDIO,
**kwargs)
plots = PlotGenerator(
files=configuration.files,
number_of_processes=configuration.number_of_processes,
**configuration.plotting_args)
log.info('Loading model and weights...')
if extractor is None:
extractor = configuration.extractor(images=plots,
**configuration.extraction_args)
else:
extractor.set_images(plots)
writer = get_writer(**configuration.writer_args)
writer.write_features(configuration.files,
extractor, hide_progress=False)
elif use_folds:
for i in range(num_folds):
log_str = f"Extracting features for audio files in {kwargs['input']} using {parser.__class__.__name__} for fold {i}"
output = base_output
output = splitext(output)[0] + f'.fold-{i}' + splitext(output)[-1]
kwargs['output'] = output
log.info(log_str)
label_dict = label_dicts[i]
configuration = Configuration(plotting=True,
extraction=True,
writer=True,
parser=True,
label_dict=label_dict,
labels=labels,
file_type=Filetypes.AUDIO,
**kwargs)
plots = PlotGenerator(
files=configuration.files,
number_of_processes=configuration.number_of_processes,
**configuration.plotting_args)
log.info('Loading model and weights...')
if extractor is None:
extractor = configuration.extractor(images=plots,
**configuration.extraction_args)
else:
extractor.set_images(plots)
writer = get_writer(**configuration.writer_args)
writer.write_features(configuration.files,
extractor, hide_progress=False)
else:
log_str = f"Extracting features for audio files in {kwargs['input']} using {parser.__class__.__name__}"
output = base_output
kwargs['output'] = output
log.info(log_str)
label_dict = label_dicts['None']
configuration = Configuration(plotting=True,
extraction=True,
writer=True,
parser=True,
label_dict=label_dict,
labels=labels,
file_type=Filetypes.AUDIO,
**kwargs)
plots = PlotGenerator(
files=configuration.files,
number_of_processes=configuration.number_of_processes,
**configuration.plotting_args)
log.info('Loading model and weights...')
extractor = configuration.extractor(images=plots,
**configuration.extraction_args)
writer = get_writer(**configuration.writer_args)
writer.write_features(configuration.files,
extractor, hide_progress=False)
log.info('Done extracting features.')
| 7,436 | 41.255682 | 154 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/cli/image_features.py | import numpy as np
import click
import logging
from os import environ
from os.path import basename
from .configuration import Configuration, GENERAL_OPTIONS, EXTRACTION_OPTIONS, LABEL_OPTIONS, WRITER_OPTIONS, Filetypes
from .utils import add_options
from ..backend.plotting import PlotTuple
from ..tools.path import get_relative_path
environ['GLOG_minloglevel'] = '2'
environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
log = logging.getLogger(__name__)
DESCRIPTION_IMAGE_FEATURES = 'Extract CNN-descriptors from images.'
def image_reader(files, base_path=None, size=500):
import cv2
for image in files:
img = cv2.imread(image, cv2.IMREAD_COLOR)
img = cv2.resize(img, dsize=(size, size))
img = img[:, :, :3]
yield PlotTuple(name=get_relative_path(image, base_path),
timestamp=None,
plot=np.array(img))
@click.command(help=DESCRIPTION_IMAGE_FEATURES)
@add_options(GENERAL_OPTIONS)
@add_options(EXTRACTION_OPTIONS)
@add_options(LABEL_OPTIONS)
@add_options(WRITER_OPTIONS[:-2])
def image_features(**kwargs):
from ..tools.feature_writer import get_writer
configuration = Configuration(plotting=False,
file_type=Filetypes.IMAGE,
**kwargs)
plots = image_reader(configuration.files,
base_path=configuration.input_folder)
log.info('Loading model and weights...')
extractor = configuration.extractor(images=plots,
**configuration.extraction_args)
log.info('Extracting features from images...')
writer = get_writer(**configuration.writer_args)
writer.write_features(configuration.files, extractor)
log.info('Done extracting features.')
| 1,778 | 33.882353 | 119 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/tools/feature_writer.py | import csv
from tqdm import tqdm
from .custom_arff import ArffWriter
import logging
log = logging.getLogger(__name__)
class FeatureWriter:
def __init__(self, output, label_dict, labels, continuous_labels,
write_timestamps, no_labels):
self.output = output
self.label_dict = label_dict
self.labels = labels
self.continuous_labels = continuous_labels
self.no_labels = no_labels
self.write_timestamps = write_timestamps
def write_features(self, names, features, hide_progress=False):
raise NotImplementedError('write_features must be implemented!')
def timestamp_and_label(self, file_name, timestamp):
if self.write_timestamps:
labels = self.label_dict[file_name][timestamp] if self.continuous_labels else \
self.label_dict[file_name]
return timestamp, labels
else:
return None, self.label_dict[file_name]
class ArffFeatureWriter(FeatureWriter):
def write_features(self, names, features, hide_progress=False):
        with open(self.output, 'w', newline='') as output_file, tqdm(
                total=len(names),
                disable=hide_progress
                or log.getEffectiveLevel() >= logging.ERROR) as pbar:
writer = None
first = True
for batch in features:
for feature_tuple in batch:
if first:
old_name = feature_tuple.name
first = False
if self.no_labels:
classes = None
else:
classes = [(class_name, '{' + ','.join(class_type) +
'}') if class_type else
(class_name, 'numeric')
for class_name, class_type in self.labels]
if not writer:
attributes = _determine_attributes(
self.write_timestamps, feature_tuple.features,
classes)
writer = ArffWriter(output_file,
'Deep Spectrum Features',
attributes)
time_stamp, label = self.timestamp_and_label(
feature_tuple.name, feature_tuple.timestamp)
row = [feature_tuple.name]
if time_stamp is not None:
row.append(str(time_stamp))
row += (list(map(str, feature_tuple.features)))
if not self.no_labels:
row += label
writer.writerow(row)
if feature_tuple.name != old_name:
pbar.update()
old_name = feature_tuple.name
del feature_tuple
pbar.update()
class CsvFeatureWriter(FeatureWriter):
def write_features(self, names, features, hide_progress=False):
        with open(self.output, 'w', newline='') as output_file, tqdm(
                total=len(names),
                disable=hide_progress
                or log.getEffectiveLevel() >= logging.ERROR) as pbar:
writer = None
first = True
for batch in features:
for feature_tuple in batch:
if first:
old_name = feature_tuple.name
first = False
if self.no_labels:
classes = None
else:
classes = [(class_name, '{' + ','.join(class_type) +
'}') if class_type else
(class_name, 'numeric')
for class_name, class_type in self.labels]
if not writer:
attributes = _determine_attributes(
self.write_timestamps, feature_tuple.features,
classes)
writer = csv.writer(output_file, delimiter=',')
writer.writerow(
[attribute[0] for attribute in attributes])
time_stamp, label = self.timestamp_and_label(
feature_tuple.name, feature_tuple.timestamp)
row = [feature_tuple.name]
if time_stamp is not None:
row.append(time_stamp)
row += (list(map(str, feature_tuple.features)))
if not self.no_labels:
row += label
writer.writerow(row)
if feature_tuple.name != old_name:
pbar.update()
old_name = feature_tuple.name
pbar.update()
def _determine_attributes(timestamp, feature_vector, classes):
if timestamp:
attributes = [('name', 'string'), ('timeStamp', 'numeric')
] + [('neuron_' + str(i), 'numeric')
for i, _ in enumerate(feature_vector)]
else:
attributes = [('name', 'string')
] + [('neuron_' + str(i), 'numeric')
for i, _ in enumerate(feature_vector)]
if classes:
attributes += classes
return attributes
def get_writer(**kwargs):
if kwargs['output'].endswith('.arff'):
return ArffFeatureWriter(**kwargs)
    elif kwargs['output'].endswith('.csv'):
        return CsvFeatureWriter(**kwargs)
    else:
        raise ValueError(
            f"Unsupported output extension for {kwargs['output']}; "
            "use .arff or .csv")
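# Hedged usage sketch (the argument values below are illustrative only):
if __name__ == '__main__':
    demo_writer = get_writer(output='features.csv',
                             label_dict={'dog/1.wav': ['dog']},
                             labels=[('class', {'cat', 'dog'})],
                             continuous_labels=False,
                             write_timestamps=False,
                             no_labels=False)
    print(type(demo_writer).__name__)  # CsvFeatureWriter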
| 5,622 | 40.043796 | 91 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/tools/custom_arff.py | import logging
log = logging.getLogger(__name__)
class ArffWriter():
def __init__(self, file_object, relation_name, attributes):
self.arff_file = file_object
self.relation_name = relation_name
self.attributes = attributes
self._write_header()
def _write_header(self):
self.arff_file.write(' '.join(
['@relation', '\'{}\''.format(self.relation_name) + '\n']))
self.arff_file.write('\n')
for attribute_name, attribute_type in self.attributes:
self.arff_file.write(
' '.join(['@attribute', attribute_name, attribute_type]) +
'\n')
self.arff_file.write('\n')
self.arff_file.write('@data\n')
def writerow(self, row):
self.arff_file.write(','.join(row) + '\n')
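# Hedged usage sketch (file name and attribute list are illustrative):
if __name__ == '__main__':
    with open('demo.arff', 'w', newline='') as f:
        demo_writer = ArffWriter(f, 'Demo', [('name', 'string'),
                                             ('neuron_0', 'numeric'),
                                             ('class', '{cat,dog}')])
        demo_writer.writerow(['dog/1.wav', '0.42', 'dog'])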
| 808 | 30.115385 | 74 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/tools/label_parser.py | import csv
import decimal
from os.path import splitext, normpath
class LabelParser():
def __init__(self,
filepath,
delimiter=',',
timecontinuous=False,
remove_extension=False):
self._timecontinuous = timecontinuous
self._filepath = filepath
self._delimiter = delimiter
self._remove_extension = remove_extension
self.labels = []
self.label_dict = {}
def parse_labels(self):
        # the delimiter is supplied by the caller (typically chosen from the
        # extension of the labels file)
reader = csv.reader(open(self._filepath, newline=''),
delimiter=self._delimiter)
header = next(reader)
first_class_index = 2 if self._timecontinuous else 1
classes = header[first_class_index:]
# a list of distinct labels is needed for deciding on the nominal class values for .arff files
self.labels = [[class_name, []] for class_name in classes]
# parse the label file line by line
for row in reader:
name = splitext(normpath(
row[0]))[0] if self._remove_extension else normpath(row[0])
if self._timecontinuous:
if name not in self.label_dict:
self.label_dict[name] = {}
self.label_dict[name][decimal.Decimal(
row[1])] = row[first_class_index:]
else:
self.label_dict[name] = row[first_class_index:]
for i, label in enumerate(row[first_class_index:]):
if self._is_number(label):
self.labels[i] = (self.labels[i][0], None)
else:
self.labels[i][1].append(label)
self.labels[i] = [
self.labels[i][0],
sorted(list(set(self.labels[i][1])))
]
@staticmethod
def _is_number(s):
try:
            complex(s)  # covers int, float and complex
except ValueError:
return False
return True
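# Hedged usage sketch ('labels.csv' is an assumed file whose header is e.g.
# "name,class" and whose rows look like "dog/1.wav,dog"):
if __name__ == '__main__':
    demo_parser = LabelParser('labels.csv', delimiter=',')
    demo_parser.parse_labels()
    print(demo_parser.labels)      # e.g. [['class', ['cat', 'dog']]]
    print(demo_parser.label_dict)  # e.g. {'dog/1.wav': ['dog']}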
| 2,102 | 32.919355 | 102 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/tools/path.py | import pathlib
from os.path import basename
def get_relative_path(path, prefix):
filepath = pathlib.PurePath(path)
if prefix is None:
return basename(filepath)
else:
filepath = filepath.relative_to(prefix)
return str(filepath) | 264 | 23.090909 | 47 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/tools/__init__.py | 0 | 0 | 0 | py |
|
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/backend/plotting.py | import matplotlib
import io
import warnings
# import librosa.display
# import librosa
import numpy as np
import pathlib
import logging
from imread import imread_from_blob
from os import environ, makedirs
from os.path import basename, join, dirname, splitext
from multiprocessing import cpu_count, Pool
from functools import partial
from collections import namedtuple
from deepspectrum.tools.path import get_relative_path
PlotTuple = namedtuple('PlotTuple', ['name', 'timestamp', 'plot'])
AudioChunk = namedtuple('AudioChunk',
['name', 'samplerate', 'timestamp', 'audio'])
environ['GLOG_minloglevel'] = '2'
label_font = {'family': 'sans-serif', 'size': 14}
font = {'family': 'sans-serif', 'size': 12}
matplotlib.rc('font', **font)
log = logging.getLogger(__name__)
def read_wav_data(wav_file, start=0, end=None, resample=None):
"""
Reads data from a wav-file and converts this data to single channel.
:param wav_file: path to an existing .wav file
:return: np array of audio data, frame rate
"""
import librosa
start = float(start) if start is not None else None
end = float(end) if end is not None else None
y, sr, = librosa.core.load(wav_file,
mono=True,
offset=start,
duration=end,
sr=resample)
log.debug(f'Read audio file {wav_file}. Shape: {y.shape} Samplerate: {sr}')
return y, sr
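# Hedged usage sketch ('example.wav' is an assumed local file): load two
# seconds of mono audio resampled to 16 kHz:
#   y, sr = read_wav_data('example.wav', start=0, end=2, resample=16000)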
def plot_chunk(chunk,
mode='spectrogram',
output_folder=None,
base_path=None,
size=227,
nfft=None,
file_type='png',
labelling=False,
**kwargs):
"""
Plot spectrograms for a chunk of a wav-file using the described parameters.
:param chunk: audio chunk to be plotted.
:param mode: type of audio plot to create.
:param nfft: number of samples for the fast fourier transformation \
(Default: 256)
:param size: size of the spectrogram plot in pixels. Height and width are \
always identical (Default: 227)
:param output_folder: if given, the plot is saved to this path in .png \
format (Default: None)
:param kwargs: keyword args for plotting functions
:return: blob of the spectrogram plot
"""
matplotlib.use('Agg')
import matplotlib.pyplot as plt
filename, sr, ts, audio = chunk
write_index = ts is not None
if not nfft:
nfft = _next_power_of_two(int(sr * 0.025))
log.debug(f'Using nfft={nfft} for the FFT.')
fig = plt.figure(frameon=False, tight_layout=False)
    if not labelling:
        fig.set_size_inches(1, 1)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
spectrogram_axes = PLOTTING_FUNCTIONS[mode](audio, sr, nfft, **kwargs)
if labelling:
original_xlim = spectrogram_axes.get_xlim()
if mode != 'chroma':
kHz_ticks = np.apply_along_axis(lambda x: x / 1000, 0,
spectrogram_axes.get_yticks())
spectrogram_axes.set_yticklabels(kHz_ticks)
spectrogram_axes.set_ylabel('Frequency [kHz]',
fontdict=label_font)
else:
spectrogram_axes.set_ylabel('Pitch Classes',
fontdict=label_font)
if labelling:
spectrogram_axes.set_xticks(spectrogram_axes.get_xticks()[::2])
spectrogram_axes.set_xlabel('Time [s]', fontdict=label_font)
spectrogram_axes.set_xlim(original_xlim)
del audio
fig.add_axes(spectrogram_axes, id='spectrogram')
if labelling:
plt.colorbar(format='%+2.1f dB')
plt.tight_layout()
if output_folder:
        base_name = splitext(get_relative_path(filename, base_path))[0]
        relative_file_name = (f'{base_name}_{ts:g}.{file_type}'
                              if write_index else f'{base_name}.{file_type}')
if base_path is None:
outfile = join(output_folder, basename(relative_file_name))
else:
outfile = join(output_folder, relative_file_name)
log.debug(f'Saving spectrogram plot to {outfile}.')
makedirs(dirname(outfile), exist_ok=True)
fig.savefig(outfile, format=file_type, dpi=size)
buf = io.BytesIO()
fig.savefig(buf, format='png', dpi=size)
buf.seek(0)
fig.clf()
plt.close(fig)
img_blob = buf.read()
buf.close()
try:
img = imread_from_blob(img_blob, 'png')
img = img[:, :, :-1]
log.debug(f'Read spectrogram plot with shape {img.shape}.')
except IOError:
log.error('Error while reading the spectrogram blob.')
return None
return PlotTuple(name=get_relative_path(filename, base_path),
timestamp=ts,
plot=img)
def _generate_chunks_filename_timestamp_wrapper(filepath,
window,
hop,
start=0,
end=None,
resample=None,
nfft=256,
wav_out_folder=None,
base_path=None):
sound_info, sr = read_wav_data(filepath,
start=start,
end=end,
resample=resample)
if not nfft:
nfft = _next_power_of_two(int(sr * 0.025))
if wav_out_folder is not None:
relative_path = get_relative_path(filepath, base_path)
wav_out = join(wav_out_folder, relative_path)
else:
wav_out = None
for idx, audio in enumerate(
_generate_chunks(sound_info, sr, window, hop, start, wav_out)):
if window or hop:
ts = start + idx * hop
else:
ts = None
if len(audio) >= nfft: # cannot plot chunks that are too short
yield AudioChunk(filepath, sr, ts, audio)
def plot_spectrogram(audio_data, sr, nfft=None, delta=None, **kwargs):
import librosa
spectrogram = librosa.stft(audio_data,
n_fft=nfft,
hop_length=int(nfft / 2),
center=False)
if delta:
spectrogram = librosa.feature.delta(spectrogram, order=delta)
spectrogram = librosa.amplitude_to_db(spectrogram, ref=np.max, top_db=None)
return _create_plot(spectrogram, sr, nfft, **kwargs)
def plot_mel_spectrogram(audio_data,
sr,
nfft=None,
melbands=64,
delta=None,
**kwargs):
import librosa
spectrogram = y_limited_spectrogram(audio_data,
sr=sr,
nfft=nfft,
ylim=kwargs['ylim'])
kwargs['scale'] = 'mel'
if delta:
spectrogram = librosa.feature.delta(spectrogram, order=delta)
spectrogram = librosa.feature.melspectrogram(S=np.abs(spectrogram)**2,
sr=sr,
n_mels=melbands)
spectrogram = librosa.power_to_db(spectrogram, ref=np.max, top_db=None)
return _create_plot(spectrogram, sr, nfft, **kwargs)
def plot_chroma(audio_data, sr, nfft=None, delta=None, **kwargs):
import librosa
spectrogram = librosa.stft(audio_data,
n_fft=nfft,
hop_length=int(nfft / 2),
center=False)
spectrogram = librosa.feature.chroma_stft(S=np.abs(spectrogram)**2, sr=sr)
kwargs['scale'] = 'chroma'
if delta:
spectrogram = librosa.feature.delta(spectrogram, order=delta)
return _create_plot(spectrogram, sr, nfft, **kwargs)
def y_limited_spectrogram(audio_data, sr, nfft=None, ylim=None):
import librosa
spectrogram = librosa.stft(audio_data,
n_fft=nfft,
hop_length=int(nfft / 2),
center=False)
if ylim:
relative_limit = ylim * 2 / sr
relative_limit = min(relative_limit, 1)
spectrogram = spectrogram[:int(relative_limit * (1 + nfft / 2)), :]
return spectrogram
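# Worked example: with sr = 16000 Hz and ylim = 4000 Hz, relative_limit is
# 4000 * 2 / 16000 = 0.5, so only the first int(0.5 * (1 + nfft / 2))
# frequency bins (covering 0-4 kHz) are kept.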
def _create_plot(spectrogram,
sr,
nfft,
ylim=None,
cmap='viridis',
scale='linear',
**kwargs):
import librosa.display
if not ylim:
ylim = sr / 2
spectrogram_axes = librosa.display.specshow(spectrogram,
hop_length=int(nfft / 2),
fmax=ylim,
sr=sr,
cmap=cmap,
y_axis=scale,
x_axis='time')
if scale == 'linear':
spectrogram_axes.set_ylim(0, ylim)
return spectrogram_axes
PLOTTING_FUNCTIONS = {
'spectrogram': plot_spectrogram,
'mel': plot_mel_spectrogram,
'chroma': plot_chroma
}
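# Example: PLOTTING_FUNCTIONS['mel'](audio, sr, nfft, ylim=8000) returns the
# matplotlib axes of a mel spectrogram; the keys correspond to the `mode`
# argument of plot_chunk above (note that the 'mel' variant requires `ylim`).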
def _generate_chunks(sound_info, sr, window, hop, start=0, wav_out=None):
import librosa
if not window and not hop:
yield sound_info
return
window_samples = int(window * sr)
hop_samples = int(hop * sr)
for n in range(max(int((len(sound_info)) / hop_samples), 1)):
chunk = sound_info[n *
hop_samples:min(n * hop_samples +
window_samples, len(sound_info))]
if wav_out:
makedirs(dirname(wav_out), exist_ok=True)
chunk_out = f'{splitext(wav_out)[0]}_{(start + n * hop):g}.wav'
librosa.output.write_wav(chunk_out, chunk, sr)
yield chunk
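# Worked example: with sr = 16000, window = 1 and hop = 1, a 2.5 s clip
# (40000 samples) yields max(int(40000 / 16000), 1) = 2 chunks starting at
# samples 0 and 16000; the trailing 0.5 s is dropped.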
def _next_power_of_two(x):
return 1 << (x - 1).bit_length()
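# Worked example for the default FFT size: at sr = 16000 Hz a 25 ms window
# spans int(16000 * 0.025) = 400 samples and _next_power_of_two(400) == 512,
# so plot_chunk falls back to nfft = 512 for 16 kHz input.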
class PlotGenerator():
def __init__(self,
files,
output_spectrograms=None,
output_wavs=None,
number_of_processes=None,
base_path=None,
**kwargs):
self.files = files
self.number_of_processes = number_of_processes
if output_spectrograms:
makedirs(output_spectrograms, exist_ok=True)
if output_wavs:
makedirs(output_wavs, exist_ok=True)
if not self.number_of_processes:
self.number_of_processes = cpu_count()
self.chunks = (
chunk for filename in self.files
for chunk in _generate_chunks_filename_timestamp_wrapper(
filename,
wav_out_folder=output_wavs,
window=kwargs['window'],
hop=kwargs['hop'],
start=kwargs['start'],
end=kwargs['end'],
nfft=kwargs['nfft'],
resample=kwargs['resample'],
base_path=base_path))
plotting_func = partial(plot_chunk,
output_folder=output_spectrograms,
base_path=base_path,
**kwargs)
self.pool = Pool(processes=self.number_of_processes)
self.plots = self.pool.imap(plotting_func, self.chunks)
def __len__(self):
return len(self.files)
def __iter__(self):
return self
def __next__(self):
try:
return next(self.plots)
except StopIteration:
self.pool.close()
self.pool.join()
raise StopIteration
| 12,163 | 35.310448 | 193 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/backend/extractor.py | import gc
from collections import namedtuple
import numpy as np
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
import torch
from torchvision import models, transforms
from PIL import Image
import logging
tf.compat.v1.logging.set_verbosity(logging.ERROR)
log = logging.getLogger(__name__)
tf.compat.v1.keras.backend.clear_session()
log.debug(f'Collected garbage {gc.collect()}')  # a nonzero count means the collector freed objects
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)
FeatureTuple = namedtuple("FeatureTuple", ["name", "timestamp", "features"])
eps = 1e-8
def mask(func):
def mask_loss_function(*args, **kwargs):
mask = tf.cast(tf.not_equal(tf.sign(args[0]), -1), tf.float32) + eps
return func(args[0] * mask, args[1] * mask)
return mask_loss_function
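# Usage sketch (mirrors the custom_objects entry in KerasExtractor below):
# wrap a stock Keras loss so targets flagged with -1 are masked out:
#   masked_cce = mask(tf.keras.losses.categorical_crossentropy)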
class Extractor:
def __init__(self, images, batch_size):
self.batch_size = batch_size
self.set_images(images)
def __len__(self):
return len(self.images)
def __iter__(self):
return self
def __next__(self):
try:
return self.extract_features(next(self.images))
except StopIteration:
raise StopIteration
def set_images(self, images):
self.images = _batch_images(images, batch_size=self.batch_size)
def extract_features(self, images):
        raise NotImplementedError(
            "Feature extractor must implement "
            "'extract_features(self, images)'!")
class KerasExtractor(Extractor):
@staticmethod
def __resize(x, target_size=(224, 224)):
if (x.shape[1], x.shape[2]) != target_size:
x = np.array([
np.array(
Image.fromarray(image, mode="RGB").resize(target_size))
for image in x
])
return x
@staticmethod
def __preprocess_vgg(x):
x = x[:, :, :, ::-1]
return x
@staticmethod
def __preprocess_default(x):
x = x.astype(np.float32)
x /= 127.5
x -= 1.
return x
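    # Worked example: uint8 pixels are mapped to [-1, 1]: 0 -> -1.0,
    # 127.5 -> 0.0, 255 -> 1.0 (the scaling used by e.g. Inception-style
    # backbones).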
def __init__(self,
images,
model_key,
layer,
weights_path="imagenet",
batch_size=256):
super().__init__(images, batch_size)
# reset_keras()
self.models = {
"vgg16":
tf.keras.applications.vgg16.VGG16,
"vgg19":
tf.keras.applications.vgg19.VGG19,
"resnet50":
tf.keras.applications.resnet50.ResNet50,
"xception":
tf.keras.applications.xception.Xception,
"inception_v3":
tf.keras.applications.inception_v3,
"densenet121":
tf.keras.applications.densenet.DenseNet121,
"densenet169":
tf.keras.applications.densenet.DenseNet169,
"densenet201":
tf.keras.applications.densenet.DenseNet201,
"mobilenet":
tf.keras.applications.mobilenet.MobileNet,
"mobilenet_v2":
tf.keras.applications.mobilenet_v2.MobileNetV2,
"nasnet_large":
tf.keras.applications.nasnet.NASNetLarge,
"nasnet_mobile":
tf.keras.applications.nasnet.NASNetMobile,
"inception_resnet_v2":
tf.keras.applications.inception_resnet_v2.InceptionResNetV2,
}
self.preprocessors = {
"vgg16":
self.__preprocess_vgg,
"vgg19":
self.__preprocess_vgg,
"resnet50":
tf.keras.applications.resnet50.preprocess_input,
"xception":
tf.keras.applications.xception.preprocess_input,
"inception_v3":
tf.keras.applications.inception_v3,
"densenet121":
tf.keras.applications.densenet.preprocess_input,
"densenet169":
tf.keras.applications.densenet.preprocess_input,
"densenet201":
tf.keras.applications.densenet.preprocess_input,
"mobilenet":
tf.keras.applications.mobilenet.preprocess_input,
"mobilenet_v2":
tf.keras.applications.mobilenet_v2.preprocess_input,
"nasnet_large":
tf.keras.applications.nasnet.preprocess_input,
"nasnet_mobile":
tf.keras.applications.nasnet.preprocess_input,
"inception_resnet_v2":
tf.keras.applications.inception_resnet_v2.preprocess_input,
}
self.layer = layer
if model_key in self.models:
base_model = self.models[model_key](weights=weights_path)
self.preprocess = self.preprocessors[model_key]
else:
log.info(
f'{model_key} not available in Keras Applications. Trying to load model file from {weights_path}.'
)
base_model = tf.keras.models.load_model(
weights_path,
custom_objects={
'mask_loss_function':
mask(tf.keras.losses.categorical_crossentropy)
})
self.preprocess = self.__preprocess_default
if log.getEffectiveLevel() < logging.INFO:
base_model.summary()
self.layers = [layer.name for layer in base_model.layers]
assert (layer in self.layers
), f"Invalid layer key. Available layers: {self.layers}"
inputs = base_model.input
outputs = (base_model.get_layer(layer)
if not hasattr(base_model.get_layer(layer), "output") else
base_model.get_layer(layer).output)
self.model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
def extract_features(self, tuple_batch):
name_batch, ts_batch, image_batch = tuple_batch
image_batch = self.__resize(image_batch,
target_size=self.model.input.shape[1:-1])
image_batch = self.preprocess(image_batch)
feature_batch = self.model.predict(image_batch)
dim = np.prod(feature_batch.shape[1:])
feature_batch = np.reshape(feature_batch, [-1, dim])
return map(FeatureTuple._make, zip(name_batch, ts_batch,
feature_batch))
class PytorchExtractor(Extractor):
@staticmethod
def __preprocess_alexnet(x):
preprocess = transforms.Compose(
[transforms.Resize(227),
transforms.ToTensor()])
x = torch.stack(
[preprocess(Image.fromarray(image, mode="RGB")) for image in x])
return x
@staticmethod
def __preprocess_squeezenet(x):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
preprocess = transforms.Compose(
[transforms.Resize(224),
transforms.ToTensor(), normalize])
x = torch.stack(
[preprocess(Image.fromarray(image, mode="RGB")) for image in x])
return x
@staticmethod
def __preprocess_googlenet(x):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
preprocess = transforms.Compose(
[transforms.Resize(224),
transforms.ToTensor(), normalize])
x = torch.stack(
[preprocess(Image.fromarray(image, mode="RGB")) for image in x])
return x
def __init__(self, images, model_key, layer, batch_size=256):
super().__init__(images, batch_size)
self.models = {
"alexnet": models.alexnet,
"squeezenet": models.squeezenet1_1,
"googlenet": models.googlenet
}
self.preprocessors = {
"alexnet": self.__preprocess_alexnet,
"squeezenet": self.__preprocess_squeezenet,
"googlenet": self.__preprocess_googlenet
}
self.layer = layer
self.model_key = model_key
self.model, self.feature_layer, self.output_size = self.__build_model(
layer)
def __build_model(self, layer):
assert (self.model_key in self.models
), f"Invalid model for pytorch extractor. Available models: \
{self.models}"
base_model = self.models[self.model_key](pretrained=True)
base_model.eval()
if self.model_key == "alexnet":
log.debug(f'Layout of base model: \n{base_model}')
layers = {"fc6": -5, "fc7": -2}
assert (layer in layers
), f"Invalid layer key. Available layers: {sorted(layers.keys())}"
feature_layer = base_model.classifier[layers[layer]]
return base_model, feature_layer, (4096, )
elif self.model_key == "squeezenet":
log.info(
f'Disregarding user choice of feature layer: Only one layer is currently available for squeezenet.'
)
base_model = torch.nn.Sequential(
base_model.features,
torch.nn.AdaptiveAvgPool2d(output_size=(2, 2)))
feature_layer = base_model[-1]
log.debug(f'Layout of model: \n{base_model}')
return base_model, feature_layer, (512, 2, 2)
elif self.model_key == "googlenet":
layers = {"avgpool": base_model.avgpool, "fc": base_model.fc}
assert (layer in layers
), f"Invalid layer key. Available layers: {sorted(layers.keys())}"
feature_layer = layers[layer]
log.debug(f'Layout of model: \n{base_model}')
return base_model, feature_layer, (1024, 1, 1)
else:
pass
def extract_features(self, tuple_batch):
name_batch, ts_batch, image_batch = tuple_batch
image_batch = self.preprocessors[self.model_key](image_batch)
feature_vec = torch.zeros(image_batch.shape[0], *self.output_size)
def copy_data(m, i, o):
feature_vec.copy_(o.data)
        hook = self.feature_layer.register_forward_hook(copy_data)
        with torch.no_grad():
            _ = self.model(image_batch)
        hook.remove()
feature_batch = feature_vec.numpy()
dim = np.prod(feature_batch.shape[1:])
feature_batch = np.reshape(feature_batch, [-1, dim])
return map(FeatureTuple._make, zip(name_batch, ts_batch,
feature_batch))
def _batch_images(images, batch_size=256):
current_name_batch = []
current_ts_batch = []
current_image_batch = []
index = 0
for plot_tuple in images:
name, ts, image = plot_tuple
current_name_batch.append(name)
current_ts_batch.append(ts)
current_image_batch.append(image)
del image
if (index + 1) % batch_size == 0:
name_batch, ts_batch, image_batch = (
current_name_batch,
current_ts_batch,
np.array(current_image_batch, dtype=np.uint8),
)
current_name_batch = []
current_ts_batch = []
current_image_batch = []
gc.collect()
yield (name_batch, ts_batch, image_batch)
index += 1
if current_name_batch:
name_batch, ts_batch, image_batch = (
current_name_batch,
current_ts_batch,
np.array(current_image_batch, dtype=np.uint8),
)
gc.collect()
yield (name_batch, ts_batch, image_batch)
else:
gc.collect()
return
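# Hedged sketch: batching three dummy 4x4 plots into batches of two.
if __name__ == '__main__':
    dummy_plots = [(f'file_{i}', None, np.zeros((4, 4, 3), dtype=np.uint8))
                   for i in range(3)]
    for names, stamps, images in _batch_images(dummy_plots, batch_size=2):
        print(names, images.shape)  # a batch of 2, then a final batch of 1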
| 11,743 | 33.745562 | 115 | py |
DeepSpectrum | DeepSpectrum-master/src/deepspectrum/backend/__init__.py | 0 | 0 | 0 | py |
|
DeepSpectrum | DeepSpectrum-master/tests/__init__.py | 0 | 0 | 0 | py |
|
DeepSpectrum | DeepSpectrum-master/tests/cli/test_features.py | from multiprocessing import cpu_count
from deepspectrum.__main__ import cli
from click.testing import CliRunner
from os.path import dirname, join
from os import listdir
cur_dir = dirname(__file__)
examples = join(dirname(dirname(cur_dir)), 'examples')
def test_features_file_level(tmpdir):
runner = CliRunner()
result = runner.invoke(cli,
args=[
'-vv', 'features',
join(examples, 'audio'), '-c',
join(tmpdir, 'deep.conf'), '-o',
join(tmpdir, 'features.csv')
])
assert 'Please initialize your configuration file' in result.output
assert result.exit_code == 1
result = runner.invoke(cli,
args=[
'-vv', 'features',
join(examples, 'audio'), '-np',
cpu_count(), '-cm', 'viridis', '-o',
join(tmpdir, 'features.csv'), '-so',
join(tmpdir,
'spectrograms'), '-en', 'squeezenet',
'-sr', 16000, '-m', 'mel', '-fs', 'mel', '-c',
join(tmpdir, 'deep.conf')
])
print(result.output)
print(listdir(join(tmpdir, 'spectrograms')))
assert 'Done' in result.output
assert result.exit_code == 0
def test_features_file_level_parser(tmpdir):
runner = CliRunner()
result = runner.invoke(cli,
args=[
'-vv', 'features-with-parser',
join(examples, 'audio'), '-c',
join(tmpdir, 'deep.conf'), '-o',
join(tmpdir, 'features.csv')
])
assert 'Please initialize your configuration file' in result.output
assert result.exit_code == 1
result = runner.invoke(cli,
args=[
'-vv', 'features-with-parser',
join(examples, 'audio'), '-np',
cpu_count(), '-cm', 'viridis', '-o',
join(tmpdir, 'features.csv'), '-so',
join(tmpdir,
'spectrograms'), '-en', 'squeezenet',
'-sr', 16000, '-m', 'mel', '-fs', 'mel', '-c',
join(tmpdir, 'deep.conf')
])
print(result.output)
print(listdir(join(tmpdir, 'spectrograms')))
assert 'Done' in result.output
assert result.exit_code == 0
def test_features_file_level_single_file(tmpdir):
runner = CliRunner()
result = runner.invoke(cli,
args=[
'-vv', 'features',
join(examples, 'audio', 'dog', '1.flac'), '-np',
cpu_count(), '-cm', 'viridis', '-o',
join(tmpdir, 'features-single-file.csv'), '-so',
join(tmpdir,
'spectrograms'), '-en', 'alexnet', '-sr',
16000, '-m', 'mel', '-fs', 'mel', '-fl', 'fc7'
])
print(result.output)
print(listdir(join(tmpdir, 'spectrograms')))
assert 'Done' in result.output
assert result.exit_code == 0
def test_features_time_continuous(tmpdir):
runner = CliRunner()
result = runner.invoke(cli,
args=[
'-vv', 'features',
join(examples, 'audio'), '-np',
cpu_count(), '-cm', 'twilight', '-o',
join(tmpdir, 'features-tc.csv'), '-en', 'vgg16',
'-sr', 16000, '-m', 'chroma', '-t', '1', '1',
'-tc', '-s', 0, '-e', '2', '-lf',
join(
examples,
'labels',
'time-continuous.csv',
), '-fl', 'fc1'
])
print(result.output)
assert 'Done' in result.output
assert result.exit_code == 0
| 4,456 | 41.855769 | 79 | py |
DeepSpectrum | DeepSpectrum-master/tests/cli/test_image_features.py | from click.testing import CliRunner
from deepspectrum.__main__ import cli
from multiprocessing import cpu_count
from os.path import join, dirname
cur_dir = dirname(__file__)
examples = join(dirname(dirname(cur_dir)), 'examples')
def test_image_features(tmpdir):
runner = CliRunner()
result = runner.invoke(cli,
args=[
'-vv', 'image-features',
join(examples, 'pictures'), '-np',
cpu_count(), '-o',
join(tmpdir, 'image-features.arff'), '-en',
'vgg16', '-el', 'justAnimals'
])
assert 'Total params' in result.output
assert 'Done' in result.output
assert result.exit_code == 0
| 805 | 34.043478 | 74 | py |
DeepSpectrum | DeepSpectrum-master/tests/cli/test_plot.py | from click.testing import CliRunner
from deepspectrum.__main__ import cli
from multiprocessing import cpu_count
from os.path import join, dirname
cur_dir = dirname(__file__)
examples = join(dirname(dirname(cur_dir)), 'examples')
def test_plot(tmpdir):
runner = CliRunner()
result = runner.invoke(cli,
args=[
'-vv', 'plot',
join(examples, 'audio'), '-np',
cpu_count(), '-cm', 'twilight', '-so',
join(tmpdir, 'pretty-spectrograms'), '-sr',
16000, '-m', 'mel', '-fs', 'spectrogram', '-fs',
'log', '-ppdfs', '-d', '1', '-wo',
join(tmpdir, 'wav-chunks'), '-t', '1', '1',
'-fql', '12000'
])
assert 'Done' in result.output
assert result.exit_code == 0
| 966 | 37.68 | 79 | py |
DeepSpectrum | DeepSpectrum-master/tests/cli/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/main.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import argparse
import copy
import logging
import submitit
from ocpmodels.common.flags import flags
from ocpmodels.common.utils import (
build_config,
create_grid,
new_trainer_context,
save_experiment_log,
setup_logging,
)
class Runner(submitit.helpers.Checkpointable):
def __init__(self) -> None:
self.config = None
def __call__(self, config) -> None:
with new_trainer_context(args=args, config=config) as ctx:
self.config = ctx.config
self.task = ctx.task
self.trainer = ctx.trainer
self.task.setup(self.trainer)
self.task.run()
def checkpoint(self, *args, **kwargs):
new_runner = Runner()
self.trainer.save(checkpoint_file="checkpoint.pt", training_state=True)
self.config["checkpoint"] = self.task.chkpt_path
self.config["timestamp_id"] = self.trainer.timestamp_id
if self.trainer.logger is not None:
self.trainer.logger.mark_preempting()
return submitit.helpers.DelayedSubmission(new_runner, self.config)
if __name__ == "__main__":
setup_logging()
parser: argparse.ArgumentParser = flags.get_parser()
args, override_args = parser.parse_known_args()
config = build_config(args, override_args)
if args.submit: # Run on cluster
slurm_add_params = config.get(
"slurm", None
) # additional slurm arguments
if args.sweep_yml: # Run grid search
configs = create_grid(config, args.sweep_yml)
else:
configs = [config]
logging.info(f"Submitting {len(configs)} jobs")
executor = submitit.AutoExecutor(
folder=args.logdir / "%j", slurm_max_num_timeout=3
)
executor.update_parameters(
name=args.identifier,
mem_gb=args.slurm_mem,
timeout_min=args.slurm_timeout * 60,
slurm_partition=args.slurm_partition,
gpus_per_node=args.num_gpus,
cpus_per_task=(config["optim"]["num_workers"] + 1),
tasks_per_node=(args.num_gpus if args.distributed else 1),
nodes=args.num_nodes,
slurm_additional_parameters=slurm_add_params,
)
for config in configs:
config["slurm"] = copy.deepcopy(executor.parameters)
config["slurm"]["folder"] = str(executor.folder)
jobs = executor.map_array(Runner(), configs)
logging.info(
f"Submitted jobs: {', '.join([job.job_id for job in jobs])}"
)
log_file = save_experiment_log(args, jobs, configs)
logging.info(f"Experiment log saved to: {log_file}")
else: # Run locally
Runner()(config)
| 2,904 | 31.277778 | 79 | py |
ocp | ocp-main/setup.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from setuptools import find_packages, setup
setup(
name="ocp-models",
version="0.0.3",
description="Machine learning models for use in catalysis as part of the Open Catalyst Project",
url="https://github.com/Open-Catalyst-Project/ocp",
packages=find_packages(),
include_package_data=True,
)
| 495 | 26.555556 | 100 | py |
ocp | ocp-main/scripts/preprocess_relaxed.py | """
Creates LMDB files with extracted graph features from provided *.traj files
of ML-relaxed structures (e.g. for evaluating relaxed structures).
"""
import argparse
import glob
import multiprocessing as mp
import os
import pickle
import random
import sys
import ase.io
import lmdb
import numpy as np
import torch
from tqdm import tqdm
from ocpmodels.preprocessing import AtomsToGraphs
def write_images_to_lmdb(mp_arg) -> None:
a2g, db_path, samples, pid = mp_arg
db = lmdb.open(
db_path,
map_size=1099511627776 * 2,
subdir=False,
meminit=False,
map_async=True,
)
pbar = tqdm(
total=len(samples),
position=pid,
desc="Preprocessing data into LMDBs",
)
idx = 0
for sample in samples:
ml_relaxed = ase.io.read(sample, "-1")
data_object = a2g.convert(ml_relaxed)
sid, _ = os.path.splitext(os.path.basename(sample))
fid = -1
# add atom tags
data_object.tags = torch.LongTensor(ml_relaxed.get_tags())
data_object.sid = int(sid)
data_object.fid = fid
txn = db.begin(write=True)
txn.put(
f"{idx}".encode("ascii"),
pickle.dumps(data_object, protocol=-1),
)
txn.commit()
idx += 1
pbar.update(1)
# Save count of objects in lmdb.
txn = db.begin(write=True)
txn.put("length".encode("ascii"), pickle.dumps(idx, protocol=-1))
txn.commit()
db.sync()
db.close()
def main(args, split) -> None:
systems = glob.glob(f"{eval(f'args.{split}')}/*.traj")
systems_chunked = np.array_split(systems, args.num_workers)
# Initialize feature extractor.
a2g = AtomsToGraphs(
max_neigh=50,
radius=6,
r_energy=False,
r_forces=False,
r_distances=False,
r_fixed=True,
r_edges=True,
)
# Create output directory if it doesn't exist.
out_path = f"{args.out_path}_{split}"
os.makedirs(out_path, exist_ok=True)
# Initialize lmdb paths
db_paths = [
os.path.join(out_path, "data.%04d.lmdb" % i)
for i in range(args.num_workers)
]
pool = mp.Pool(args.num_workers)
mp_args = [
(
a2g,
db_paths[i],
systems_chunked[i],
i,
)
for i in range(args.num_workers)
]
list(pool.imap(write_images_to_lmdb, mp_args))
pool.close()
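# Hedged example invocation (paths are placeholders for directories of *.traj
# files produced by ML relaxations):
#   python preprocess_relaxed.py --id id_trajs --ood-ads ads_trajs \
#       --ood-cat cat_trajs --ood-both both_trajs --out-path data/is2re_relaxed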
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--id",
required=True,
help="Path to ID trajectories",
)
parser.add_argument(
"--ood-ads",
required=True,
help="Path to OOD-Ads trajectories",
)
parser.add_argument(
"--ood-cat",
required=True,
help="Path to OOD-Cat trajectories",
)
parser.add_argument(
"--ood-both",
required=True,
help="Path to OOD-Both trajectories",
)
parser.add_argument(
"--out-path",
required=True,
help="Directory to save extracted features. Will create if doesn't exist",
)
parser.add_argument(
"--num-workers",
type=int,
default=1,
help="No. of feature-extracting processes.",
)
args: argparse.Namespace = parser.parse_args()
for split in ["id", "ood_ads", "ood_cat", "ood_both"]:
main(args, split)
| 3,388 | 22.212329 | 82 | py |
ocp | ocp-main/scripts/download_data.py | import argparse
import glob
import logging
import os
from typing import Dict, Optional
import ocpmodels
"""
This script provides users with an automated way to download, preprocess (where
applicable), and organize data to readily be used by the existing config files.
"""
DOWNLOAD_LINKS_s2ef: Dict[str, Dict[str, str]] = {
"s2ef": {
"200k": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_train_200K.tar",
"2M": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_train_2M.tar",
"20M": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_train_20M.tar",
"all": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_train_all.tar",
"val_id": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_val_id.tar",
"val_ood_ads": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_val_ood_ads.tar",
"val_ood_cat": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_val_ood_cat.tar",
"val_ood_both": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_val_ood_both.tar",
"test": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_test_lmdbs.tar.gz",
"rattled": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_rattled.tar",
"md": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_md.tar",
},
}
DOWNLOAD_LINKS_is2re: Dict[str, str] = {
"is2re": "https://dl.fbaipublicfiles.com/opencatalystproject/data/is2res_train_val_test_lmdbs.tar.gz",
}
S2EF_COUNTS = {
"s2ef": {
"200k": 200000,
"2M": 2000000,
"20M": 20000000,
"all": 133934018,
"val_id": 999866,
"val_ood_ads": 999838,
"val_ood_cat": 999809,
"val_ood_both": 999944,
"rattled": 16677031,
"md": 38315405,
},
}
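# Hedged example invocations (see the argparse flags in __main__ below; the
# s2ef task additionally requires --split):
#   python download_data.py --task is2re
#   python download_data.py --task s2ef --split 200k --num-workers 8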
def get_data(
datadir: str, task: str, split: Optional[str], del_intmd_files: bool
) -> None:
os.makedirs(datadir, exist_ok=True)
if task == "s2ef" and split is None:
raise NotImplementedError("S2EF requires a split to be defined.")
download_link: Optional[str] = None
if task == "s2ef":
assert (
split is not None
), "Split must be defined for the s2ef dataset task"
assert (
split in DOWNLOAD_LINKS_s2ef[task]
), f'S2EF "{split}" split not defined, please specify one of the following: {list(DOWNLOAD_LINKS_s2ef["s2ef"].keys())}'
download_link = DOWNLOAD_LINKS_s2ef[task][split]
elif task == "is2re":
download_link = DOWNLOAD_LINKS_is2re[task]
else:
raise Exception(f"Unrecognized task {task}")
assert download_link is not None
os.system(f"wget {download_link} -P {datadir}")
filename = os.path.join(datadir, os.path.basename(download_link))
logging.info("Extracting contents...")
os.system(f"tar -xvf {filename} -C {datadir}")
dirname = os.path.join(
datadir,
os.path.basename(filename).split(".")[0],
)
if task == "s2ef" and split != "test":
assert (
split is not None
), "Split must be defined for the s2ef dataset task"
compressed_dir = os.path.join(dirname, os.path.basename(dirname))
if split in ["200k", "2M", "20M", "all", "rattled", "md"]:
output_path = os.path.join(datadir, task, split, "train")
else:
output_path = os.path.join(datadir, task, "all", split)
uncompressed_dir = uncompress_data(compressed_dir)
preprocess_data(uncompressed_dir, output_path)
verify_count(output_path, task, split)
if task == "s2ef" and split == "test":
os.system(f"mv {dirname}/test_data/s2ef/all/test_* {datadir}/s2ef/all")
elif task == "is2re":
os.system(f"mv {dirname}/data/is2re {datadir}")
if del_intmd_files:
cleanup(filename, dirname)
def uncompress_data(compressed_dir: str) -> str:
import uncompress
parser = uncompress.get_parser()
args, _ = parser.parse_known_args()
args.ipdir = compressed_dir
args.opdir = os.path.dirname(compressed_dir) + "_uncompressed"
uncompress.main(args)
return args.opdir
def preprocess_data(uncompressed_dir: str, output_path: str) -> None:
import preprocess_ef as preprocess
parser = preprocess.get_parser()
args, _ = parser.parse_known_args()
args.data_path = uncompressed_dir
args.out_path = output_path
preprocess.main(args)
def verify_count(output_path: str, task: str, split: str) -> None:
paths = glob.glob(os.path.join(output_path, "*.txt"))
count = 0
    for path in paths:
        with open(path, "r") as f:
            count += len(f.read().splitlines())
assert (
count == S2EF_COUNTS[task][split]
), f"S2EF {split} count incorrect, verify preprocessing has completed successfully."
def cleanup(filename: str, dirname: str) -> None:
import shutil
if os.path.exists(filename):
os.remove(filename)
if os.path.exists(dirname):
shutil.rmtree(dirname)
if os.path.exists(dirname + "_uncompressed"):
shutil.rmtree(dirname + "_uncompressed")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, help="Task to download")
parser.add_argument(
"--split", type=str, help="Corresponding data split to download"
)
parser.add_argument(
"--keep",
action="store_true",
help="Keep intermediate directories and files upon data retrieval/processing",
)
# Flags for S2EF train/val set preprocessing:
parser.add_argument(
"--get-edges",
action="store_true",
help="Store edge indices in LMDB, ~10x storage requirement. Default: compute edge indices on-the-fly.",
)
parser.add_argument(
"--num-workers",
type=int,
default=1,
help="No. of feature-extracting processes or no. of dataset chunks",
)
parser.add_argument(
"--ref-energy", action="store_true", help="Subtract reference energies"
)
parser.add_argument(
"--data-path",
type=str,
default=os.path.join(os.path.dirname(ocpmodels.__path__[0]), "data"),
help="Specify path to save dataset. Defaults to 'ocpmodels/data'",
)
args, _ = parser.parse_known_args()
get_data(
datadir=args.data_path,
task=args.task,
split=args.split,
del_intmd_files=not args.keep,
)
| 6,512 | 34.016129 | 127 | py |
ocp | ocp-main/scripts/make_submission_file.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import argparse
import glob
import os
import numpy as np
SPLITS = {
"OC20": ["id", "ood_ads", "ood_cat", "ood_both"],
"OC22": ["id", "ood"],
}
def write_is2re_relaxations(args) -> None:
import ase.io
from tqdm import tqdm
submission_file = {}
if not args.hybrid:
for split in SPLITS[args.dataset]:
ids = []
energies = []
systems = glob.glob(os.path.join(vars(args)[split], "*.traj"))
for system in tqdm(systems):
sid, _ = os.path.splitext(os.path.basename(system))
ids.append(str(sid))
# Read the last frame in the ML trajectory. Modify "-1" if you wish to modify which frame to use.
traj = ase.io.read(system, "-1")
energies.append(traj.get_potential_energy())
submission_file[f"{split}_ids"] = np.array(ids)
submission_file[f"{split}_energy"] = np.array(energies)
else:
for split in SPLITS[args.dataset]:
preds = np.load(vars(args)[split])
ids = []
energies = []
for sid, energy in zip(preds["ids"], preds["energy"]):
sid = sid.split("_")[0]
ids.append(sid)
energies.append(energy)
submission_file[f"{split}_ids"] = np.array(ids)
submission_file[f"{split}_energy"] = np.array(energies)
np.savez_compressed(args.out_path, **submission_file)
def write_predictions(args) -> None:
if args.is2re_relaxations:
write_is2re_relaxations(args)
else:
submission_file = {}
for split in SPLITS[args.dataset]:
res = np.load(vars(args)[split], allow_pickle=True)
contents = res.files
for i in contents:
key = "_".join([split, i])
submission_file[key] = res[i]
np.savez_compressed(args.out_path, **submission_file)
def main(args: argparse.Namespace) -> None:
for split in SPLITS[args.dataset]:
assert vars(args).get(
split
), f"Missing {split} split for {args.dataset}"
if not args.out_path.endswith(".npz"):
args.out_path = args.out_path + ".npz"
write_predictions(args)
print(f"Results saved to {args.out_path} successfully.")
if __name__ == "__main__":
"""
Create a submission file for evalAI. Ensure that for the task you are
submitting for you have generated results files on each of the splits:
OC20: id, ood_ads, ood_cat, ood_both
OC22: id, ood
Results file can be obtained as follows for the various tasks:
S2EF: config["mode"] = "predict"
IS2RE: config["mode"] = "predict"
IS2RS: config["mode"] = "run-relaxations" and config["task"]["write_pos"] = True
Use this script to join the results files (4 for OC20, 2 for OC22) in the format evalAI expects
submissions.
If writing IS2RE predictions from relaxations, paths must be directories
containg trajectory files. Additionally, --is2re-relaxations must be
provided as a command line argument.
If writing IS2RE predictions from hybrid relaxations (force only model +
energy only model), paths must be the .npz S2EF prediction files.
Additionally, --is2re-relaxations and --hybrid must be provided as a
command line argument.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--id", help="Path to ID results. Required for OC20 and OC22."
)
parser.add_argument(
"--ood-ads", help="Path to OOD-Ads results. Required only for OC20."
)
parser.add_argument(
"--ood-cat", help="Path to OOD-Cat results. Required only for OC20."
)
parser.add_argument(
"--ood-both", help="Path to OOD-Both results. Required only for OC20."
)
parser.add_argument(
"--ood", help="Path to OOD OC22 results. Required only for OC22."
)
parser.add_argument("--out-path", help="Path to write predictions to.")
parser.add_argument(
"--is2re-relaxations",
action="store_true",
help="Write IS2RE results from trajectories. Paths specified correspond to directories containing .traj files.",
)
parser.add_argument(
"--hybrid",
action="store_true",
help="Write IS2RE results from S2EF prediction files. Paths specified correspond to S2EF NPZ files.",
)
parser.add_argument(
"--dataset",
type=str,
default="OC20",
choices=["OC20", "OC22"],
help="Which dataset to write a prediction file for, OC20 or OC22.",
)
args: argparse.Namespace = parser.parse_args()
main(args)
| 4,862 | 31.637584 | 120 | py |
ocp | ocp-main/scripts/preprocess_ef.py | """
Creates LMDB files with extracted graph features from provided *.extxyz files
for the S2EF task.
"""
import argparse
import glob
import multiprocessing as mp
import os
import pickle
import random
import sys
import ase.io
import lmdb
import numpy as np
import torch
from tqdm import tqdm
from ocpmodels.preprocessing import AtomsToGraphs
def write_images_to_lmdb(mp_arg):
a2g, db_path, samples, sampled_ids, idx, pid, args = mp_arg
db = lmdb.open(
db_path,
map_size=1099511627776 * 2,
subdir=False,
meminit=False,
map_async=True,
)
pbar = tqdm(
total=5000 * len(samples),
position=pid,
desc="Preprocessing data into LMDBs",
)
for sample in samples:
traj_logs = open(sample, "r").read().splitlines()
xyz_idx = os.path.splitext(os.path.basename(sample))[0]
traj_path = os.path.join(args.data_path, f"{xyz_idx}.extxyz")
traj_frames = ase.io.read(traj_path, ":")
for i, frame in enumerate(traj_frames):
frame_log = traj_logs[i].split(",")
sid = int(frame_log[0].split("random")[1])
fid = int(frame_log[1].split("frame")[1])
data_object = a2g.convert(frame)
# add atom tags
data_object.tags = torch.LongTensor(frame.get_tags())
data_object.sid = sid
data_object.fid = fid
# subtract off reference energy
if args.ref_energy and not args.test_data:
ref_energy = float(frame_log[2])
data_object.y -= ref_energy
txn = db.begin(write=True)
txn.put(
f"{idx}".encode("ascii"),
pickle.dumps(data_object, protocol=-1),
)
txn.commit()
idx += 1
sampled_ids.append(",".join(frame_log[:2]) + "\n")
pbar.update(1)
# Save count of objects in lmdb.
txn = db.begin(write=True)
txn.put("length".encode("ascii"), pickle.dumps(idx, protocol=-1))
txn.commit()
db.sync()
db.close()
return sampled_ids, idx
def main(args: argparse.Namespace) -> None:
xyz_logs = glob.glob(os.path.join(args.data_path, "*.txt"))
if not xyz_logs:
raise RuntimeError("No *.txt files found. Did you uncompress?")
if args.num_workers > len(xyz_logs):
args.num_workers = len(xyz_logs)
# Initialize feature extractor.
a2g = AtomsToGraphs(
max_neigh=50,
radius=6,
r_energy=not args.test_data,
r_forces=not args.test_data,
r_fixed=True,
r_distances=False,
r_edges=args.get_edges,
)
# Create output directory if it doesn't exist.
os.makedirs(os.path.join(args.out_path), exist_ok=True)
# Initialize lmdb paths
db_paths = [
os.path.join(args.out_path, "data.%04d.lmdb" % i)
for i in range(args.num_workers)
]
# Chunk the trajectories into args.num_workers splits
chunked_txt_files = np.array_split(xyz_logs, args.num_workers)
# Extract features
    sampled_ids = [[] for _ in range(args.num_workers)]
    idx = [0] * args.num_workers
pool = mp.Pool(args.num_workers)
mp_args = [
(
a2g,
db_paths[i],
chunked_txt_files[i],
sampled_ids[i],
idx[i],
i,
args,
)
for i in range(args.num_workers)
]
op = list(zip(*pool.imap(write_images_to_lmdb, mp_args)))
sampled_ids, idx = list(op[0]), list(op[1])
# Log sampled image, trajectory trace
    for i in range(args.num_workers):
        with open(
            os.path.join(args.out_path, "data_log.%04d.txt" % i), "w"
        ) as ids_log:
            ids_log.writelines(sampled_ids[i])
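# Hedged example invocation (paths are placeholders for an uncompressed S2EF
# split containing paired *.extxyz and *.txt files):
#   python preprocess_ef.py --data-path s2ef_train_200K_uncompressed \
#       --out-path data/s2ef/200k/train --num-workers 4 --ref-energy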
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-path",
help="Path to dir containing *.extxyz and *.txt files",
)
parser.add_argument(
"--out-path",
help="Directory to save extracted features. Will create if doesn't exist",
)
parser.add_argument(
"--get-edges",
action="store_true",
help="Store edge indices in LMDB, ~10x storage requirement. Default: compute edge indices on-the-fly.",
)
parser.add_argument(
"--num-workers",
type=int,
default=1,
help="No. of feature-extracting processes or no. of dataset chunks",
)
parser.add_argument(
"--ref-energy", action="store_true", help="Subtract reference energies"
)
parser.add_argument(
"--test-data",
action="store_true",
help="Is data being processed test data?",
)
return parser
if __name__ == "__main__":
parser: argparse.ArgumentParser = get_parser()
args: argparse.Namespace = parser.parse_args()
main(args)
| 4,893 | 27.453488 | 111 | py |
ocp | ocp-main/scripts/gif_maker_parallelized.py | """
Script to generate gifs from traj
Note:
This is just a quick way to generate gifs and visalizations from traj, there are many parameters and settings in the code that people can vary to make visualizations better. We have chosen these settings as this seem to work fine for most of our systems.
Requirements:
povray
ffmpeg
ase==3.21
"""
import argparse
import copy
import multiprocessing as mp
import os
import ase.io
import numpy as np
from ase.data import covalent_radii
from ase.io.pov import get_bondpairs
def pov_from_atoms(mp_args) -> None:
atoms, idx, out_path = mp_args
# how many extra repeats to generate on either side to look infinite
extra_cells = 2
# try and guess which atoms are adsorbates since the tags aren't correct after running in vasp
# ideally this would be fixed by getting the right adsorbate atoms from the initial configurations
atoms_organic = np.array(
[atom.symbol in set(["C", "H", "O", "N"]) for atom in atoms]
)
# get the bare surface (note: this will not behave correctly for nitrides/hydrides/carbides/etc)
atoms_surface = atoms[~atoms_organic].copy()
# replicate the bare surface
atoms_surface = atoms_surface.repeat(
(extra_cells * 2 + 1, extra_cells * 2 + 1, 1)
)
# make an image of the adsorbate in the center of the slab
atoms_adsorbate = atoms[atoms_organic]
atoms_adsorbate.positions += extra_cells * (
atoms.cell[0, :] + atoms.cell[1, :]
)
# add the adsorbate to the replicated surface, then center the positions on the adsorbate
num_surface_atoms = len(atoms_surface)
atoms_surface += atoms_adsorbate
atoms_surface.positions -= atoms_adsorbate.positions.mean(axis=0)
# only include bonds for the adsorbate atoms
bondpairs = get_bondpairs(atoms_surface)
bondpairs = [
bond
for bond in bondpairs
if bond[0] >= num_surface_atoms and bond[1] >= num_surface_atoms
]
# write the image with povray
bbox = (-6.4, -4, 6.4, 4) # clip to a small region around the adsorbate
os.chdir(f"{out_path}")
renderer = ase.io.write(
"snapshot_%04i.pov" % idx,
atoms_surface,
povray_settings={
"celllinewidth": 0,
"canvas_height": 300,
"textures": ["intermediate"] * len(atoms_surface),
"bondatoms": bondpairs,
},
bbox=bbox,
rotation="-40x",
radii=covalent_radii[atoms_surface.numbers],
)
renderer.render()
print(f"image {idx} completed!")
def parallelize_generation(traj_path, out_path: str, n_procs) -> None:
    # make the covalent radii for C/N/O (Z = 6, 7, 8) a little smaller to make bonds visible
covalent_radii[6] = covalent_radii[6] * 0.7
covalent_radii[7] = covalent_radii[7] * 0.7
covalent_radii[8] = covalent_radii[8] * 0.7
# name of the folder containing images and gif
file_name = os.path.basename(traj_path).split(".")[0]
out_path = os.path.join(out_path, file_name)
out_path = os.path.abspath(out_path)
os.makedirs(out_path, exist_ok=True)
atoms_list = ase.io.read(traj_path, ":")
# parallelizing image generation
mp_args_list = [
(atoms, idx, out_path) for idx, atoms in enumerate(atoms_list)
]
pool = mp.Pool(processes=n_procs)
pool.map(pov_from_atoms, mp_args_list)
# creating gif
os.system(
f"ffmpeg -pattern_type glob -i '{out_path}/*.png' {out_path}/{file_name}.gif"
)
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument("--traj-path", required=True, help="Path to traj file")
parser.add_argument(
"--out-path",
required=True,
help="Directory to save generated images and gif",
)
parser.add_argument(
"--num-workers",
type=int,
default=1,
help="Number of processes to be used",
)
return parser
if __name__ == "__main__":
parser: argparse.ArgumentParser = get_parser()
args: argparse.Namespace = parser.parse_args()
parallelize_generation(args.traj_path, args.out_path, args.num_workers)
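# Illustrative invocation (a sketch; paths are placeholders, and povray and
# ffmpeg must be on PATH as noted in the module docstring):
#
#   python gif_maker_parallelized.py --traj-path /path/to/relaxation.traj \
#       --out-path /path/to/gifs --num-workers 8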
| 4,145 | 32.168 | 254 | py |
ocp | ocp-main/scripts/make_lmdb_sizes.py | """
This script provides the functionality to generate metadata.npz files necessary
for load-balancing the DataLoader.
"""
import argparse
import multiprocessing as mp
import os
import warnings
import numpy as np
from tqdm import tqdm
from ocpmodels.datasets import SinglePointLmdbDataset, TrajectoryLmdbDataset
from ocpmodels.common.typing import assert_is_instance
def get_data(index):
data = dataset[index]
natoms = data.natoms
neighbors = None
if hasattr(data, "edge_index"):
neighbors = data.edge_index.shape[1]
return index, natoms, neighbors
def main(args) -> None:
path = assert_is_instance(args.data_path, str)
global dataset
if os.path.isdir(path):
dataset = TrajectoryLmdbDataset({"src": path})
outpath = os.path.join(path, "metadata.npz")
    elif os.path.isfile(path):
        dataset = SinglePointLmdbDataset({"src": path})
        outpath = os.path.join(os.path.dirname(path), "metadata.npz")
    else:
        # fail early instead of hitting a NameError on `dataset` below
        raise ValueError(f"{path} is neither a directory nor a file")
output_indices = range(len(dataset))
pool = mp.Pool(assert_is_instance(args.num_workers, int))
outputs = list(
        tqdm(pool.imap(get_data, output_indices), total=len(output_indices))
)
indices = []
natoms = []
neighbors = []
for i in outputs:
indices.append(i[0])
natoms.append(i[1])
neighbors.append(i[2])
_sort = np.argsort(indices)
sorted_natoms = np.array(natoms, dtype=np.int32)[_sort]
if None in neighbors:
warnings.warn(
f"edge_index information not found, {outpath} only supports atom-wise load balancing."
)
np.savez(outpath, natoms=sorted_natoms)
else:
sorted_neighbors = np.array(neighbors, dtype=np.int32)[_sort]
np.savez(outpath, natoms=sorted_natoms, neighbors=sorted_neighbors)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-path",
required=True,
type=str,
help="Path to S2EF directory or IS2R* .lmdb file",
)
parser.add_argument(
"--num-workers",
default=1,
type=int,
help="Num of workers to parallelize across",
)
args: argparse.Namespace = parser.parse_args()
main(args)
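# Quick sanity check of the generated file (a sketch; the keys follow the
# np.savez calls in main() above):
#
#   meta = np.load("metadata.npz")
#   print(meta["natoms"].shape)  # one entry per dataset sample
#   print(meta.files)            # ["natoms"] or ["natoms", "neighbors"]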
| 2,225 | 26.481481 | 98 | py |
ocp | ocp-main/scripts/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
| 178 | 24.571429 | 63 | py |
ocp | ocp-main/scripts/uncompress.py | """
Uncompresses downloaded S2EF datasets to be used by the LMDB preprocessing
script - preprocess_ef.py
"""
import argparse
import glob
import lzma
import multiprocessing as mp
import os
from typing import List, Tuple
from tqdm import tqdm
def read_lzma(inpfile: str, outfile: str) -> None:
with open(inpfile, "rb") as f:
contents = lzma.decompress(f.read())
with open(outfile, "wb") as op:
op.write(contents)
def decompress_list_of_files(ip_op_pair: Tuple[str, str]) -> None:
ip_file, op_file = ip_op_pair
read_lzma(ip_file, op_file)
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument(
"--ipdir", type=str, help="Path to compressed dataset directory"
)
parser.add_argument(
"--opdir", type=str, help="Directory path to uncompress files to"
)
parser.add_argument(
"--num-workers", type=int, help="# of processes to parallelize across"
)
return parser
def main(args: argparse.Namespace) -> None:
os.makedirs(args.opdir, exist_ok=True)
filelist = glob.glob(os.path.join(args.ipdir, "*txt.xz")) + glob.glob(
os.path.join(args.ipdir, "*extxyz.xz")
)
ip_op_pairs: List[Tuple[str, str]] = []
for filename in filelist:
fname_base = os.path.basename(filename)
ip_op_pairs.append(
(filename, os.path.join(args.opdir, fname_base[:-3]))
)
pool = mp.Pool(args.num_workers)
list(
tqdm(
pool.imap(decompress_list_of_files, ip_op_pairs),
total=len(ip_op_pairs),
desc=f"Uncompressing {args.ipdir}",
)
)
if __name__ == "__main__":
parser: argparse.ArgumentParser = get_parser()
args: argparse.Namespace = parser.parse_args()
main(args)
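# Illustrative invocation (a sketch; directory names are placeholders):
#
#   python uncompress.py --ipdir /path/to/s2ef_train_compressed \
#       --opdir /path/to/s2ef_train --num-workers 8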
| 1,819 | 25.376812 | 78 | py |
ocp | ocp-main/scripts/make_challenge_submission_file.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
ONLY for use in the NeurIPS 2021 Open Catalyst Challenge. For all other submissions
please use make_submission_file.py.
"""
import argparse
import glob
import os
import numpy as np
def write_is2re_relaxations(path: str, filename: str, hybrid) -> None:
import ase.io
from tqdm import tqdm
submission_file = {}
if not hybrid:
ids = []
energies = []
systems = glob.glob(os.path.join(path, "*.traj"))
for system in tqdm(systems):
sid, _ = os.path.splitext(os.path.basename(system))
ids.append(str(sid))
traj = ase.io.read(system, "-1")
energies.append(traj.get_potential_energy())
submission_file["challenge_ids"] = np.array(ids)
submission_file["challenge_energy"] = np.array(energies)
else:
preds = np.load(path)
ids = []
energies = []
for sid, energy in zip(preds["ids"], preds["energy"]):
sid = sid.split("_")[0]
ids.append(sid)
energies.append(energy)
submission_file["challenge_ids"] = np.array(ids)
submission_file["challenge_energy"] = np.array(energies)
np.savez_compressed(filename, **submission_file)
def write_predictions(path: str, filename: str) -> None:
submission_file = {}
res = np.load(path, allow_pickle=True)
contents = res.files
for i in contents:
key = "_".join(["challenge", i])
submission_file[key] = res[i]
np.savez_compressed(filename, **submission_file)
def main(args: argparse.Namespace) -> None:
path = args.path
if not args.out_path.endswith(".npz"):
args.out_path = args.out_path + ".npz"
if not args.is2re_relaxations:
write_predictions(path, filename=args.out_path)
else:
write_is2re_relaxations(
path, filename=args.out_path, hybrid=args.hybrid
)
print(f"Results saved to {args.out_path} successfully.")
if __name__ == "__main__":
"""
Create a submission file for the NeurIPS 2021 Open Catalyst Challenge.
Results file can be obtained as follows for the various tasks:
S2EF: config["mode"] = "predict"
IS2RE: config["mode"] = "predict"
IS2RS: config["mode"] = "run-relaxations" and config["task"]["write_pos"] = True
    Use this script to write your results files in the format EvalAI expects for
    submissions.
If writing IS2RE predictions from relaxations, the path specified must be a
    directory containing trajectory (.traj) files. Additionally, --is2re-relaxations must be
provided as a command line argument.
If writing IS2RE predictions from hybrid relaxations (force only model +
energy only model), paths must be the .npz S2EF prediction files.
Additionally, --is2re-relaxations and --hybrid must be provided as a
command line argument.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--path", help="Path to results")
parser.add_argument("--out-path", help="Path to write predictions to.")
parser.add_argument(
"--is2re-relaxations",
action="store_true",
help="Write IS2RE results from trajectories. Path specified must be a directory containing .traj files.",
)
parser.add_argument(
"--hybrid",
action="store_true",
help="Write IS2RE results from S2EF prediction files. Path specified must be a S2EF NPZ file.",
)
args: argparse.Namespace = parser.parse_args()
main(args)
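# Illustrative invocations for the three cases documented above (a sketch;
# all paths are placeholders):
#
#   # S2EF / IS2RE predictions from an .npz results file
#   python make_challenge_submission_file.py --path results.npz --out-path submission
#
#   # IS2RE energies from a directory of relaxation trajectories
#   python make_challenge_submission_file.py --path /path/to/trajs \
#       --out-path submission --is2re-relaxations
#
#   # IS2RE energies from hybrid (force-only + energy-only) S2EF predictions
#   python make_challenge_submission_file.py --path s2ef_preds.npz \
#       --out-path submission --is2re-relaxations --hybrid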
| 3,667 | 29.823529 | 113 | py |
ocp | ocp-main/scripts/hpo/run_tune.py | import os
import ray
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler
from ocpmodels.common.flags import flags
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import build_config, setup_imports
# this function is general and should work for any ocp trainer
def ocp_trainable(config, checkpoint_dir=None) -> None:
setup_imports()
# trainer defaults are changed to run HPO
trainer = registry.get_trainer_class(config.get("trainer", "energy"))(
task=config["task"],
model=config["model"],
dataset=config["dataset"],
optimizer=config["optim"],
identifier=config["identifier"],
run_dir=config.get("run_dir", "./"),
is_debug=config.get("is_debug", False),
is_vis=config.get("is_vis", False),
is_hpo=config.get("is_hpo", True), # hpo
print_every=config.get("print_every", 10),
seed=config.get("seed", 0),
logger=config.get("logger", None), # hpo
local_rank=config["local_rank"],
amp=config.get("amp", False),
cpu=config.get("cpu", False),
)
# add checkpoint here
if checkpoint_dir:
checkpoint = os.path.join(checkpoint_dir, "checkpoint")
trainer.load_pretrained(checkpoint)
# start training
trainer.train()
# this section defines the hyperparameters to tune and all the Ray Tune settings
# current params/settings are an example for ForceNet
def main() -> None:
# parse config
parser = flags.get_parser()
args, override_args = parser.parse_known_args()
config = build_config(args, override_args)
# add parameters to tune using grid or random search
config["model"].update(
hidden_channels=tune.choice([256, 384, 512, 640, 704]),
decoder_hidden_channels=tune.choice([256, 384, 512, 640, 704]),
depth_mlp_edge=tune.choice([1, 2, 3, 4, 5]),
depth_mlp_node=tune.choice([1, 2, 3, 4, 5]),
num_interactions=tune.choice([3, 4, 5, 6]),
)
# define scheduler
scheduler = ASHAScheduler(
time_attr="steps",
metric="val_loss",
mode="min",
max_t=100000,
grace_period=2000,
reduction_factor=4,
brackets=1,
)
# ray init
# for debug
# ray.init(local_mode=True)
# for slurm cluster
ray.init(
address="auto",
_node_ip_address=os.environ["ip_head"].split(":")[0],
_redis_password=os.environ["redis_password"],
)
# define command line reporter
reporter = CLIReporter(
print_intermediate_tables=True,
metric="val_loss",
mode="min",
metric_columns={
"steps": "steps",
"epochs": "epochs",
"training_iteration": "training_iteration",
"val_loss": "val_loss",
"val_forces_mae": "val_forces_mae",
},
)
# define run parameters
analysis = tune.run(
ocp_trainable,
resources_per_trial={"cpu": 8, "gpu": 1},
config=config,
fail_fast=False,
local_dir=config.get("run_dir", "./"),
num_samples=500,
progress_reporter=reporter,
scheduler=scheduler,
)
print(
"Best config is:",
analysis.get_best_config(
metric="val_forces_mae", mode="min", scope="last"
),
)
if __name__ == "__main__":
main()
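# Sketch of a launch environment (values are placeholders; ip_head and
# redis_password are normally exported by the Ray-on-SLURM launch script, and
# the --mode/--config-yml flags come from ocpmodels.common.flags):
#
#   export ip_head=10.0.0.1:6379
#   export redis_password=5241590000000000
#   python run_tune.py --mode train --config-yml /path/to/forcenet_config.yml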
| 3,426 | 29.598214 | 80 | py |
ocp | ocp-main/scripts/hpo/run_tune_pbt.py | import logging
import os
import ray
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import PopulationBasedTraining
from ocpmodels.common.flags import flags
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import build_config, setup_imports
# this function is general and should work for any ocp trainer
def ocp_trainable(config, checkpoint_dir=None) -> None:
setup_imports()
# update config for PBT learning rate
config["optim"].update(lr_initial=config["lr"])
# trainer defaults are changed to run HPO
trainer = registry.get_trainer_class(config.get("trainer", "energy"))(
task=config["task"],
model=config["model"],
dataset=config["dataset"],
optimizer=config["optim"],
identifier=config["identifier"],
run_dir=config.get("run_dir", "./"),
is_debug=config.get("is_debug", False),
is_vis=config.get("is_vis", False),
is_hpo=config.get("is_hpo", True), # hpo
print_every=config.get("print_every", 10),
seed=config.get("seed", 0),
logger=config.get("logger", None), # hpo
local_rank=config["local_rank"],
amp=config.get("amp", False),
cpu=config.get("cpu", False),
)
# add checkpoint here
if checkpoint_dir:
checkpoint = os.path.join(checkpoint_dir, "checkpoint")
trainer.load_pretrained(checkpoint)
# set learning rate
for g in trainer.optimizer.param_groups:
g["lr"] = config["lr"]
# start training
trainer.train()
# this section defines all the Ray Tune run parameters
def main() -> None:
# parse config
parser = flags.get_parser()
args, override_args = parser.parse_known_args()
config = build_config(args, override_args)
# add parameters to tune using grid or random search
config["lr"] = tune.loguniform(0.0001, 0.01)
# define scheduler
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="val_loss",
mode="min",
perturbation_interval=1,
hyperparam_mutations={
"lr": tune.loguniform(0.000001, 0.01),
},
)
# ray init
ray.init(
address="auto",
_node_ip_address=os.environ["ip_head"].split(":")[0],
_redis_password=os.environ["redis_password"],
)
# define command line reporter
reporter = CLIReporter(
print_intermediate_tables=True,
metric="val_loss",
mode="min",
metric_columns={
"act_lr": "act_lr",
"steps": "steps",
"epochs": "epochs",
"training_iteration": "training_iteration",
"val_loss": "val_loss",
"val_forces_mae": "val_forces_mae",
},
)
# define run parameters
analysis = tune.run(
ocp_trainable,
resources_per_trial={"cpu": 8, "gpu": 1},
config=config,
stop={"epochs": 12},
# time_budget_s=28200,
fail_fast=False,
local_dir=config.get("run_dir", "./"),
num_samples=8,
progress_reporter=reporter,
scheduler=scheduler,
)
print(
"Best config is:",
analysis.get_best_config(
metric="val_forces_mae", mode="min", scope="last"
),
)
if __name__ == "__main__":
main()
| 3,355 | 29.509091 | 74 | py |
ocp | ocp-main/scripts/hpo/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
| 178 | 24.571429 | 63 | py |
ocp | ocp-main/tests/conftest.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
import pytest
from syrupy.extensions.amber import AmberSnapshotExtension
if TYPE_CHECKING:
from syrupy.types import SerializableData, SerializedData, SnapshotIndex
DEFAULT_RTOL = 1.0e-03
DEFAULT_ATOL = 1.0e-03
class Approx:
"""
Wrapper object for approximately compared numpy arrays.
"""
def __init__(
self,
data: Union[np.ndarray, list],
*,
rtol: Optional[float] = None,
atol: Optional[float] = None,
) -> None:
if isinstance(data, list):
self.data = np.array(data)
elif isinstance(data, np.ndarray):
self.data = data
else:
raise TypeError(f"Cannot convert {type(data)} to np.array")
self.rtol = rtol if rtol is not None else DEFAULT_RTOL
self.atol = atol if atol is not None else DEFAULT_ATOL
self.tol_repr = True
def __repr__(self) -> str:
data = np.array_repr(self.data)
data = "\n".join(f"\t{line}" for line in data.splitlines())
tol_repr = ""
if self.tol_repr:
tol_repr = f", \n\trtol={self.rtol}, \n\tatol={self.atol}"
return f"Approx(\n{data}{tol_repr}\n)"
class _ApproxNumpyFormatter:
def __init__(self, data) -> None:
self.data = data
def __repr__(self) -> str:
return Approx(
self.data.expected,
rtol=self.data.rel,
atol=self.data.abs,
).__repr__()
def _try_parse_approx(data: "SerializableData") -> Optional[Approx]:
"""
Parse the string representation of an Approx object.
We can just use eval here, since we know the string is safe.
"""
if not isinstance(data, str):
return None
data = data.strip()
if not data.startswith("Approx("):
return None
approx = eval(
data.replace("dtype=", "dtype=np."),
{"Approx": Approx, "np": np},
{"array": np.array},
)
if not isinstance(approx, Approx):
return None
return approx
class ApproxExtension(AmberSnapshotExtension):
"""
By default, syrupy uses the __repr__ of the expected (snapshot) and actual values
to serialize them into strings. Then, it compares the strings to see if they match.
However, this behavior is not ideal for comparing floats/ndarrays. For example,
if we have a snapshot with a float value of 0.1, and the actual value is 0.10000000000000001,
then the strings will not match, even though the values are effectively equal.
    To work around this, we override the serialize method to serialize the expected value
into a special representation. Then, we override the matches function (which originally does a
simple string comparison) to parse the expected and actual values into numpy arrays.
Finally, we compare the arrays using np.allclose.
"""
def matches(
self,
*,
serialized_data: "SerializableData",
snapshot_data: "SerializableData",
) -> bool:
# if both serialized_data and snapshot_data are serialized Approx objects,
# then we can load them as numpy arrays and compare them using np.allclose
serialized_approx = _try_parse_approx(serialized_data)
snapshot_approx = _try_parse_approx(snapshot_data)
if serialized_approx is not None and snapshot_approx is not None:
return np.allclose(
snapshot_approx.data,
serialized_approx.data,
rtol=serialized_approx.rtol,
atol=serialized_approx.atol,
)
return super().matches(
serialized_data=serialized_data, snapshot_data=snapshot_data
)
def serialize(self, data, **kwargs):
# we override the existing serialization behavior
# of the `pytest.approx()` object to serialize it into a special string.
if isinstance(data, type(pytest.approx(np.array(0.0)))):
return super().serialize(_ApproxNumpyFormatter(data), **kwargs)
elif isinstance(data, type(pytest.approx(0.0))):
raise NotImplementedError("Scalar approx not implemented yet")
return super().serialize(data, **kwargs)
def write_snapshot(
self, *, data: "SerializedData", index: "SnapshotIndex"
) -> None:
# Right before writing to file, we update the serialized snapshot data
# and remove the atol/rtol from the string representation.
# This is an implementation detail, and is not necessary for the extension to work.
# It just makes the snapshot files a bit cleaner.
approx = _try_parse_approx(data)
if approx is not None:
approx.tol_repr = False
data = self.serialize(approx)
return super().write_snapshot(data=data, index=index)
@pytest.fixture
def snapshot(snapshot):
return snapshot.use_extension(ApproxExtension)
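# Illustrative use of the fixture (a sketch of a hypothetical test module):
# wrapping a numpy array in pytest.approx makes the extension above serialize
# it as Approx(...) and compare with np.allclose on later runs.
#
#   def test_predicted_forces(snapshot):
#       forces = np.array([[0.1, 0.2, 0.3]])
#       assert snapshot == pytest.approx(forces, rel=1e-3, abs=1e-3)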
| 5,137 | 33.02649 | 98 | py |
ocp | ocp-main/tests/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
| 178 | 24.571429 | 63 | py |
ocp | ocp-main/tests/common/test_data_parallel_batch_sampler.py | import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import TypeVar
import numpy as np
import pytest
from torch.utils.data import Dataset
from ocpmodels.common.data_parallel import BalancedBatchSampler
DATA = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
SIZE_ATOMS = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
SIZE_NEIGHBORS = [4, 4, 4, 4, 4, 4, 4, 4, 4, 4]
T_co = TypeVar("T_co", covariant=True)
@contextmanager
def _temp_file(name: str):
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir) / name
@pytest.fixture
def valid_path_dataset():
class _Dataset(Dataset[T_co]):
def __init__(self, data, fpath: Path) -> None:
self.data = data
self.metadata_path = fpath
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
with _temp_file("metadata.npz") as file:
np.savez(
natoms=np.array(SIZE_ATOMS),
neighbors=np.array(SIZE_NEIGHBORS),
file=file,
)
yield _Dataset(DATA, file)
@pytest.fixture
def invalid_path_dataset():
class _Dataset(Dataset):
def __init__(self, data) -> None:
self.data = data
self.metadata_path = Path("/tmp/does/not/exist.np")
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
return _Dataset(DATA)
@pytest.fixture
def invalid_dataset():
class _Dataset(Dataset):
def __init__(self, data) -> None:
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
return _Dataset(DATA)
def test_lowercase(invalid_dataset) -> None:
sampler = BalancedBatchSampler(
dataset=invalid_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="ATOMS",
throw_on_error=False,
)
assert sampler.mode == "atoms"
sampler = BalancedBatchSampler(
dataset=invalid_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="NEIGHBORS",
throw_on_error=False,
)
assert sampler.mode == "neighbors"
def test_invalid_mode(invalid_dataset) -> None:
with pytest.raises(
ValueError, match="Must be one of 'atoms', 'neighbors', or a boolean."
):
BalancedBatchSampler(
dataset=invalid_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="natoms",
throw_on_error=True,
)
with pytest.raises(
ValueError, match="Must be one of 'atoms', 'neighbors', or a boolean."
):
BalancedBatchSampler(
dataset=invalid_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="nneighbors",
throw_on_error=True,
)
def test_invalid_dataset(invalid_dataset) -> None:
with pytest.raises(
RuntimeError,
match="does not have a metadata_path attribute. BalancedBatchSampler has to load the data to determine batch sizes, which incurs significant overhead!",
):
BalancedBatchSampler(
dataset=invalid_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="atoms",
throw_on_error=True,
force_balancing=True,
)
with pytest.raises(
RuntimeError,
match="does not have a metadata_path attribute. Batches will not be balanced, which can incur significant overhead!",
):
BalancedBatchSampler(
dataset=invalid_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="atoms",
throw_on_error=True,
force_balancing=False,
)
def test_invalid_path_dataset(invalid_path_dataset) -> None:
with pytest.raises(
RuntimeError,
match="Metadata file .+ does not exist. BalancedBatchSampler has to load the data to determine batch sizes, which incurs significant overhead!",
):
BalancedBatchSampler(
dataset=invalid_path_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="atoms",
throw_on_error=True,
force_balancing=True,
)
with pytest.raises(
RuntimeError,
match="Metadata file .+ does not exist. Batches will not be balanced, which can incur significant overhead!",
):
BalancedBatchSampler(
dataset=invalid_path_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="atoms",
throw_on_error=True,
force_balancing=False,
)
def test_valid_dataset(valid_path_dataset) -> None:
sampler = BalancedBatchSampler(
dataset=valid_path_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="atoms",
throw_on_error=True,
)
assert (sampler.sizes == np.array(SIZE_ATOMS)).all()
sampler = BalancedBatchSampler(
dataset=valid_path_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode="neighbors",
throw_on_error=True,
)
assert (sampler.sizes == np.array(SIZE_NEIGHBORS)).all()
def test_disabled(valid_path_dataset) -> None:
sampler = BalancedBatchSampler(
dataset=valid_path_dataset,
batch_size=1,
rank=0,
num_replicas=2,
device=None,
mode=False,
throw_on_error=True,
)
assert sampler.balance_batches is False
def test_single_node(valid_path_dataset) -> None:
sampler = BalancedBatchSampler(
dataset=valid_path_dataset,
batch_size=1,
rank=0,
num_replicas=1,
device=None,
mode="atoms",
throw_on_error=True,
)
assert sampler.balance_batches is False
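# Sketch of how the sampler is typically consumed (names reuse the fixtures
# above; wiring it into a DataLoader this way is an assumption, not part of
# the tests):
#
#   from torch.utils.data import DataLoader
#   sampler = BalancedBatchSampler(
#       dataset=valid_path_dataset, batch_size=2, rank=0, num_replicas=1,
#       device=None, mode="atoms", throw_on_error=False,
#   )
#   loader = DataLoader(valid_path_dataset, batch_sampler=sampler)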
| 6,251 | 25.05 | 161 | py |
ocp | ocp-main/tests/models/test_schnet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import random
import numpy as np
import pytest
import torch
from ase.io import read
from ocpmodels.common.registry import registry
from ocpmodels.common.transforms import RandomRotate
from ocpmodels.common.utils import setup_imports
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
@pytest.fixture(scope="class")
def load_model(request) -> None:
torch.manual_seed(4)
setup_imports()
model = registry.get_model_class("schnet")(
None, 32, 1, cutoff=6.0, regress_forces=True, use_pbc=True
)
request.cls.model = model
@pytest.mark.usefixtures("load_data")
@pytest.mark.usefixtures("load_model")
class TestSchNet:
def test_rotation_invariance(self) -> None:
random.seed(1)
data = self.data
# Sampling a random rotation within [-180, 180] for all axes.
transform = RandomRotate([-180, 180], [0, 1, 2])
data_rotated, rot, inv_rot = transform(data.clone())
assert not np.array_equal(data.pos, data_rotated.pos)
# Pass it through the model.
batch = data_list_collater([data, data_rotated])
out = self.model(batch)
# Compare predicted energies and forces (after inv-rotation).
energies = out[0].detach()
np.testing.assert_almost_equal(energies[0], energies[1], decimal=5)
forces = out[1].detach()
np.testing.assert_array_almost_equal(
forces[: forces.shape[0] // 2],
torch.matmul(forces[forces.shape[0] // 2 :], inv_rot),
decimal=4,
)
def test_energy_force_shape(self, snapshot) -> None:
        # Use the preprocessed Data object as-is.
data = self.data
# Pass it through the model.
energy, forces = self.model(data_list_collater([data]))
assert snapshot == energy.shape
assert snapshot == pytest.approx(energy.detach())
assert snapshot == forces.shape
assert snapshot == pytest.approx(forces.detach())
| 2,649 | 28.120879 | 79 | py |
ocp | ocp-main/tests/models/test_gemnet_oc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import io
import logging
import os
import random
import numpy as np
import pytest
import requests
import torch
from ase.io import read
from ocpmodels.common.registry import registry
from ocpmodels.common.transforms import RandomRotate
from ocpmodels.common.utils import load_state_dict, setup_imports
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
@pytest.fixture(scope="class")
def load_model(request) -> None:
torch.manual_seed(4)
setup_imports()
# download and load weights.
checkpoint_url = "https://dl.fbaipublicfiles.com/opencatalystproject/models/2022_07/s2ef/gemnet_oc_base_s2ef_all.pt"
# load buffer into memory as a stream
# and then load it with torch.load
r = requests.get(checkpoint_url, stream=True)
r.raise_for_status()
checkpoint = torch.load(
io.BytesIO(r.content), map_location=torch.device("cpu")
)
model = registry.get_model_class("gemnet_oc")(
None,
-1,
1,
num_spherical=7,
num_radial=128,
num_blocks=4,
emb_size_atom=256,
emb_size_edge=512,
emb_size_trip_in=64,
emb_size_trip_out=64,
emb_size_quad_in=32,
emb_size_quad_out=32,
emb_size_aint_in=64,
emb_size_aint_out=64,
emb_size_rbf=16,
emb_size_cbf=16,
emb_size_sbf=32,
num_before_skip=2,
num_after_skip=2,
num_concat=1,
num_atom=3,
num_output_afteratom=3,
num_atom_emb_layers=2,
num_global_out_layers=2,
regress_forces=True,
direct_forces=True,
use_pbc=True,
cutoff=12.0,
cutoff_qint=12.0,
cutoff_aeaint=12.0,
cutoff_aint=12.0,
max_neighbors=30,
max_neighbors_qint=8,
max_neighbors_aeaint=20,
max_neighbors_aint=1000,
rbf={"name": "gaussian"},
envelope={"name": "polynomial", "exponent": 5},
cbf={"name": "spherical_harmonics"},
sbf={"name": "legendre_outer"},
extensive=True,
forces_coupled=False,
output_init="HeOrthogonal",
activation="silu",
quad_interaction=True,
atom_edge_interaction=True,
edge_atom_interaction=True,
atom_interaction=True,
qint_tags=[1, 2],
scale_file=checkpoint["scale_dict"],
)
new_dict = {
k[len("module.") * 2 :]: v for k, v in checkpoint["state_dict"].items()
}
load_state_dict(model, new_dict)
request.cls.model = model
@pytest.mark.usefixtures("load_data")
@pytest.mark.usefixtures("load_model")
class TestGemNetOC:
def test_rotation_invariance(self) -> None:
random.seed(1)
data = self.data
# Sampling a random rotation within [-180, 180] for all axes.
transform = RandomRotate([-180, 180], [0, 1, 2])
data_rotated, rot, inv_rot = transform(data.clone())
assert not np.array_equal(data.pos, data_rotated.pos)
# Pass it through the model.
batch = data_list_collater([data, data_rotated])
out = self.model(batch)
# Compare predicted energies and forces (after inv-rotation).
energies = out[0].detach()
np.testing.assert_almost_equal(energies[0], energies[1], decimal=3)
forces = out[1].detach()
logging.info(forces)
np.testing.assert_array_almost_equal(
forces[: forces.shape[0] // 2],
torch.matmul(forces[forces.shape[0] // 2 :], inv_rot),
decimal=3,
)
def test_energy_force_shape(self, snapshot) -> None:
        # Use the preprocessed Data object as-is.
data = self.data
# Pass it through the model.
energy, forces = self.model(data_list_collater([data]))
assert snapshot == energy.shape
assert snapshot == pytest.approx(energy.detach())
assert snapshot == forces.shape
assert snapshot == pytest.approx(forces.detach())
| 4,631 | 27.95 | 120 | py |
ocp | ocp-main/tests/models/test_gemnet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import random
import numpy as np
import pytest
import torch
from ase.io import read
from ocpmodels.common.registry import registry
from ocpmodels.common.transforms import RandomRotate
from ocpmodels.common.utils import setup_imports
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
@pytest.fixture(scope="class")
def load_model(request) -> None:
torch.manual_seed(4)
setup_imports()
model = registry.get_model_class("gemnet_t")(
None,
-1,
1,
cutoff=6.0,
num_spherical=7,
num_radial=128,
num_blocks=3,
emb_size_atom=16,
emb_size_edge=16,
emb_size_trip=16,
emb_size_rbf=16,
emb_size_cbf=16,
emb_size_bil_trip=64,
num_before_skip=1,
num_after_skip=2,
num_concat=1,
num_atom=3,
regress_forces=True,
direct_forces=True,
scale_file=os.path.join(
os.path.dirname(os.path.abspath(__file__)), "gemnet-dT-scales.json"
),
)
request.cls.model = model
@pytest.mark.usefixtures("load_data")
@pytest.mark.usefixtures("load_model")
class TestGemNetT:
def test_rotation_invariance(self) -> None:
random.seed(1)
data = self.data
# Sampling a random rotation within [-180, 180] for all axes.
transform = RandomRotate([-180, 180], [0, 1, 2])
data_rotated, rot, inv_rot = transform(data.clone())
assert not np.array_equal(data.pos, data_rotated.pos)
# Pass it through the model.
batch = data_list_collater([data, data_rotated])
out = self.model(batch)
# Compare predicted energies and forces (after inv-rotation).
energies = out[0].detach()
np.testing.assert_almost_equal(energies[0], energies[1], decimal=5)
forces = out[1].detach()
logging.info(forces)
np.testing.assert_array_almost_equal(
forces[: forces.shape[0] // 2],
torch.matmul(forces[forces.shape[0] // 2 :], inv_rot),
decimal=4,
)
def test_energy_force_shape(self, snapshot) -> None:
        # Use the preprocessed Data object as-is.
data = self.data
# Pass it through the model.
energy, forces = self.model(data_list_collater([data]))
assert snapshot == energy.shape
assert snapshot == pytest.approx(energy.detach())
assert snapshot == forces.shape
assert snapshot == pytest.approx(forces.detach())
| 3,191 | 27 | 79 | py |
ocp | ocp-main/tests/models/test_dimenet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import random
import numpy as np
import pytest
import torch
from ase.io import read
from ocpmodels.common.registry import registry
from ocpmodels.common.transforms import RandomRotate
from ocpmodels.common.utils import setup_imports
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
@pytest.fixture(scope="class")
def load_model(request) -> None:
torch.manual_seed(4)
setup_imports()
model = registry.get_model_class("dimenet")(
None,
32,
1,
cutoff=6.0,
regress_forces=True,
use_pbc=False,
)
request.cls.model = model
@pytest.mark.usefixtures("load_data")
@pytest.mark.usefixtures("load_model")
class TestDimeNet:
def test_rotation_invariance(self) -> None:
random.seed(1)
data = self.data
# Sampling a random rotation within [-180, 180] for all axes.
transform = RandomRotate([-180, 180], [0, 1, 2])
data_rotated, rot, inv_rot = transform(data.clone())
assert not np.array_equal(data.pos, data_rotated.pos)
# Pass it through the model.
batch = data_list_collater([data, data_rotated])
out = self.model(batch)
# Compare predicted energies and forces (after inv-rotation).
energies = out[0].detach()
np.testing.assert_almost_equal(energies[0], energies[1], decimal=5)
forces = out[1].detach()
np.testing.assert_array_almost_equal(
forces[: forces.shape[0] // 2],
torch.matmul(forces[forces.shape[0] // 2 :], inv_rot),
decimal=5,
)
def test_energy_force_shape(self, snapshot) -> None:
        # Use the preprocessed Data object as-is.
data = self.data
# Pass it through the model.
energy, forces = self.model(data_list_collater([data]))
assert snapshot == energy.shape
assert snapshot == pytest.approx(energy.detach())
assert snapshot == forces.shape
assert snapshot == pytest.approx(forces.detach())
| 2,693 | 27.0625 | 79 | py |
ocp | ocp-main/tests/models/test_cgcnn.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import random
import numpy as np
import pytest
import torch
from ase.io import read
from ocpmodels.common.registry import registry
from ocpmodels.common.transforms import RandomRotate
from ocpmodels.common.utils import setup_imports
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
@pytest.fixture(scope="class")
def load_model(request) -> None:
torch.manual_seed(4)
setup_imports()
num_gaussians = 50
model = registry.get_model_class("cgcnn")(
None,
num_gaussians,
1,
cutoff=6.0,
num_gaussians=num_gaussians,
regress_forces=True,
use_pbc=True,
)
request.cls.model = model
@pytest.mark.usefixtures("load_data")
@pytest.mark.usefixtures("load_model")
class TestCGCNN:
def test_rotation_invariance(self) -> None:
random.seed(1)
data = self.data
# Sampling a random rotation within [-180, 180] for all axes.
transform = RandomRotate([-180, 180], [0, 1, 2])
data_rotated, rot, inv_rot = transform(data.clone())
assert not np.array_equal(data.pos, data_rotated.pos)
# Pass it through the model.
batch = data_list_collater([data, data_rotated])
out = self.model(batch)
# Compare predicted energies and forces (after inv-rotation).
energies = out[0].detach()
np.testing.assert_almost_equal(energies[0], energies[1], decimal=5)
forces = out[1].detach()
np.testing.assert_array_almost_equal(
forces[: forces.shape[0] // 2],
torch.matmul(forces[forces.shape[0] // 2 :], inv_rot),
decimal=5,
)
def test_energy_force_shape(self, snapshot) -> None:
        # Use the preprocessed Data object as-is.
data = self.data
# Pass it through the model.
energy, forces = self.model(data_list_collater([data]))
assert snapshot == energy.shape
assert snapshot == pytest.approx(energy.detach())
assert snapshot == forces.shape
assert snapshot == pytest.approx(forces.detach())
| 2,759 | 27.163265 | 79 | py |
ocp | ocp-main/tests/models/test_forcenet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import numpy as np
import pytest
from ase.io import read
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import setup_imports
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
@pytest.fixture(scope="class")
def load_model(request) -> None:
setup_imports()
model = registry.get_model_class("forcenet")(
None,
32,
1,
cutoff=6.0,
)
request.cls.model = model
@pytest.mark.usefixtures("load_data")
@pytest.mark.usefixtures("load_model")
class TestForceNet:
def test_energy_force_shape(self, snapshot) -> None:
        # Use the preprocessed Data object as-is.
data = self.data
# Pass it through the model.
energy, forces = self.model(data_list_collater([data]))
assert snapshot == energy.shape
assert snapshot == pytest.approx(energy.detach())
assert snapshot == forces.shape
assert snapshot == pytest.approx(forces.detach())
| 1,657 | 24.121212 | 79 | py |
ocp | ocp-main/tests/models/test_gemnet_oc_scaling_mismatch.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import io
import pytest
import requests
import torch
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import load_state_dict, setup_imports
from ocpmodels.modules.scaling import ScaleFactor
from ocpmodels.modules.scaling.compat import load_scales_compat
from ocpmodels.modules.scaling.util import ensure_fitted
class TestGemNetOC:
def test_no_scaling_mismatch(self) -> None:
torch.manual_seed(4)
setup_imports()
# download and load weights.
checkpoint_url = "https://dl.fbaipublicfiles.com/opencatalystproject/models/2022_07/s2ef/gemnet_oc_base_s2ef_all.pt"
# load buffer into memory as a stream
# and then load it with torch.load
r = requests.get(checkpoint_url, stream=True)
r.raise_for_status()
checkpoint = torch.load(
io.BytesIO(r.content), map_location=torch.device("cpu")
)
model = registry.get_model_class("gemnet_oc")(
None,
-1,
1,
num_spherical=7,
num_radial=128,
num_blocks=4,
emb_size_atom=256,
emb_size_edge=512,
emb_size_trip_in=64,
emb_size_trip_out=64,
emb_size_quad_in=32,
emb_size_quad_out=32,
emb_size_aint_in=64,
emb_size_aint_out=64,
emb_size_rbf=16,
emb_size_cbf=16,
emb_size_sbf=32,
num_before_skip=2,
num_after_skip=2,
num_concat=1,
num_atom=3,
num_output_afteratom=3,
num_atom_emb_layers=2,
num_global_out_layers=2,
regress_forces=True,
direct_forces=True,
use_pbc=True,
cutoff=12.0,
cutoff_qint=12.0,
cutoff_aeaint=12.0,
cutoff_aint=12.0,
max_neighbors=30,
max_neighbors_qint=8,
max_neighbors_aeaint=20,
max_neighbors_aint=1000,
rbf={"name": "gaussian"},
envelope={"name": "polynomial", "exponent": 5},
cbf={"name": "spherical_harmonics"},
sbf={"name": "legendre_outer"},
extensive=True,
forces_coupled=False,
output_init="HeOrthogonal",
activation="silu",
quad_interaction=True,
atom_edge_interaction=True,
edge_atom_interaction=True,
atom_interaction=True,
qint_tags=[1, 2],
scale_file=checkpoint["scale_dict"],
)
new_dict = {
k[len("module.") * 2 :]: v
for k, v in checkpoint["state_dict"].items()
}
try:
load_state_dict(model, new_dict)
except ValueError as e:
assert False, f"'load_state_dict' raised an exception {e}"
def test_scaling_mismatch(self) -> None:
torch.manual_seed(4)
setup_imports()
# download and load weights.
checkpoint_url = "https://dl.fbaipublicfiles.com/opencatalystproject/models/2022_07/s2ef/gemnet_oc_base_s2ef_all.pt"
# load buffer into memory as a stream
# and then load it with torch.load
r = requests.get(checkpoint_url, stream=True)
r.raise_for_status()
checkpoint = torch.load(
io.BytesIO(r.content), map_location=torch.device("cpu")
)
model = registry.get_model_class("gemnet_oc")(
None,
-1,
1,
num_spherical=7,
num_radial=128,
num_blocks=4,
emb_size_atom=256,
emb_size_edge=512,
emb_size_trip_in=64,
emb_size_trip_out=64,
emb_size_quad_in=32,
emb_size_quad_out=32,
emb_size_aint_in=64,
emb_size_aint_out=64,
emb_size_rbf=16,
emb_size_cbf=16,
emb_size_sbf=32,
num_before_skip=2,
num_after_skip=2,
num_concat=1,
num_atom=3,
num_output_afteratom=3,
num_atom_emb_layers=2,
num_global_out_layers=2,
regress_forces=True,
direct_forces=True,
use_pbc=True,
cutoff=12.0,
cutoff_qint=12.0,
cutoff_aeaint=12.0,
cutoff_aint=12.0,
max_neighbors=30,
max_neighbors_qint=8,
max_neighbors_aeaint=20,
max_neighbors_aint=1000,
rbf={"name": "gaussian"},
envelope={"name": "polynomial", "exponent": 5},
cbf={"name": "spherical_harmonics"},
sbf={"name": "legendre_outer"},
extensive=True,
forces_coupled=False,
output_init="HeOrthogonal",
activation="silu",
quad_interaction=True,
atom_edge_interaction=True,
edge_atom_interaction=True,
atom_interaction=True,
qint_tags=[1, 2],
scale_file=checkpoint["scale_dict"],
)
for key in checkpoint["scale_dict"]:
for submodule in model.modules():
if not isinstance(submodule, ScaleFactor):
continue
submodule.reset_()
load_scales_compat(model, checkpoint["scale_dict"])
new_dict = {
k[len("module.") * 2 :]: v
for k, v in checkpoint["state_dict"].items()
}
param_key = f"{key}.scale_factor"
new_dict[param_key] = checkpoint["scale_dict"][key] - 10.0
with pytest.raises(
ValueError,
match=f"Scale factor parameter {param_key} is inconsistent with the loaded state dict.",
):
load_state_dict(model, new_dict)
def test_no_file_exists(self) -> None:
torch.manual_seed(4)
setup_imports()
with pytest.raises(ValueError):
registry.get_model_class("gemnet_oc")(
None,
-1,
1,
num_spherical=7,
num_radial=128,
num_blocks=4,
emb_size_atom=256,
emb_size_edge=512,
emb_size_trip_in=64,
emb_size_trip_out=64,
emb_size_quad_in=32,
emb_size_quad_out=32,
emb_size_aint_in=64,
emb_size_aint_out=64,
emb_size_rbf=16,
emb_size_cbf=16,
emb_size_sbf=32,
num_before_skip=2,
num_after_skip=2,
num_concat=1,
num_atom=3,
num_output_afteratom=3,
num_atom_emb_layers=2,
num_global_out_layers=2,
regress_forces=True,
direct_forces=True,
use_pbc=True,
cutoff=12.0,
cutoff_qint=12.0,
cutoff_aeaint=12.0,
cutoff_aint=12.0,
max_neighbors=30,
max_neighbors_qint=8,
max_neighbors_aeaint=20,
max_neighbors_aint=1000,
rbf={"name": "gaussian"},
envelope={"name": "polynomial", "exponent": 5},
cbf={"name": "spherical_harmonics"},
sbf={"name": "legendre_outer"},
extensive=True,
forces_coupled=False,
output_init="HeOrthogonal",
activation="silu",
quad_interaction=True,
atom_edge_interaction=True,
edge_atom_interaction=True,
atom_interaction=True,
qint_tags=[1, 2],
scale_file="/tmp/this/file/does/not/exist.pt",
)
def test_not_fitted(self) -> None:
torch.manual_seed(4)
setup_imports()
model = registry.get_model_class("gemnet_oc")(
None,
-1,
1,
num_spherical=7,
num_radial=128,
num_blocks=4,
emb_size_atom=256,
emb_size_edge=512,
emb_size_trip_in=64,
emb_size_trip_out=64,
emb_size_quad_in=32,
emb_size_quad_out=32,
emb_size_aint_in=64,
emb_size_aint_out=64,
emb_size_rbf=16,
emb_size_cbf=16,
emb_size_sbf=32,
num_before_skip=2,
num_after_skip=2,
num_concat=1,
num_atom=3,
num_output_afteratom=3,
num_atom_emb_layers=2,
num_global_out_layers=2,
regress_forces=True,
direct_forces=True,
use_pbc=True,
cutoff=12.0,
cutoff_qint=12.0,
cutoff_aeaint=12.0,
cutoff_aint=12.0,
max_neighbors=30,
max_neighbors_qint=8,
max_neighbors_aeaint=20,
max_neighbors_aint=1000,
rbf={"name": "gaussian"},
envelope={"name": "polynomial", "exponent": 5},
cbf={"name": "spherical_harmonics"},
sbf={"name": "legendre_outer"},
extensive=True,
forces_coupled=False,
output_init="HeOrthogonal",
activation="silu",
quad_interaction=True,
atom_edge_interaction=True,
edge_atom_interaction=True,
atom_interaction=True,
qint_tags=[1, 2],
scale_file=None,
)
with pytest.raises(ValueError):
ensure_fitted(model)
| 9,850 | 31.511551 | 124 | py |
ocp | ocp-main/tests/models/test_dimenetpp.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import random
import numpy as np
import pytest
import torch
from ase.io import read
from ocpmodels.common.registry import registry
from ocpmodels.common.transforms import RandomRotate
from ocpmodels.common.utils import setup_imports
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
@pytest.fixture(scope="class")
def load_model(request) -> None:
torch.manual_seed(4)
setup_imports()
model = registry.get_model_class("dimenetplusplus")(
None,
32,
1,
cutoff=6.0,
regress_forces=True,
use_pbc=False,
)
request.cls.model = model
@pytest.mark.usefixtures("load_data")
@pytest.mark.usefixtures("load_model")
class TestDimeNet:
def test_rotation_invariance(self) -> None:
random.seed(1)
data = self.data
# Sampling a random rotation within [-180, 180] for all axes.
transform = RandomRotate([-180, 180], [0, 1, 2])
data_rotated, rot, inv_rot = transform(data.clone())
assert not np.array_equal(data.pos, data_rotated.pos)
# Pass it through the model.
batch = data_list_collater([data, data_rotated])
out = self.model(batch)
# Compare predicted energies and forces (after inv-rotation).
energies = out[0].detach()
np.testing.assert_almost_equal(energies[0], energies[1], decimal=5)
forces = out[1].detach()
logging.info(forces)
np.testing.assert_array_almost_equal(
forces[: forces.shape[0] // 2],
torch.matmul(forces[forces.shape[0] // 2 :], inv_rot),
decimal=5,
)
def test_energy_force_shape(self, snapshot) -> None:
        # Use the preprocessed Data object as-is.
data = self.data
# Pass it through the model.
energy, forces = self.model(data_list_collater([data]))
assert snapshot == energy.shape
assert snapshot == pytest.approx(energy.detach())
assert snapshot == forces.shape
assert snapshot == pytest.approx(forces.detach())
| 2,745 | 27.020408 | 79 | py |
ocp | ocp-main/tests/evaluator/test_evaluator.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
import torch
from ocpmodels.modules.evaluator import (
Evaluator,
cosine_similarity,
magnitude_error,
)
@pytest.fixture(scope="class")
def load_evaluator_s2ef(request) -> None:
request.cls.evaluator = Evaluator(task="s2ef")
prediction = {
"energy": torch.randn(6),
"forces": torch.randn(1000000, 3),
"natoms": torch.tensor(
(100000, 200000, 300000, 200000, 100000, 100000)
),
}
target = {
"energy": torch.randn(6),
"forces": torch.randn(1000000, 3),
"natoms": torch.tensor(
(100000, 200000, 300000, 200000, 100000, 100000)
),
}
request.cls.metrics = request.cls.evaluator.eval(prediction, target)
@pytest.fixture(scope="class")
def load_evaluator_is2rs(request) -> None:
request.cls.evaluator = Evaluator(task="is2rs")
prediction = {
"positions": torch.randn(50, 3),
"natoms": torch.tensor((5, 5, 10, 12, 18)),
"cell": torch.randn(5, 3, 3),
"pbc": torch.tensor([True, True, True]),
}
target = {
"positions": torch.randn(50, 3),
"cell": torch.randn(5, 3, 3),
"natoms": torch.tensor((5, 5, 10, 12, 18)),
"pbc": torch.tensor([True, True, True]),
}
request.cls.metrics = request.cls.evaluator.eval(prediction, target)
@pytest.fixture(scope="class")
def load_evaluator_is2re(request) -> None:
request.cls.evaluator = Evaluator(task="is2re")
prediction = {
"energy": torch.randn(50),
}
target = {
"energy": torch.randn(50),
}
request.cls.metrics = request.cls.evaluator.eval(prediction, target)
class TestMetrics:
def test_cosine_similarity(self) -> None:
v1, v2 = torch.randn(1000000, 3), torch.randn(1000000, 3)
res = cosine_similarity(v1, v2)
np.testing.assert_almost_equal(res["metric"], 0, decimal=2)
np.testing.assert_almost_equal(
res["total"] / res["numel"], res["metric"]
)
def test_magnitude_error(self) -> None:
v1, v2 = (
torch.tensor([[0.0, 1], [-1, 0]]),
torch.tensor([[0.0, 0], [0, 0]]),
)
res = magnitude_error(v1, v2)
np.testing.assert_equal(res["metric"], 1.0)
@pytest.mark.usefixtures("load_evaluator_s2ef")
class TestS2EFEval:
def test_metrics_exist(self) -> None:
assert "energy_mae" in self.metrics
assert "forces_mae" in self.metrics
assert "forces_cos" in self.metrics
assert "energy_force_within_threshold" in self.metrics
@pytest.mark.usefixtures("load_evaluator_is2rs")
class TestIS2RSEval:
def test_metrics_exist(self) -> None:
assert "average_distance_within_threshold" in self.metrics
@pytest.mark.usefixtures("load_evaluator_is2re")
class TestIS2REEval:
def test_metrics_exist(self) -> None:
assert "energy_mae" in self.metrics
assert "energy_mse" in self.metrics
assert "energy_within_threshold" in self.metrics
| 3,210 | 28.731481 | 72 | py |
ocp | ocp-main/tests/datasets/test_ase_lmdb.py | import os
from pathlib import Path
import numpy as np
import pytest
import tqdm
from ase import build
from ase.calculators.singlepoint import SinglePointCalculator
from ase.constraints import FixAtoms
from ase.io import write
from ocpmodels.datasets.lmdb_database import LMDBDatabase
DB_NAME = "ase_lmdb.lmdb"
N_WRITES = 100
N_READS = 200
def cleanup_asedb() -> None:
if Path(DB_NAME).is_file():
Path(DB_NAME).unlink()
if Path(f"{DB_NAME}-lock").is_file():
Path(f"{DB_NAME}-lock").unlink()
test_structures = [
build.molecule("H2O", vacuum=4),
build.bulk("Cu"),
build.fcc111("Pt", size=[2, 2, 3], vacuum=8, periodic=True),
]
test_structures[2].set_constraint(FixAtoms(indices=[0, 1]))
def generate_random_structure():
# Make base slab
slab = build.fcc111("Cu", size=(4, 4, 3), vacuum=10.0)
# Randomly set some elements
slab.set_chemical_symbols(
np.random.choice(["Cu", "Ag", "Au", "Pd"], size=(len(slab)))
)
# Randomly set some positions
slab.positions = np.random.normal(size=slab.positions.shape)
# Add entries for energy/forces/stress/magmom/etc.
    # A property must be one of the ASE core properties to
    # go into a SinglePointCalculator and get stored as
    # fields correctly
spc = SinglePointCalculator(
slab,
energy=np.random.normal(),
forces=np.random.normal(size=slab.positions.shape),
stress=np.random.normal(size=(3, 3)),
magmom=np.random.normal(size=(len(slab))),
)
    slab.calc = spc
# Make up some other properties to show how we can include arbitrary outputs
slab.info["test_info_property_1"] = np.random.normal(size=(3, 3))
slab.info["test_info_property_2"] = np.random.normal(size=(len(slab), 3))
return slab
def write_random_atoms() -> None:
slab = build.fcc111("Cu", size=(4, 4, 3), vacuum=10.0)
with LMDBDatabase(DB_NAME) as db:
for structure in test_structures:
db.write(structure)
for i in tqdm.tqdm(range(N_WRITES)):
slab = generate_random_structure()
# Save the slab info, and make sure the info gets put in as data
db.write(slab, data=slab.info)
def test_aselmdb_write() -> None:
# Representative structure
write_random_atoms()
with LMDBDatabase(DB_NAME, readonly=True) as db:
for i, structure in enumerate(test_structures):
assert str(structure) == str(db._get_row_by_index(i).toatoms())
cleanup_asedb()
def test_aselmdb_count() -> None:
# Representative structure
write_random_atoms()
with LMDBDatabase(DB_NAME, readonly=True) as db:
assert db.count() == N_WRITES + len(test_structures)
cleanup_asedb()
def test_aselmdb_delete() -> None:
cleanup_asedb()
# Representative structure
write_random_atoms()
with LMDBDatabase(DB_NAME) as db:
for i in range(5):
# Note the available ids list is updating
# but the ids themselves are fixed.
db.delete([db.ids[0]])
assert db.count() == N_WRITES + len(test_structures) - 5
cleanup_asedb()
def test_aselmdb_randomreads() -> None:
write_random_atoms()
with LMDBDatabase(DB_NAME, readonly=True) as db:
for i in tqdm.tqdm(range(N_READS)):
total_size = db.count()
row = db._get_row_by_index(np.random.choice(total_size)).toatoms()
del row
cleanup_asedb()
def test_aselmdb_constraintread() -> None:
write_random_atoms()
with LMDBDatabase(DB_NAME, readonly=True) as db:
atoms = db._get_row_by_index(2).toatoms()
assert type(atoms.constraints[0]) == FixAtoms
cleanup_asedb()
def update_keyvalue_pair() -> None:
write_random_atoms()
with LMDBDatabase(DB_NAME) as db:
db.update(1, test=5)
with LMDBDatabase(DB_NAME) as db:
row = db.get_row_by_id(1)
assert row.test == 5
cleanup_asedb()
def update_atoms() -> None:
write_random_atoms()
with LMDBDatabase(DB_NAME) as db:
db.update(40, atoms=test_structures[-1])
with LMDBDatabase(DB_NAME) as db:
row = db.get_row_by_id(40)
assert str(row.toatoms()) == str(test_structures[-1])
cleanup_asedb()
def test_metadata() -> None:
write_random_atoms()
with LMDBDatabase(DB_NAME) as db:
db.metadata = {"test": True}
with LMDBDatabase(DB_NAME, readonly=True) as db:
assert db.metadata["test"] is True
cleanup_asedb()
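# The .lmdb file written above is a drop-in replacement for an ASE SQLite
# database; a sketch of reading it back through the dataset API (this mirrors
# tests/datasets/test_ase_datasets.py and is not exercised here):
#
#   from ocpmodels.datasets import AseDBDataset
#   dataset = AseDBDataset(config={"src": DB_NAME})
#   data = dataset[0]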
| 4,513 | 24.942529 | 80 | py |
ocp | ocp-main/tests/datasets/test_ase_datasets.py | import os
import numpy as np
import pytest
from ase import build, db
from ase.calculators.singlepoint import SinglePointCalculator
from ase.io import Trajectory, write
from ocpmodels.datasets import (
AseDBDataset,
AseReadDataset,
AseReadMultiStructureDataset,
)
from ocpmodels.datasets.lmdb_database import LMDBDatabase
structures = [
build.molecule("H2O", vacuum=4),
build.bulk("Cu"),
build.fcc111("Pt", size=[2, 2, 3], vacuum=8, periodic=True),
]
for atoms in structures:
calc = SinglePointCalculator(atoms, energy=1, forces=atoms.positions)
atoms.calc = calc
atoms.info["test_extensive_property"] = 3 * len(atoms)
structures[2].set_pbc(True)
def test_ase_read_dataset() -> None:
for i, structure in enumerate(structures):
write(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), f"{i}.cif"
),
structure,
)
dataset = AseReadDataset(
config={
"src": os.path.join(os.path.dirname(os.path.abspath(__file__))),
"pattern": "*.cif",
}
)
assert len(dataset) == len(structures)
data = dataset[0]
del data
for i in range(len(structures)):
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), f"{i}.cif"
)
)
dataset.close_db()
def test_ase_db_dataset() -> None:
try:
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.db"
)
)
except FileNotFoundError:
pass
with db.connect(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb.db")
) as database:
for i, structure in enumerate(structures):
database.write(structure)
dataset = AseDBDataset(
config={
"src": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.db"
),
}
)
assert len(dataset) == len(structures)
data = dataset[0]
del data
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb.db")
)
def test_ase_db_dataset_folder() -> None:
try:
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb1.db"
)
)
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb2.db"
)
)
except FileNotFoundError:
pass
with db.connect(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb1.db")
) as database:
for i, structure in enumerate(structures):
database.write(structure)
with db.connect(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb2.db")
) as database:
for i, structure in enumerate(structures):
database.write(structure)
dataset = AseDBDataset(
config={
"src": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "./"
),
}
)
assert len(dataset) == len(structures) * 2
data = dataset[0]
del data
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb1.db")
)
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb2.db")
)
def test_ase_db_dataset_list() -> None:
try:
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb1.db"
)
)
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb2.db"
)
)
except FileNotFoundError:
pass
with db.connect(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb1.db")
) as database:
for i, structure in enumerate(structures):
database.write(structure)
with db.connect(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb2.db")
) as database:
for i, structure in enumerate(structures):
database.write(structure)
dataset = AseDBDataset(
config={
"src": [
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb1.db"
),
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb2.db"
),
]
}
)
assert len(dataset) == len(structures) * 2
data = dataset[0]
del data
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb1.db")
)
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb2.db")
)
def test_ase_lmdb_dataset() -> None:
try:
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.lmdb"
)
)
except FileNotFoundError:
pass
with LMDBDatabase(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb.lmdb")
) as database:
for i, structure in enumerate(structures):
database.write(structure)
dataset = AseDBDataset(
config={
"src": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.lmdb"
),
}
)
assert len(dataset) == len(structures)
data = dataset[0]
del data
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb.lmdb")
)
def test_lmdb_metadata_guesser() -> None:
# Cleanup old lmdb in case it's left over from previous tests
try:
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.lmdb"
)
)
except FileNotFoundError:
pass
# Write an LMDB
with LMDBDatabase(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb.lmdb")
) as database:
for i, structure in enumerate(structures):
database.write(structure, data=structure.info)
dataset = AseDBDataset(
config={
"src": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.lmdb"
),
}
)
metadata = dataset.get_metadata()
# Confirm energy metadata guessed properly
assert metadata["targets"]["energy"]["extensive"] is False
assert metadata["targets"]["energy"]["shape"] == ()
assert metadata["targets"]["energy"]["type"] == "per-image"
# Confirm forces metadata guessed properly
assert metadata["targets"]["forces"]["shape"] == (3,)
assert metadata["targets"]["forces"]["extensive"] is True
assert metadata["targets"]["forces"]["type"] == "per-atom"
    # Confirm extensive-property metadata guessed properly
assert (
metadata["targets"]["info.test_extensive_property"]["extensive"]
is True
)
assert metadata["targets"]["info.test_extensive_property"]["shape"] == ()
assert (
metadata["targets"]["info.test_extensive_property"]["type"]
== "per-image"
)
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb.lmdb")
)
def test_ase_metadata_guesser() -> None:
try:
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.db"
)
)
except FileNotFoundError:
pass
with db.connect(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb.db")
) as database:
for i, structure in enumerate(structures):
database.write(structure, data=structure.info)
dataset = AseDBDataset(
config={
"src": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.db"
),
}
)
metadata = dataset.get_metadata()
# Confirm energy metadata guessed properly
assert metadata["targets"]["energy"]["extensive"] is False
assert metadata["targets"]["energy"]["shape"] == ()
assert metadata["targets"]["energy"]["type"] == "per-image"
# Confirm forces metadata guessed properly
assert metadata["targets"]["forces"]["shape"] == (3,)
assert metadata["targets"]["forces"]["extensive"] is True
assert metadata["targets"]["forces"]["type"] == "per-atom"
    # Confirm extensive-property metadata guessed properly
assert (
metadata["targets"]["info.test_extensive_property"]["extensive"]
is True
)
assert metadata["targets"]["info.test_extensive_property"]["shape"] == ()
assert (
metadata["targets"]["info.test_extensive_property"]["type"]
== "per-image"
)
dataset = AseDBDataset(
config={
"src": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.db"
),
}
)
database.delete([1])
new_structures = [
build.molecule("CH3COOH", vacuum=4),
build.bulk("Al"),
]
for i, structure in enumerate(new_structures):
database.write(structure)
dataset = AseDBDataset(
config={
"src": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "asedb.db"
),
}
)
assert len(dataset) == len(structures) + len(new_structures) - 1
data = dataset[:]
assert data
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "asedb.db")
)
dataset.close_db()
def test_ase_multiread_dataset() -> None:
try:
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test.traj"
)
)
except FileNotFoundError:
pass
try:
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_index_file"
)
)
except FileNotFoundError:
pass
atoms_objects = [build.bulk("Cu", a=a) for a in np.linspace(3.5, 3.7, 10)]
traj = Trajectory(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.traj"),
mode="w",
)
for atoms in atoms_objects:
traj.write(atoms)
dataset = AseReadMultiStructureDataset(
config={
"src": os.path.join(os.path.dirname(os.path.abspath(__file__))),
"pattern": "*.traj",
"keep_in_memory": True,
"atoms_transform_args": {
"skip_always": True,
},
}
)
assert len(dataset) == len(atoms_objects)
[dataset[:]]
f = open(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_index_file"
),
"w",
)
f.write(
f"{os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test.traj')} {len(atoms_objects)}"
)
f.close()
dataset = AseReadMultiStructureDataset(
config={
"index_file": os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_index_file"
)
},
)
assert len(dataset) == len(atoms_objects)
[dataset[:]]
os.remove(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.traj")
)
os.remove(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_index_file"
)
)
| 11,501 | 25.441379 | 103 | py |
ocp | ocp-main/tests/preprocessing/test_radius_graph_pbc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import ase
import numpy as np
import pytest
import torch
from ase.io import read
from ase.lattice.cubic import FaceCenteredCubic
from ase.build import molecule
from pymatgen.io.ase import AseAtomsAdaptor
from torch_geometric.transforms.radius_graph import RadiusGraph
from torch_geometric.utils.sort_edge_index import sort_edge_index
from ocpmodels.common.utils import get_pbc_distances, radius_graph_pbc
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
def check_features_match(
edge_index_1, cell_offsets_1, edge_index_2, cell_offsets_2
) -> bool:
# Combine both edge indices and offsets to one tensor
features_1 = torch.cat((edge_index_1, cell_offsets_1.T), dim=0).T
features_2 = torch.cat((edge_index_2, cell_offsets_2.T), dim=0).T.long()
# Convert rows of tensors to sets. The order of edges is not guaranteed
features_1_set = {tuple(x.tolist()) for x in features_1}
features_2_set = {tuple(x.tolist()) for x in features_2}
# Ensure sets are not empty
assert len(features_1_set) > 0
assert len(features_2_set) > 0
# Ensure sets are the same
assert features_1_set == features_2_set
return True
@pytest.mark.usefixtures("load_data")
class TestRadiusGraphPBC:
def test_radius_graph_pbc(self) -> None:
data = self.data
batch = data_list_collater([data] * 5)
edge_index, cell_offsets, neighbors = radius_graph_pbc(
batch,
radius=6,
max_num_neighbors_threshold=2000,
pbc=[True, True, False],
)
assert check_features_match(
batch.edge_index, batch.cell_offsets, edge_index, cell_offsets
)
def test_bulk(self) -> None:
radius = 10
# Must be sufficiently large to ensure all edges are retained
max_neigh = 2000
a2g = AtomsToGraphs(radius=radius, max_neigh=max_neigh)
structure = FaceCenteredCubic("Pt", size=[1, 2, 3])
data = a2g.convert(structure)
batch = data_list_collater([data])
# Ensure adequate distance between repeated cells
structure.cell[0] *= radius
structure.cell[1] *= radius
structure.cell[2] *= radius
# [False, False, False]
data = a2g.convert(structure)
non_pbc = data.edge_index.shape[1]
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[False, False, False],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
# [True, False, False]
structure.cell[0] /= radius
data = a2g.convert(structure)
pbc_x = data.edge_index.shape[1]
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[True, False, False],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
# [True, True, False]
structure.cell[1] /= radius
data = a2g.convert(structure)
pbc_xy = data.edge_index.shape[1]
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[True, True, False],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
# [False, True, False]
structure.cell[0] *= radius
data = a2g.convert(structure)
pbc_y = data.edge_index.shape[1]
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[False, True, False],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
# [False, True, True]
structure.cell[2] /= radius
data = a2g.convert(structure)
pbc_yz = data.edge_index.shape[1]
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[False, True, True],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
# [False, False, True]
structure.cell[1] *= radius
data = a2g.convert(structure)
pbc_z = data.edge_index.shape[1]
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[False, False, True],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
# [True, False, True]
structure.cell[0] /= radius
data = a2g.convert(structure)
pbc_xz = data.edge_index.shape[1]
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[True, False, True],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
# [True, True, True]
structure.cell[1] /= radius
data = a2g.convert(structure)
pbc_all = data.edge_index.shape[1]
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[True, True, True],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
# Ensure edges are actually found
assert non_pbc > 0
assert pbc_x > non_pbc
assert pbc_y > non_pbc
assert pbc_z > non_pbc
assert pbc_xy > max(pbc_x, pbc_y)
assert pbc_yz > max(pbc_y, pbc_z)
assert pbc_xz > max(pbc_x, pbc_z)
assert pbc_all > max(pbc_xy, pbc_yz, pbc_xz)
structure = FaceCenteredCubic("Pt", size=[1, 2, 3])
# Ensure radius_graph_pbc matches radius_graph for non-PBC condition
RG = RadiusGraph(r=radius, max_num_neighbors=max_neigh)
radgraph = RG(batch)
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[False, False, False],
)
assert (
sort_edge_index(out[0]) == sort_edge_index(radgraph.edge_index)
).all()
def test_molecule(self) -> None:
radius = 6
max_neigh = 1000
a2g = AtomsToGraphs(radius=radius, max_neigh=max_neigh)
structure = molecule("CH3COOH")
structure.cell = [[20, 0, 0], [0, 20, 0], [0, 0, 20]]
data = a2g.convert(structure)
batch = data_list_collater([data])
out = radius_graph_pbc(
batch,
radius=radius,
max_num_neighbors_threshold=max_neigh,
pbc=[False, False, False],
)
assert check_features_match(
data.edge_index, data.cell_offsets, out[0], out[1]
)
| 7,781 | 28.589354 | 79 | py |
ocp | ocp-main/tests/preprocessing/test_atoms_to_graphs.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import numpy as np
import pytest
from ase.io import read
from ase.neighborlist import NeighborList, NewPrimitiveNeighborList
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def atoms_to_graphs_internals(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
test_object = AtomsToGraphs(
max_neigh=200,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
request.cls.atg = test_object
request.cls.atoms = atoms
@pytest.mark.usefixtures("atoms_to_graphs_internals")
class TestAtomsToGraphs:
def test_gen_neighbors_pymatgen(self) -> None:
# call the internal function
(
c_index,
n_index,
n_distances,
offsets,
) = self.atg._get_neighbors_pymatgen(self.atoms)
edge_index, edge_distances, cell_offsets = self.atg._reshape_features(
c_index, n_index, n_distances, offsets
)
# use ase to compare distances and indices
n = NeighborList(
cutoffs=[self.atg.radius / 2.0] * len(self.atoms),
self_interaction=False,
skin=0,
bothways=True,
primitive=NewPrimitiveNeighborList,
)
n.update(self.atoms)
ase_neighbors = [
n.get_neighbors(index) for index in range(len(self.atoms))
]
ase_s_index = []
ase_n_index = []
ase_offsets = []
for i, n in enumerate(ase_neighbors):
nidx = n[0]
ncount = len(nidx)
ase_s_index += [i] * ncount
ase_n_index += nidx.tolist()
ase_offsets.append(n[1])
ase_s_index = np.array(ase_s_index)
ase_n_index = np.array(ase_n_index)
ase_offsets = np.concatenate(ase_offsets)
# compute ase distance
cell = self.atoms.cell
positions = self.atoms.positions
distance_vec = positions[ase_s_index] - positions[ase_n_index]
_offsets = np.dot(ase_offsets, cell)
distance_vec -= _offsets
act_dist = np.linalg.norm(distance_vec, axis=-1)
act_dist = np.sort(act_dist)
act_index = np.sort(ase_n_index)
test_dist = np.sort(edge_distances)
test_index = np.sort(edge_index[0, :])
# check that the distance and neighbor index values are correct
np.testing.assert_allclose(act_dist, test_dist)
np.testing.assert_array_equal(act_index, test_index)
def test_convert(self) -> None:
# run convert on a single atoms obj
data = self.atg.convert(self.atoms)
# atomic numbers
act_atomic_numbers = self.atoms.get_atomic_numbers()
atomic_numbers = data.atomic_numbers.numpy()
np.testing.assert_equal(act_atomic_numbers, atomic_numbers)
# positions
act_positions = self.atoms.get_positions()
positions = data.pos.numpy()
np.testing.assert_allclose(act_positions, positions)
# check energy value
act_energy = self.atoms.get_potential_energy(apply_constraint=False)
test_energy = data.y
np.testing.assert_equal(act_energy, test_energy)
# forces
act_forces = self.atoms.get_forces(apply_constraint=False)
forces = data.force.numpy()
np.testing.assert_allclose(act_forces, forces)
def test_convert_all(self) -> None:
# run convert_all on a list with one atoms object
# this does not test the atoms.db functionality
atoms_list = [self.atoms]
data_list = self.atg.convert_all(atoms_list)
# check shape/values of features
# atomic numbers
        act_atomic_numbers = self.atoms.get_atomic_numbers()
        atomic_numbers = data_list[0].atomic_numbers.numpy()
        np.testing.assert_equal(act_atomic_numbers, atomic_numbers)
# positions
act_positions = self.atoms.get_positions()
positions = data_list[0].pos.numpy()
np.testing.assert_allclose(act_positions, positions)
# check energy value
act_energy = self.atoms.get_potential_energy(apply_constraint=False)
test_energy = data_list[0].y
np.testing.assert_equal(act_energy, test_energy)
# forces
act_forces = self.atoms.get_forces(apply_constraint=False)
forces = data_list[0].force.numpy()
np.testing.assert_allclose(act_forces, forces)
| 4,715 | 34.727273 | 79 | py |
ocp | ocp-main/tests/preprocessing/test_pbc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import numpy as np
import pytest
from ase.io import read
from ocpmodels.common.utils import get_pbc_distances
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
@pytest.fixture(scope="class")
def load_data(request) -> None:
atoms = read(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "atoms.json"),
index=0,
format="json",
)
a2g = AtomsToGraphs(
max_neigh=12,
radius=6,
r_energy=True,
r_forces=True,
r_distances=True,
)
data_list = a2g.convert_all([atoms])
request.cls.data = data_list[0]
@pytest.mark.usefixtures("load_data")
class TestPBC:
def test_pbc_distances(self) -> None:
data = self.data
batch = data_list_collater([data] * 5)
out = get_pbc_distances(
batch.pos,
batch.edge_index,
batch.cell,
batch.cell_offsets,
batch.neighbors,
)
edge_index, pbc_distances = out["edge_index"], out["distances"]
np.testing.assert_array_equal(
batch.edge_index,
edge_index,
)
np.testing.assert_array_almost_equal(batch.distances, pbc_distances)
| 1,433 | 24.607143 | 79 | py |
ocp | ocp-main/tests/preprocessing/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
| 178 | 24.571429 | 63 | py |
ocp | ocp-main/docs/source/conf.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from typing import List
sys.path.insert(0, os.path.abspath("../../"))
# -- Project information -----------------------------------------------------
project = "Open Catalyst Project"
copyright = "2020, Facebook, Inc."
author = "Anuroop Sriram"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx_rtd_theme",
"nbsphinx",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[str] = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
master_doc = "index"
| 2,204 | 30.956522 | 79 | py |
ocp | ocp-main/ocpmodels/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
| 178 | 24.571429 | 63 | py |
ocp | ocp-main/ocpmodels/modules/normalizer.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
class Normalizer:
"""Normalize a Tensor and restore it later."""
def __init__(self, tensor=None, mean=None, std=None, device=None) -> None:
"""tensor is taken as a sample to calculate the mean and std"""
if tensor is None and mean is None:
return
if device is None:
device = "cpu"
if tensor is not None:
self.mean = torch.mean(tensor, dim=0).to(device)
self.std = torch.std(tensor, dim=0).to(device)
return
if mean is not None and std is not None:
self.mean = torch.tensor(mean).to(device)
self.std = torch.tensor(std).to(device)
def to(self, device) -> None:
self.mean = self.mean.to(device)
self.std = self.std.to(device)
def norm(self, tensor):
return (tensor - self.mean) / self.std
def denorm(self, normed_tensor):
return normed_tensor * self.std + self.mean
def state_dict(self):
return {"mean": self.mean, "std": self.std}
def load_state_dict(self, state_dict) -> None:
self.mean = state_dict["mean"].to(self.mean.device)
self.std = state_dict["std"].to(self.mean.device)
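# Minimal usage sketch (illustrative, not part of the original module): fit a
# Normalizer on a sample tensor and round-trip it through norm()/denorm().
# The (100, 3) sample shape is an arbitrary example.
if __name__ == "__main__":
    sample = torch.randn(100, 3) * 5.0 + 2.0
    normalizer = Normalizer(tensor=sample)
    restored = normalizer.denorm(normalizer.norm(sample))
    assert torch.allclose(restored, sample, atol=1e-5)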
| 1,390 | 28.595745 | 78 | py |
ocp | ocp-main/ocpmodels/modules/loss.py | import logging
from typing import Optional
import torch
from torch import nn
from ocpmodels.common import distutils
class L2MAELoss(nn.Module):
def __init__(self, reduction: str = "mean") -> None:
super().__init__()
self.reduction = reduction
assert reduction in ["mean", "sum"]
def forward(self, input: torch.Tensor, target: torch.Tensor):
dists = torch.norm(input - target, p=2, dim=-1)
if self.reduction == "mean":
return torch.mean(dists)
elif self.reduction == "sum":
return torch.sum(dists)
class AtomwiseL2Loss(nn.Module):
def __init__(self, reduction: str = "mean") -> None:
super().__init__()
self.reduction = reduction
assert reduction in ["mean", "sum"]
def forward(
self,
input: torch.Tensor,
target: torch.Tensor,
natoms: torch.Tensor,
):
assert natoms.shape[0] == input.shape[0] == target.shape[0]
assert len(natoms.shape) == 1 # (nAtoms, )
dists = torch.norm(input - target, p=2, dim=-1)
loss = natoms * dists
if self.reduction == "mean":
return torch.mean(loss)
elif self.reduction == "sum":
return torch.sum(loss)
class DDPLoss(nn.Module):
def __init__(self, loss_fn, reduction: str = "mean") -> None:
super().__init__()
self.loss_fn = loss_fn
self.loss_fn.reduction = "sum"
self.reduction = reduction
assert reduction in ["mean", "sum"]
def forward(
self,
input: torch.Tensor,
target: torch.Tensor,
natoms: Optional[torch.Tensor] = None,
batch_size: Optional[int] = None,
):
# zero out nans, if any
found_nans_or_infs = not torch.all(input.isfinite())
        if found_nans_or_infs:
logging.warning("Found nans while computing loss")
input = torch.nan_to_num(input, nan=0.0)
if natoms is None:
loss = self.loss_fn(input, target)
else: # atom-wise loss
loss = self.loss_fn(input, target, natoms)
if self.reduction == "mean":
num_samples = (
batch_size if batch_size is not None else input.shape[0]
)
num_samples = distutils.all_reduce(
num_samples, device=input.device
)
# Multiply by world size since gradients are averaged
# across DDP replicas
return loss * distutils.get_world_size() / num_samples
else:
return loss
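# Usage sketch (illustrative): DDPLoss forces the wrapped loss into "sum"
# reduction and divides by the global sample count, so "mean" is taken over
# all DDP replicas. When torch.distributed is not initialized, distutils
# falls back to single-process values, so this also runs standalone.
if __name__ == "__main__":
    loss_fn = DDPLoss(L2MAELoss(), reduction="mean")
    pred, target = torch.randn(8, 3), torch.randn(8, 3)
    print(loss_fn(pred, target))  # scalar tensor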
| 2,597 | 29.564706 | 72 | py |
ocp | ocp-main/ocpmodels/modules/exponential_moving_average.py | """
Copied (and improved) from:
https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py (MIT license)
"""
from __future__ import division, unicode_literals
import copy
import weakref
from typing import List, Iterable, Optional
import torch
from ocpmodels.common.typing import none_throws
# Partially based on:
# https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/training/moving_averages.py
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter` (typically from
`model.parameters()`).
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
def __init__(
self,
parameters: Iterable[torch.nn.Parameter],
decay: float,
use_num_updates: bool = False,
) -> None:
if decay < 0.0 or decay > 1.0:
raise ValueError("Decay must be between 0 and 1")
self.decay = decay
self.num_updates = 0 if use_num_updates else None
parameters = list(parameters)
self.shadow_params = [
p.clone().detach() for p in parameters if p.requires_grad
]
self.collected_params: List[torch.nn.Parameter] = []
# By maintaining only a weakref to each parameter,
# we maintain the old GC behaviour of ExponentialMovingAverage:
# if the model goes out of scope but the ExponentialMovingAverage
# is kept, no references to the model or its parameters will be
# maintained, and the model will be cleaned up.
self._params_refs = [
weakref.ref(p) for p in parameters if p.requires_grad
]
def _get_parameters(
self, parameters: Optional[Iterable[torch.nn.Parameter]]
) -> Iterable[torch.nn.Parameter]:
none_msg = (
"(One of) the parameters with which this "
"ExponentialMovingAverage "
"was initialized no longer exists (was garbage collected);"
" please either provide `parameters` explicitly or keep "
"the model to which they belong from being garbage "
"collected."
)
if parameters is None:
return [none_throws(p(), none_msg) for p in self._params_refs]
else:
return [p for p in parameters if p.requires_grad]
def update(
self, parameters: Optional[Iterable[torch.nn.Parameter]] = None
) -> None:
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
parameters = self._get_parameters(parameters)
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(
decay, (1 + self.num_updates) / (10 + self.num_updates)
)
one_minus_decay = 1.0 - decay
with torch.no_grad():
for s_param, param in zip(self.shadow_params, parameters):
tmp = param - s_param
s_param.add_(tmp, alpha=one_minus_decay)
def copy_to(
self, parameters: Optional[Iterable[torch.nn.Parameter]] = None
) -> None:
"""
Copy current parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
parameters = self._get_parameters(parameters)
for s_param, param in zip(self.shadow_params, parameters):
param.data.copy_(s_param.data)
def store(
self, parameters: Optional[Iterable[torch.nn.Parameter]] = None
) -> None:
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                temporarily stored. If `None`, the parameters with which this
`ExponentialMovingAverage` was initialized will be used.
"""
parameters = self._get_parameters(parameters)
self.collected_params = [param.clone() for param in parameters]
def restore(
self, parameters: Optional[Iterable[torch.nn.Parameter]] = None
) -> None:
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
parameters = self._get_parameters(parameters)
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
def state_dict(self) -> dict:
r"""Returns the state of the ExponentialMovingAverage as a dict."""
# Following PyTorch conventions, references to tensors are returned:
# "returns a reference to the state and not its copy!" -
# https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict
return {
"decay": self.decay,
"num_updates": self.num_updates,
"shadow_params": self.shadow_params,
"collected_params": self.collected_params,
}
def load_state_dict(self, state_dict: dict) -> None:
r"""Loads the ExponentialMovingAverage state.
Args:
state_dict (dict): EMA state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# deepcopy, to be consistent with module API
state_dict = copy.deepcopy(state_dict)
self.decay = state_dict["decay"]
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1")
self.num_updates = state_dict["num_updates"]
assert self.num_updates is None or isinstance(
self.num_updates, int
), "Invalid num_updates"
assert isinstance(
state_dict["shadow_params"], list
), "shadow_params must be a list"
self.shadow_params = [
p.to(self.shadow_params[i].device)
for i, p in enumerate(state_dict["shadow_params"])
]
assert all(
isinstance(p, torch.Tensor) for p in self.shadow_params
), "shadow_params must all be Tensors"
assert isinstance(
state_dict["collected_params"], list
), "collected_params must be a list"
# collected_params is empty at initialization,
# so use shadow_params for device instead
self.collected_params = [
p.to(self.shadow_params[i].device)
for i, p in enumerate(state_dict["collected_params"])
]
assert all(
isinstance(p, torch.Tensor) for p in self.collected_params
), "collected_params must all be Tensors"
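# Usage sketch (illustrative; `model` and `opt` are stand-ins, not names from
# this module): update the shadow weights after every optimizer step, then
# temporarily swap them in for evaluation.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    ema = ExponentialMovingAverage(model.parameters(), decay=0.999)
    opt = torch.optim.SGD(model.parameters(), lr=1e-2)
    for _ in range(3):
        opt.zero_grad()
        model(torch.randn(8, 4)).pow(2).mean().backward()
        opt.step()
        ema.update()  # track the shadow params after each step
    ema.store()    # stash the raw weights
    ema.copy_to()  # evaluate with the averaged weights here
    ema.restore()  # put the raw weights back before training resumes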
| 7,758 | 37.410891 | 99 | py |
ocp | ocp-main/ocpmodels/modules/scheduler.py | import inspect
import torch.optim.lr_scheduler as lr_scheduler
from ocpmodels.common.utils import warmup_lr_lambda
class LRScheduler:
"""
Learning rate scheduler class for torch.optim learning rate schedulers
Notes:
        If no learning rate scheduler is specified in the config, the default
        is warmup_lr_lambda (ocpmodels.common.utils) rather than no scheduler;
        this is for backward compatibility. To run without an lr scheduler,
        specify scheduler: "Null" in the optim section of the config.
Args:
optimizer (obj): torch optim object
config (dict): Optim dict from the input config
"""
def __init__(self, optimizer, config) -> None:
self.optimizer = optimizer
self.config = config.copy()
if "scheduler" in self.config:
self.scheduler_type = self.config["scheduler"]
else:
self.scheduler_type = "LambdaLR"
            def scheduler_lambda_fn(x):
                return warmup_lr_lambda(x, self.config)
self.config["lr_lambda"] = scheduler_lambda_fn
if self.scheduler_type != "Null":
self.scheduler = getattr(lr_scheduler, self.scheduler_type)
scheduler_args = self.filter_kwargs(config)
self.scheduler = self.scheduler(optimizer, **scheduler_args)
def step(self, metrics=None, epoch=None) -> None:
if self.scheduler_type == "Null":
return
if self.scheduler_type == "ReduceLROnPlateau":
if metrics is None:
raise Exception(
"Validation set required for ReduceLROnPlateau."
)
self.scheduler.step(metrics)
else:
self.scheduler.step()
def filter_kwargs(self, config):
# adapted from https://stackoverflow.com/questions/26515595/
sig = inspect.signature(self.scheduler)
filter_keys = [
param.name
for param in sig.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD
]
filter_keys.remove("optimizer")
scheduler_args = {
arg: self.config[arg] for arg in self.config if arg in filter_keys
}
return scheduler_args
def get_lr(self):
for group in self.optimizer.param_groups:
return group["lr"]
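# Usage sketch (illustrative; the optim-config keys below are arbitrary
# examples): any torch.optim.lr_scheduler class can be selected by name, with
# its constructor arguments pulled from the config via filter_kwargs.
if __name__ == "__main__":
    import torch

    optimizer = torch.optim.AdamW(
        [torch.nn.Parameter(torch.zeros(1))], lr=1e-3
    )
    scheduler = LRScheduler(
        optimizer, {"scheduler": "StepLR", "step_size": 10, "gamma": 0.5}
    )
    for _ in range(20):
        scheduler.step()
    print(scheduler.get_lr())  # 1e-3 * 0.5**2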
| 2,342 | 33.970149 | 81 | py |
ocp | ocp-main/ocpmodels/modules/evaluator.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
from typing import Dict, Union
"""
An evaluation module for use with the OCP dataset and suite of tasks. It should
be possible to import this independently of the rest of the codebase, e.g.:
```
from ocpmodels.modules import Evaluator
evaluator = Evaluator(task="is2re")
perf = evaluator.eval(prediction, target)
```
task: "s2ef", "is2rs", "is2re".
We specify a default set of metrics for each task, but should be easy to extend
to add more metrics. `evaluator.eval` takes as input two dictionaries, one for
predictions and another for targets to check against. It returns a dictionary
with the relevant metrics computed.
"""
class Evaluator:
task_metrics = {
"s2ef": [
"forcesx_mae",
"forcesy_mae",
"forcesz_mae",
"forces_mae",
"forces_cos",
"forces_magnitude",
"energy_mae",
"energy_force_within_threshold",
],
"is2rs": [
"average_distance_within_threshold",
"positions_mae",
"positions_mse",
],
"is2re": ["energy_mae", "energy_mse", "energy_within_threshold"],
}
task_attributes = {
"s2ef": ["energy", "forces", "natoms"],
"is2rs": ["positions", "cell", "pbc", "natoms"],
"is2re": ["energy"],
}
task_primary_metric = {
"s2ef": "energy_force_within_threshold",
"is2rs": "average_distance_within_threshold",
"is2re": "energy_mae",
}
def __init__(self, task: str) -> None:
assert task in ["s2ef", "is2rs", "is2re"]
self.task = task
self.metric_fn = self.task_metrics[task]
    def eval(self, prediction, target, prev_metrics=None):
for attr in self.task_attributes[self.task]:
assert attr in prediction
assert attr in target
assert prediction[attr].shape == target[attr].shape
        # Start from a fresh dict by default to avoid the shared
        # mutable-default-argument pitfall.
        metrics = prev_metrics if prev_metrics is not None else {}
for fn in self.task_metrics[self.task]:
            # Resolve each metric function by name from this module's namespace.
            res = eval(fn)(prediction, target)
metrics = self.update(fn, res, metrics)
return metrics
def update(self, key, stat, metrics):
if key not in metrics:
metrics[key] = {
"metric": None,
"total": 0,
"numel": 0,
}
if isinstance(stat, dict):
# If dictionary, we expect it to have `metric`, `total`, `numel`.
metrics[key]["total"] += stat["total"]
metrics[key]["numel"] += stat["numel"]
metrics[key]["metric"] = (
metrics[key]["total"] / metrics[key]["numel"]
)
elif isinstance(stat, float) or isinstance(stat, int):
# If float or int, just add to the total and increment numel by 1.
metrics[key]["total"] += stat
metrics[key]["numel"] += 1
metrics[key]["metric"] = (
metrics[key]["total"] / metrics[key]["numel"]
)
elif torch.is_tensor(stat):
raise NotImplementedError
return metrics
def energy_mae(prediction, target):
return absolute_error(prediction["energy"], target["energy"])
def energy_mse(prediction, target):
return squared_error(prediction["energy"], target["energy"])
def forcesx_mae(prediction, target):
return absolute_error(prediction["forces"][:, 0], target["forces"][:, 0])
def forcesx_mse(prediction, target):
return squared_error(prediction["forces"][:, 0], target["forces"][:, 0])
def forcesy_mae(prediction, target):
return absolute_error(prediction["forces"][:, 1], target["forces"][:, 1])
def forcesy_mse(prediction, target):
return squared_error(prediction["forces"][:, 1], target["forces"][:, 1])
def forcesz_mae(prediction, target):
return absolute_error(prediction["forces"][:, 2], target["forces"][:, 2])
def forcesz_mse(prediction, target):
return squared_error(prediction["forces"][:, 2], target["forces"][:, 2])
def forces_mae(prediction, target):
return absolute_error(prediction["forces"], target["forces"])
def forces_mse(prediction, target):
return squared_error(prediction["forces"], target["forces"])
def forces_cos(prediction, target):
return cosine_similarity(prediction["forces"], target["forces"])
def forces_magnitude(prediction, target):
return magnitude_error(prediction["forces"], target["forces"], p=2)
def positions_mae(prediction, target):
return absolute_error(prediction["positions"], target["positions"])
def positions_mse(prediction, target):
return squared_error(prediction["positions"], target["positions"])
def energy_force_within_threshold(
prediction, target
) -> Dict[str, Union[float, int]]:
# Note that this natoms should be the count of free atoms we evaluate over.
assert target["natoms"].sum() == prediction["forces"].size(0)
assert target["natoms"].size(0) == prediction["energy"].size(0)
# compute absolute error on per-atom forces and energy per system.
# then count the no. of systems where max force error is < 0.03 and max
# energy error is < 0.02.
f_thresh = 0.03
e_thresh = 0.02
success = 0
total = int(target["natoms"].size(0))
error_forces = torch.abs(target["forces"] - prediction["forces"])
error_energy = torch.abs(target["energy"] - prediction["energy"])
start_idx = 0
for i, n in enumerate(target["natoms"]):
if (
error_energy[i] < e_thresh
and error_forces[start_idx : start_idx + n].max() < f_thresh
):
success += 1
start_idx += n
return {
"metric": success / total,
"total": success,
"numel": total,
}
def energy_within_threshold(
prediction, target
) -> Dict[str, Union[float, int]]:
# compute absolute error on energy per system.
# then count the no. of systems where max energy error is < 0.02.
e_thresh = 0.02
error_energy = torch.abs(target["energy"] - prediction["energy"])
success = (error_energy < e_thresh).sum().item()
total = target["energy"].size(0)
return {
"metric": success / total,
"total": success,
"numel": total,
}
def average_distance_within_threshold(
prediction, target
) -> Dict[str, Union[float, int]]:
pred_pos = torch.split(
prediction["positions"], prediction["natoms"].tolist()
)
target_pos = torch.split(target["positions"], target["natoms"].tolist())
mean_distance = []
for idx, ml_pos in enumerate(pred_pos):
mean_distance.append(
np.mean(
np.linalg.norm(
min_diff(
ml_pos.detach().cpu().numpy(),
target_pos[idx].detach().cpu().numpy(),
target["cell"][idx].detach().cpu().numpy(),
target["pbc"].tolist(),
),
axis=1,
)
)
)
success = 0
intv = np.arange(0.01, 0.5, 0.001)
for i in intv:
success += sum(np.array(mean_distance) < i)
total = len(mean_distance) * len(intv)
return {"metric": success / total, "total": success, "numel": total}
def min_diff(pred_pos, dft_pos, cell, pbc):
pos_diff = pred_pos - dft_pos
fractional = np.linalg.solve(cell.T, pos_diff.T).T
for i, periodic in enumerate(pbc):
        # Apply the modulo twice: floating-point rounding can make the first
        # pass return exactly 1.0 for tiny negative values; the second pass
        # maps that back to 0.0.
if periodic:
fractional[:, i] %= 1.0
fractional[:, i] %= 1.0
fractional[fractional > 0.5] -= 1
return np.matmul(fractional, cell)
def cosine_similarity(prediction: torch.Tensor, target: torch.Tensor):
error = torch.cosine_similarity(prediction, target)
return {
"metric": torch.mean(error).item(),
"total": torch.sum(error).item(),
"numel": error.numel(),
}
def absolute_error(
prediction: torch.Tensor, target: torch.Tensor
) -> Dict[str, Union[float, int]]:
error = torch.abs(target - prediction)
return {
"metric": torch.mean(error).item(),
"total": torch.sum(error).item(),
"numel": prediction.numel(),
}
def squared_error(
prediction: torch.Tensor, target: torch.Tensor
) -> Dict[str, Union[float, int]]:
error = (target - prediction) ** 2
return {
"metric": torch.mean(error).item(),
"total": torch.sum(error).item(),
"numel": prediction.numel(),
}
def magnitude_error(
prediction: torch.Tensor, target: torch.Tensor, p: int = 2
) -> Dict[str, Union[float, int]]:
assert prediction.shape[1] > 1
error = torch.abs(
torch.norm(prediction, p=p, dim=-1) - torch.norm(target, p=p, dim=-1)
)
return {
"metric": torch.mean(error).item(),
"total": torch.sum(error).item(),
"numel": error.numel(),
}
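# Usage sketch (illustrative toy tensors): compute IS2RE energy metrics for a
# batch of three systems.
if __name__ == "__main__":
    evaluator = Evaluator(task="is2re")
    prediction = {"energy": torch.tensor([1.00, 2.00, 3.00])}
    target = {"energy": torch.tensor([1.01, 2.50, 3.00])}
    metrics = evaluator.eval(prediction, target)
    print(metrics["energy_mae"]["metric"])
    print(metrics["energy_within_threshold"]["metric"])  # 2/3 within 0.02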
| 9,085 | 28.028754 | 79 | py |
ocp | ocp-main/ocpmodels/modules/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
| 178 | 24.571429 | 63 | py |
ocp | ocp-main/ocpmodels/modules/scaling/fit.py | import logging
import math
import readline
import sys
from itertools import islice
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Literal
import torch
import torch.nn as nn
from torch.nn.parallel.distributed import DistributedDataParallel
from ocpmodels.common.data_parallel import OCPDataParallel
from ocpmodels.common.flags import flags
from ocpmodels.common.utils import (
build_config,
new_trainer_context,
setup_logging,
)
from ocpmodels.modules.scaling import ScaleFactor
from ocpmodels.modules.scaling.compat import load_scales_compat
if TYPE_CHECKING:
from ocpmodels.trainers.base_trainer import BaseTrainer
def _prefilled_input(prompt: str, prefill: str = "") -> str:
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return input(prompt)
finally:
readline.set_startup_hook()
def _train_batch(trainer: "BaseTrainer", batch) -> None:
with torch.no_grad():
with torch.cuda.amp.autocast(enabled=trainer.scaler is not None):
out = trainer._forward(batch)
loss = trainer._compute_loss(out, batch)
del out, loss
def main(*, num_batches: int = 16) -> None:
# region args/config setup
setup_logging()
parser = flags.get_parser()
args, override_args = parser.parse_known_args()
_config = build_config(args, override_args)
_config["logger"] = "tensorboard"
# endregion
assert not args.distributed, "This doesn't work with DDP"
with new_trainer_context(args=args, config=_config) as ctx:
config = ctx.config
trainer = ctx.trainer
ckpt_file = config.get("checkpoint", None)
assert (
ckpt_file is not None
), "Checkpoint file not specified. Please specify --checkpoint <path>"
ckpt_file = Path(ckpt_file)
logging.info(
f"Input checkpoint path: {ckpt_file}, {ckpt_file.exists()=}"
)
model: nn.Module = trainer.model
val_loader = trainer.val_loader
assert (
val_loader is not None
), "Val dataset is required for making predictions"
if ckpt_file.exists():
trainer.load_checkpoint(str(ckpt_file))
        # region reload scale file contents if necessary
# unwrap module from DP/DDP
unwrapped_model = model
while isinstance(
unwrapped_model, (DistributedDataParallel, OCPDataParallel)
):
unwrapped_model = unwrapped_model.module
assert isinstance(
unwrapped_model, nn.Module
), "Model is not a nn.Module"
load_scales_compat(unwrapped_model, config.get("scale_file", None))
# endregion
model.eval()
# recursively go through the submodules and get the ScaleFactor modules
scale_factors: Dict[str, ScaleFactor] = {
name: module
for name, module in model.named_modules()
if isinstance(module, ScaleFactor)
}
mode: Literal["all", "unfitted"] = "all"
# region detect fitted/unfitted factors
fitted_scale_factors = [
f"{name}: {module.scale_factor.item():.3f}"
for name, module in scale_factors.items()
if module.fitted
]
unfitted_scale_factors = [
name for name, module in scale_factors.items() if not module.fitted
]
fitted_scale_factors_str = ", ".join(fitted_scale_factors)
logging.info(f"Fitted scale factors: [{fitted_scale_factors_str}]")
unfitted_scale_factors_str = ", ".join(unfitted_scale_factors)
logging.info(f"Unfitted scale factors: [{unfitted_scale_factors_str}]")
if fitted_scale_factors:
flag = input(
"Do you want to continue and fit all scale factors (1), "
"only fit the variables not fitted yet (2), or exit (3)? "
)
if str(flag) == "1":
mode = "all"
logging.info("Fitting all scale factors.")
elif str(flag) == "2":
mode = "unfitted"
logging.info("Only fitting unfitted variables.")
else:
print(flag)
logging.info("Exiting script")
sys.exit()
# endregion
# region get the output path
out_path = Path(
_prefilled_input(
"Enter output path for fitted scale factors: ",
prefill=str(ckpt_file),
)
)
if out_path.exists():
logging.warning(f"Already found existing file: {out_path}")
flag = input(
"Do you want to continue and overwrite existing file (1), "
"or exit (2)? "
)
if str(flag) == "1":
logging.info("Overwriting existing file.")
else:
logging.info("Exiting script")
sys.exit()
logging.info(
f"Output path for fitted scale factors: {out_path}, {out_path.exists()=}"
)
# endregion
# region reset the scale factors if mode == "all"
if mode == "all":
logging.info("Fitting all scale factors.")
for name, scale_factor in scale_factors.items():
if scale_factor.fitted:
logging.info(
f"{name} is already fitted in the checkpoint, resetting it. {scale_factor.scale_factor}"
)
scale_factor.reset_()
# endregion
# region we do a single pass through the network to get the correct execution order of the scale factors
scale_factor_indices: Dict[str, int] = {}
max_idx = 0
# initialize all scale factors
for name, module in scale_factors.items():
def index_fn(name: str = name) -> None:
nonlocal max_idx
assert name is not None
if name not in scale_factor_indices:
scale_factor_indices[name] = max_idx
logging.debug(f"Scale factor for {name} = {max_idx}")
max_idx += 1
module.initialize_(index_fn=index_fn)
# single pass through network
_train_batch(trainer, next(iter(val_loader)))
# sort the scale factors by their computation order
sorted_factors = sorted(
scale_factors.items(),
key=lambda x: scale_factor_indices.get(x[0], math.inf),
)
logging.info("Sorted scale factors by computation order:")
for name, _ in sorted_factors:
logging.info(f"{name}: {scale_factor_indices[name]}")
# endregion
# loop over the scale factors in the computation order
# and fit them one by one
logging.info("Start fitting")
for name, module in sorted_factors:
if mode == "unfitted" and module.fitted:
logging.info(f"Skipping {name} (already fitted)")
continue
logging.info(f"Fitting {name}...")
with module.fit_context_():
for batch in islice(val_loader, num_batches):
_train_batch(trainer, batch)
stats, ratio, value = module.fit_()
logging.info(
f"Variable: {name}, "
f"Var_in: {stats['variance_in']:.3f}, "
f"Var_out: {stats['variance_out']:.3f}, "
f"Ratio: {ratio:.3f} => Scaling factor: {value:.3f}"
)
# make sure all scale factors are fitted
for name, module in sorted_factors:
assert module.fitted, f"{name} is not fitted"
# region save the scale factors to the checkpoint file
trainer.config["cmd"]["checkpoint_dir"] = out_path.parent
trainer.is_debug = False
out_file = trainer.save(
metrics=None,
checkpoint_file=out_path.name,
training_state=False,
)
assert out_file is not None, "Failed to save checkpoint"
out_file = Path(out_file)
assert out_file.exists(), f"Failed to save checkpoint to {out_file}"
# endregion
logging.info(f"Saved results to: {out_file}")
if __name__ == "__main__":
main()
| 8,347 | 33.495868 | 112 | py |
ocp | ocp-main/ocpmodels/modules/scaling/scale_factor.py | import itertools
import logging
import math
from contextlib import contextmanager
from typing import Callable, Optional, TypedDict, Union
import torch
import torch.nn as nn
class _Stats(TypedDict):
variance_in: float
variance_out: float
n_samples: int
IndexFn = Callable[[], None]
def _check_consistency(old: torch.Tensor, new: torch.Tensor, key: str) -> None:
if not torch.allclose(old, new):
raise ValueError(
f"Scale factor parameter {key} is inconsistent with the loaded state dict.\n"
f"Old: {old}\n"
f"Actual: {new}"
)
class ScaleFactor(nn.Module):
scale_factor: torch.Tensor
name: Optional[str] = None
index_fn: Optional[IndexFn] = None
stats: Optional[_Stats] = None
def __init__(
self,
name: Optional[str] = None,
enforce_consistency: bool = True,
) -> None:
super().__init__()
self.name = name
self.index_fn = None
self.stats = None
self.scale_factor = nn.parameter.Parameter(
torch.tensor(0.0), requires_grad=False
)
if enforce_consistency:
self._register_load_state_dict_pre_hook(self._enforce_consistency)
def _enforce_consistency(
self,
state_dict,
prefix,
_local_metadata,
_strict,
_missing_keys,
_unexpected_keys,
_error_msgs,
) -> None:
if not self.fitted:
return
persistent_buffers = {
k: v
for k, v in self._buffers.items()
if k not in self._non_persistent_buffers_set
}
local_name_params = itertools.chain(
self._parameters.items(), persistent_buffers.items()
)
local_state = {k: v for k, v in local_name_params if v is not None}
for name, param in local_state.items():
key = prefix + name
if key not in state_dict:
continue
input_param = state_dict[key]
_check_consistency(old=param, new=input_param, key=key)
@property
def fitted(self) -> bool:
return bool((self.scale_factor != 0.0).item())
@torch.jit.unused
def reset_(self) -> None:
self.scale_factor.zero_()
@torch.jit.unused
def set_(self, scale: Union[float, torch.Tensor]) -> None:
if self.fitted:
_check_consistency(
old=self.scale_factor,
new=torch.tensor(scale) if isinstance(scale, float) else scale,
key="scale_factor",
)
self.scale_factor.fill_(scale)
@torch.jit.unused
def initialize_(self, *, index_fn: Optional[IndexFn] = None) -> None:
self.index_fn = index_fn
@contextmanager
@torch.jit.unused
def fit_context_(self):
self.stats = _Stats(variance_in=0.0, variance_out=0.0, n_samples=0)
yield
del self.stats
self.stats = None
@torch.jit.unused
def fit_(self):
assert self.stats, "Stats not set"
for k, v in self.stats.items():
assert v > 0, f"{k} is {v}"
self.stats["variance_in"] = (
self.stats["variance_in"] / self.stats["n_samples"]
)
self.stats["variance_out"] = (
self.stats["variance_out"] / self.stats["n_samples"]
)
ratio = self.stats["variance_out"] / self.stats["variance_in"]
value = math.sqrt(1 / ratio)
self.set_(value)
stats = dict(**self.stats)
return stats, ratio, value
@torch.no_grad()
@torch.jit.unused
def _observe(
self, x: torch.Tensor, ref: Optional[torch.Tensor] = None
) -> None:
if self.stats is None:
logging.debug("Observer not initialized but self.observe() called")
return
n_samples = x.shape[0]
self.stats["variance_out"] += (
torch.mean(torch.var(x, dim=0)).item() * n_samples
)
if ref is None:
self.stats["variance_in"] += n_samples
else:
self.stats["variance_in"] += (
torch.mean(torch.var(ref, dim=0)).item() * n_samples
)
self.stats["n_samples"] += n_samples
def forward(
self,
x: torch.Tensor,
*,
ref: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if self.index_fn is not None:
self.index_fn()
if self.fitted:
x = x * self.scale_factor
if not torch.jit.is_scripting():
self._observe(x, ref=ref)
return x
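# Usage sketch (illustrative): fit a ScaleFactor by hand from observed
# activations. In the real models this loop is driven by
# ocpmodels/modules/scaling/fit.py.
if __name__ == "__main__":
    sf = ScaleFactor(name="demo")
    ref = torch.randn(1024, 16)
    x = ref * 3.0  # pretend the layer inflated the variance 9x
    with sf.fit_context_():
        sf(x, ref=ref)  # observe one batch
        stats, ratio, value = sf.fit_()
    print(f"fitted scale factor: {value:.3f}")  # ~0.333 restores the variance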
| 4,613 | 25.67052 | 89 | py |
ocp | ocp-main/ocpmodels/modules/scaling/util.py | import logging
import torch.nn as nn
from .scale_factor import ScaleFactor
def ensure_fitted(module: nn.Module, warn: bool = False) -> None:
for name, child in module.named_modules():
if not isinstance(child, ScaleFactor) or child.fitted:
continue
if child.name is not None:
name = f"{child.name} ({name})"
msg = (
f"Scale factor {name} is not fitted. "
"Please make sure that you either (1) load a checkpoint with fitted scale factors, "
"(2) explicitly load scale factors using the `model.scale_file` attribute, or "
"(3) fit the scale factors using the `fit.py` script."
)
if warn:
logging.warning(msg)
else:
raise ValueError(msg)
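# Usage sketch (illustrative; run via `python -m` so the relative import
# resolves): ensure_fitted flags every ScaleFactor that was never fitted.
if __name__ == "__main__":

    class _Demo(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.scale = ScaleFactor(name="demo.scale")

    ensure_fitted(_Demo(), warn=True)  # logs a warning instead of raising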
| 786 | 31.791667 | 96 | py |
ocp | ocp-main/ocpmodels/modules/scaling/__init__.py | from .scale_factor import ScaleFactor
__all__ = ["ScaleFactor"]
| 65 | 15.5 | 37 | py |
ocp | ocp-main/ocpmodels/modules/scaling/compat.py | import json
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import torch
import torch.nn as nn
from .scale_factor import ScaleFactor
ScaleDict = Union[Dict[str, float], Dict[str, torch.Tensor]]
def _load_scale_dict(scale_file: Optional[Union[str, ScaleDict]]):
"""
Loads scale factors from either:
- a JSON file mapping scale factor names to scale values
- a python dictionary pickled object (loaded using `torch.load`) mapping scale factor names to scale values
- a dictionary mapping scale factor names to scale values
"""
if not scale_file:
return None
if isinstance(scale_file, dict):
if not scale_file:
logging.warning("Empty scale dictionary provided to model.")
return scale_file
path = Path(scale_file)
if not path.exists():
raise ValueError(f"Scale file {path} does not exist.")
scale_dict: Optional[ScaleDict] = None
if path.suffix == ".pt":
scale_dict = torch.load(path)
elif path.suffix == ".json":
with open(path, "r") as f:
scale_dict = json.load(f)
if isinstance(scale_dict, dict):
# old json scale factors have a comment field that has the model name
scale_dict.pop("comment", None)
else:
raise ValueError(f"Unsupported scale file extension: {path.suffix}")
if not scale_dict:
return None
return scale_dict
def load_scales_compat(
module: nn.Module, scale_file: Optional[Union[str, ScaleDict]]
) -> None:
scale_dict = _load_scale_dict(scale_file)
if not scale_dict:
return
scale_factors = {
module.name or name: (module, name)
for name, module in module.named_modules()
if isinstance(module, ScaleFactor)
}
logging.debug(
f"Found the following scale factors: {[(k, name) for k, (_, name) in scale_factors.items()]}"
)
for name, scale in scale_dict.items():
if name not in scale_factors:
logging.warning(f"Scale factor {name} not found in model")
continue
scale_module, module_name = scale_factors[name]
logging.debug(
f"Loading scale factor {scale} for ({name} => {module_name})"
)
scale_module.set_(scale)
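# Usage sketch (illustrative; run via `python -m` so the relative imports
# resolve). Accepted scale-file formats, with made-up factor names:
#   JSON: {"comment": "model name", "int_block.scale_rbf": 0.87}
#   .pt:  torch.save({"int_block.scale_rbf": 0.87}, "scales.pt")
# A plain dict can also be passed directly:
if __name__ == "__main__":

    class _Demo(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.scale = ScaleFactor(name="demo.scale")

    model = _Demo()
    load_scales_compat(model, {"demo.scale": 0.5})
    assert model.scale.fitted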
| 2,303 | 28.922078 | 111 | py |
ocp | ocp-main/ocpmodels/common/distutils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import subprocess
import torch
import torch.distributed as dist
from ocpmodels.common.typing import none_throws
def os_environ_get_or_throw(x: str) -> str:
if x not in os.environ:
raise RuntimeError(f"Could not find {x} in ENV variables")
return none_throws(os.environ.get(x))
def setup(config) -> None:
if config["submit"]:
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
config["init_method"] = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=config["distributed_port"],
)
nnodes = int(os_environ_get_or_throw("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os_environ_get_or_throw("SLURM_NTASKS"))
nnodes = int(os_environ_get_or_throw("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
assert config["world_size"] % nnodes == 0
gpus_per_node = config["world_size"] // nnodes
node_id = int(os_environ_get_or_throw("SLURM_NODEID"))
config["rank"] = node_id * gpus_per_node
config["local_rank"] = 0
else:
assert ntasks_per_node == config["world_size"] // nnodes
config["rank"] = int(
os_environ_get_or_throw("SLURM_PROCID")
)
config["local_rank"] = int(
os_environ_get_or_throw("SLURM_LOCALID")
)
logging.info(
f"Init: {config['init_method']}, {config['world_size']}, {config['rank']}"
)
# ensures GPU0 does not have extra context/higher peak memory
torch.cuda.set_device(config["local_rank"])
dist.init_process_group(
backend=config["distributed_backend"],
init_method=config["init_method"],
world_size=config["world_size"],
rank=config["rank"],
)
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
elif config["summit"]:
world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
get_master = (
"echo $(cat {} | sort | uniq | grep -v batch | grep -v login | head -1)"
).format(os.environ["LSB_DJOB_HOSTFILE"])
os.environ["MASTER_ADDR"] = str(
subprocess.check_output(get_master, shell=True)
)[2:-3]
os.environ["MASTER_PORT"] = "23456"
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
# NCCL and MPI initialization
dist.init_process_group(
backend="nccl",
rank=world_rank,
world_size=world_size,
init_method="env://",
)
else:
dist.init_process_group(
backend=config["distributed_backend"], init_method="env://"
)
# TODO: SLURM
def cleanup() -> None:
dist.destroy_process_group()
def initialized():
return dist.is_available() and dist.is_initialized()
def get_rank():
return dist.get_rank() if initialized() else 0
def get_world_size():
return dist.get_world_size() if initialized() else 1
def is_master():
return get_rank() == 0
def synchronize() -> None:
if get_world_size() == 1:
return
dist.barrier()
def broadcast(
tensor: torch.Tensor, src, group=dist.group.WORLD, async_op: bool = False
) -> None:
if get_world_size() == 1:
return
dist.broadcast(tensor, src, group, async_op)
def all_reduce(
data, group=dist.group.WORLD, average: bool = False, device=None
):
if get_world_size() == 1:
return data
tensor = data
if not isinstance(data, torch.Tensor):
tensor = torch.tensor(data)
if device is not None:
tensor = tensor.cuda(device)
dist.all_reduce(tensor, group=group)
if average:
tensor /= get_world_size()
if not isinstance(data, torch.Tensor):
result = tensor.cpu().numpy() if tensor.numel() > 1 else tensor.item()
else:
result = tensor
return result
def all_gather(data, group=dist.group.WORLD, device=None):
if get_world_size() == 1:
return data
tensor = data
if not isinstance(data, torch.Tensor):
tensor = torch.tensor(data)
if device is not None:
tensor = tensor.cuda(device)
tensor_list = [
tensor.new_zeros(tensor.shape) for _ in range(get_world_size())
]
dist.all_gather(tensor_list, tensor, group=group)
if not isinstance(data, torch.Tensor):
result = [tensor.cpu().numpy() for tensor in tensor_list]
else:
result = tensor_list
return result
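# Hedged sketch (added for illustration, not part of the original API surface):
# averaging a scalar metric across workers with the helpers above. On a single
# process this is a no-op; for a non-tensor input a Python scalar comes back.
def _example_average_metric(local_loss: float) -> float:
    return all_reduce(local_loss, average=True)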
| 5,803 | 32.165714 | 94 | py |
ocp | ocp-main/ocpmodels/common/data_parallel.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import heapq
import logging
from itertools import chain
from pathlib import Path
from typing import List, Literal, Protocol, Union, runtime_checkable
import numba
import numpy as np
import torch
from torch.utils.data import BatchSampler, DistributedSampler, Sampler
from ocpmodels.common import distutils, gp_utils
from ocpmodels.datasets import data_list_collater
class OCPDataParallel(torch.nn.DataParallel):
def __init__(self, module, output_device, num_gpus: int) -> None:
if num_gpus < 0:
raise ValueError("# GPUs must be positive.")
if num_gpus > torch.cuda.device_count():
raise ValueError("# GPUs specified larger than available")
self.src_device = torch.device(output_device)
self.cpu = False
if num_gpus == 0:
self.cpu = True
elif num_gpus == 1:
device_ids = [self.src_device]
else:
if (
self.src_device.type == "cuda"
and self.src_device.index >= num_gpus
):
raise ValueError("Main device must be less than # of GPUs")
device_ids = list(range(num_gpus))
if self.cpu:
super(torch.nn.DataParallel, self).__init__()
self.module = module
else:
super(OCPDataParallel, self).__init__(
module=module,
device_ids=device_ids,
output_device=self.src_device,
)
def forward(self, batch_list, **kwargs):
if self.cpu:
return self.module(batch_list[0])
if len(self.device_ids) == 1:
return self.module(
batch_list[0].to(f"cuda:{self.device_ids[0]}"), **kwargs
)
for t in chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device:
raise RuntimeError(
(
"Module must have its parameters and buffers on device "
"{} but found one of them on device {}."
).format(self.src_device, t.device)
)
inputs = [
batch.to(f"cuda:{self.device_ids[i]}")
for i, batch in enumerate(batch_list)
]
replicas = self.replicate(self.module, self.device_ids[: len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
class ParallelCollater:
def __init__(self, num_gpus: int, otf_graph: bool = False) -> None:
self.num_gpus = num_gpus
self.otf_graph = otf_graph
def __call__(self, data_list):
if self.num_gpus in [0, 1]: # adds cpu-only case
batch = data_list_collater(data_list, otf_graph=self.otf_graph)
return [batch]
else:
num_devices = min(self.num_gpus, len(data_list))
count = torch.tensor([data.num_nodes for data in data_list])
cumsum = count.cumsum(0)
cumsum = torch.cat([cumsum.new_zeros(1), cumsum], dim=0)
device_id = (
num_devices * cumsum.to(torch.float) / cumsum[-1].item()
)
device_id = (device_id[:-1] + device_id[1:]) / 2.0
device_id = device_id.to(torch.long)
split = device_id.bincount().cumsum(0)
split = torch.cat([split.new_zeros(1), split], dim=0)
split = torch.unique(split, sorted=True)
split = split.tolist()
return [
data_list_collater(data_list[split[i] : split[i + 1]])
for i in range(len(split) - 1)
]
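# Worked sketch (illustrative): the assignment above balances *nodes*, not
# graphs, across devices. With node counts [10, 10, 40] and 2 GPUs, the two
# small graphs land on device 0 and the large one on device 1.
def _example_node_balanced_device_ids() -> torch.Tensor:
    count = torch.tensor([10, 10, 40])
    cumsum = torch.cat([torch.zeros(1), count.cumsum(0).to(torch.float)])
    device_id = 2 * cumsum / cumsum[-1].item()
    # midpoint of each graph's node range, floored to a device index
    return ((device_id[:-1] + device_id[1:]) / 2.0).to(torch.long)  # -> [0, 0, 1]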
@numba.njit
def balanced_partition(sizes, num_parts: int):
"""
Greedily partition the given set by always inserting
the largest element into the smallest partition.
"""
sort_idx = np.argsort(-sizes) # Sort in descending order
heap = []
for idx in sort_idx[:num_parts]:
heap.append((sizes[idx], [idx]))
heapq.heapify(heap)
for idx in sort_idx[num_parts:]:
smallest_part = heapq.heappop(heap)
new_size = smallest_part[0] + sizes[idx]
new_idx = smallest_part[1] + [idx]
heapq.heappush(heap, (new_size, new_idx))
idx_balanced = [part[1] for part in heap]
return idx_balanced
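# Worked sketch (illustrative): sizes [8, 7, 3, 2] split into 2 parts seed the
# heap with 8 and 7, then 3 joins the 7-partition and 2 joins the 8-partition,
# giving two parts whose totals are both 10.
def _example_balanced_partition():
    sizes = np.array([8, 7, 3, 2])
    return balanced_partition(sizes, num_parts=2)  # e.g. [[1, 2], [0, 3]]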
@runtime_checkable
class _HasMetadata(Protocol):
@property
def metadata_path(self) -> Path:
...
class BalancedBatchSampler(Sampler):
def _load_dataset(self, dataset, mode: Literal["atoms", "neighbors"]):
errors: List[str] = []
if not isinstance(dataset, _HasMetadata):
errors.append(
f"Dataset {dataset} does not have a metadata_path attribute."
)
return None, errors
if not dataset.metadata_path.exists():
errors.append(
f"Metadata file {dataset.metadata_path} does not exist."
)
return None, errors
key = {"atoms": "natoms", "neighbors": "neighbors"}[mode]
sizes = np.load(dataset.metadata_path)[key]
return sizes, errors
def __init__(
self,
dataset,
batch_size: int,
num_replicas: int,
rank: int,
device,
mode: Union[str, bool] = "atoms",
shuffle: bool = True,
drop_last: bool = False,
force_balancing: bool = False,
throw_on_error: bool = False,
) -> None:
if mode is True:
mode = "atoms"
if isinstance(mode, str):
mode = mode.lower()
if mode not in ("atoms", "neighbors"):
raise ValueError(
f"Invalid mode {mode}. Must be one of 'atoms', 'neighbors', or a boolean."
)
self.dataset = dataset
self.batch_size = batch_size
self.num_replicas = num_replicas
self.rank = rank
self.device = device
self.mode = mode
self.shuffle = shuffle
self.drop_last = drop_last
self.single_sampler = DistributedSampler(
self.dataset,
num_replicas=num_replicas,
rank=rank,
shuffle=shuffle,
drop_last=drop_last,
)
self.batch_sampler = BatchSampler(
self.single_sampler,
batch_size,
drop_last=drop_last,
)
self.sizes = None
self.balance_batches = False
if self.num_replicas <= 1:
logging.info(
"Batch balancing is disabled for single GPU training."
)
return
if self.mode is False:
logging.info(
"Batch balancing is disabled because `optim.load_balancing` is `False`"
)
return
self.sizes, errors = self._load_dataset(dataset, self.mode)
if self.sizes is None:
self.balance_batches = force_balancing
if force_balancing:
errors.append(
"BalancedBatchSampler has to load the data to determine batch sizes, which incurs significant overhead! "
"You can disable balancing by setting `optim.load_balancing` to `False`."
)
else:
errors.append(
"Batches will not be balanced, which can incur significant overhead!"
)
else:
self.balance_batches = True
if errors:
msg = "BalancedBatchSampler: " + " ".join(errors)
if throw_on_error:
raise RuntimeError(msg)
else:
logging.warning(msg)
def __len__(self) -> int:
return len(self.batch_sampler)
def set_epoch(self, epoch: int) -> None:
self.single_sampler.set_epoch(epoch)
def __iter__(self):
if not self.balance_batches:
yield from self.batch_sampler
return
for batch_idx in self.batch_sampler:
if self.sizes is None:
# Unfortunately, we need to load the data to know the image sizes
data_list = [self.dataset[idx] for idx in batch_idx]
if self.mode == "atoms":
sizes = [data.num_nodes for data in data_list]
elif self.mode == "neighbors":
sizes = [data.edge_index.shape[1] for data in data_list]
else:
raise NotImplementedError(
f"Unknown load balancing mode: {self.mode}"
)
else:
sizes = [self.sizes[idx] for idx in batch_idx]
idx_sizes = torch.stack(
[torch.tensor(batch_idx), torch.tensor(sizes)]
)
idx_sizes_all = distutils.all_gather(idx_sizes, device=self.device)
idx_sizes_all = torch.cat(idx_sizes_all, dim=-1).cpu()
if gp_utils.initialized():
idx_sizes_all = torch.unique(input=idx_sizes_all, dim=1)
idx_all = idx_sizes_all[0]
sizes_all = idx_sizes_all[1]
local_idx_balanced = balanced_partition(
sizes_all.numpy(), num_parts=self.num_replicas
)
# Since DistributedSampler pads the last batch
# this should always have an entry for each replica.
yield idx_all[local_idx_balanced[self.rank]]
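# Hedged usage sketch (illustrative): wiring the sampler into a DataLoader.
# A dataset exposing `metadata_path` lets batch balancing use precomputed
# sizes; otherwise samples are loaded on the fly (or balancing is skipped),
# as described in __init__ above.
def _example_balanced_loader(dataset, device):
    from torch.utils.data import DataLoader
    sampler = BalancedBatchSampler(
        dataset,
        batch_size=4,
        num_replicas=distutils.get_world_size(),
        rank=distutils.get_rank(),
        device=device,
        mode="atoms",
    )
    return DataLoader(
        dataset, batch_sampler=sampler, collate_fn=ParallelCollater(num_gpus=0)
    )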
| 9,608 | 32.597902 | 126 | py |
ocp | ocp-main/ocpmodels/common/registry.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# Borrowed from https://github.com/facebookresearch/pythia/blob/master/pythia/common/registry.py.
"""
Registry is the central source of truth. Inspired by Redux's concept of a
global store, Registry maintains mappings of various information to unique
keys. Special functions in registry can be used as decorators to register
different kind of classes.
Import the global registry object using
``from ocpmodels.common.registry import registry``
Various decorators to register different kinds of classes with unique keys:
- Register a model: ``@registry.register_model``
"""
import importlib
def _get_absolute_mapping(name: str):
# in this case, the `name` should be the fully qualified name of the class
# e.g., `ocpmodels.tasks.base_task.BaseTask`
# we can use importlib to get the module (e.g., `ocpmodels.tasks.base_task`)
# and then import the class (e.g., `BaseTask`)
module_name = ".".join(name.split(".")[:-1])
class_name = name.split(".")[-1]
try:
module = importlib.import_module(module_name)
except (ModuleNotFoundError, ValueError) as e:
raise RuntimeError(
f"Could not import module `{module_name}` for import `{name}`"
) from e
try:
return getattr(module, class_name)
except AttributeError as e:
raise RuntimeError(
f"Could not import class `{class_name}` from module `{module_name}`"
) from e
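# Hedged sketch (illustrative): the registry supports both short registered
# names and fully qualified import paths; both calls below resolve classes
# that are registered/defined elsewhere in this package.
def _example_registry_lookup():
    cls_by_name = registry.get_logger_class("wandb")
    cls_by_path = _get_absolute_mapping("ocpmodels.common.logger.WandBLogger")
    return cls_by_name, cls_by_path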
class Registry:
r"""Class for registry object which acts as central source of truth."""
mapping = {
# Mappings to respective classes.
"task_name_mapping": {},
"dataset_name_mapping": {},
"model_name_mapping": {},
"logger_name_mapping": {},
"trainer_name_mapping": {},
"state": {},
}
@classmethod
def register_task(cls, name: str):
r"""Register a new task to registry with key 'name'
Args:
name: Key with which the task will be registered.
Usage::
from ocpmodels.common.registry import registry
from ocpmodels.tasks import BaseTask
@registry.register_task("train")
class TrainTask(BaseTask):
...
"""
def wrap(func):
cls.mapping["task_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_dataset(cls, name: str):
r"""Register a dataset to registry with key 'name'
Args:
name: Key with which the dataset will be registered.
Usage::
from ocpmodels.common.registry import registry
from ocpmodels.datasets import BaseDataset
@registry.register_dataset("qm9")
class QM9(BaseDataset):
...
"""
def wrap(func):
cls.mapping["dataset_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_model(cls, name: str):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the model will be registered.
Usage::
from ocpmodels.common.registry import registry
from ocpmodels.modules.layers import CGCNNConv
@registry.register_model("cgcnn")
class CGCNN():
...
"""
def wrap(func):
cls.mapping["model_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_logger(cls, name: str):
r"""Register a logger to registry with key 'name'
Args:
name: Key with which the logger will be registered.
Usage::
from ocpmodels.common.registry import registry
@registry.register_logger("tensorboard")
class WandB():
...
"""
def wrap(func):
from ocpmodels.common.logger import Logger
assert issubclass(
func, Logger
), "All loggers must inherit Logger class"
cls.mapping["logger_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_trainer(cls, name: str):
r"""Register a trainer to registry with key 'name'
Args:
name: Key with which the trainer will be registered.
Usage::
from ocpmodels.common.registry import registry
@registry.register_trainer("active_discovery")
class ActiveDiscoveryTrainer():
...
"""
def wrap(func):
cls.mapping["trainer_name_mapping"][name] = func
return func
return wrap
@classmethod
def register(cls, name, obj) -> None:
r"""Register an item to registry with key 'name'
Args:
name: Key with which the item will be registered.
Usage::
from ocpmodels.common.registry import registry
registry.register("config", {})
"""
path = name.split(".")
current = cls.mapping["state"]
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = obj
@classmethod
def __import_error(cls, name: str, mapping_name: str) -> RuntimeError:
kind = mapping_name[: -len("_name_mapping")]
mapping = cls.mapping.get(mapping_name, {})
existing_keys = list(mapping.keys())
existing_cls_path = (
mapping.get(existing_keys[-1], None) if existing_keys else None
)
if existing_cls_path is not None:
existing_cls_path = f"{existing_cls_path.__module__}.{existing_cls_path.__qualname__}"
else:
existing_cls_path = "ocpmodels.trainers.ForcesTrainer"
        existing_keys = [f"'{name}'" for name in existing_keys]
        # guard against empty or single-key mappings before joining with "or"
        if len(existing_keys) > 1:
            existing_keys = (
                ", ".join(existing_keys[:-1]) + " or " + existing_keys[-1]
            )
        else:
            existing_keys = "".join(existing_keys)
        existing_keys_str = (
            f" (one of {existing_keys})" if existing_keys else ""
        )
return RuntimeError(
f"Failed to find the {kind} '{name}'. "
f"You may either use a {kind} from the registry{existing_keys_str} "
f"or provide the full import path to the {kind} (e.g., '{existing_cls_path}')."
)
@classmethod
def get_class(cls, name: str, mapping_name: str):
existing_mapping = cls.mapping[mapping_name].get(name, None)
if existing_mapping is not None:
return existing_mapping
        # `name` may be a fully qualified class path of the form `{module_name}.{class_name}` (e.g., `ocpmodels.trainers.ForcesTrainer`)
if name.count(".") < 1:
raise cls.__import_error(name, mapping_name)
try:
return _get_absolute_mapping(name)
except RuntimeError as e:
raise cls.__import_error(name, mapping_name) from e
@classmethod
def get_task_class(cls, name: str):
return cls.get_class(name, "task_name_mapping")
@classmethod
def get_dataset_class(cls, name: str):
return cls.get_class(name, "dataset_name_mapping")
@classmethod
def get_model_class(cls, name: str):
return cls.get_class(name, "model_name_mapping")
@classmethod
def get_logger_class(cls, name: str):
return cls.get_class(name, "logger_name_mapping")
@classmethod
def get_trainer_class(cls, name: str):
return cls.get_class(name, "trainer_name_mapping")
@classmethod
def get(cls, name: str, default=None, no_warning: bool = False):
r"""Get an item from registry with key 'name'
Args:
name (string): Key whose value needs to be retrieved.
default: If passed and key is not in registry, default value will
be returned with a warning. Default: None
no_warning (bool): If passed as True, warning when key doesn't exist
will not be generated. Useful for cgcnn's
internal operations. Default: False
Usage::
from ocpmodels.common.registry import registry
config = registry.get("config")
"""
original_name = name
split_name = name.split(".")
value = cls.mapping["state"]
for subname in split_name:
value = value.get(subname, default)
if value is default:
break
if (
"writer" in cls.mapping["state"]
and value == default
and no_warning is False
):
cls.mapping["state"]["writer"].write(
"Key {} is not present in registry, returning default value "
"of {}".format(original_name, default)
)
return value
@classmethod
def unregister(cls, name: str):
r"""Remove an item from registry with key 'name'
Args:
name: Key which needs to be removed.
Usage::
from ocpmodels.common.registry import registry
config = registry.unregister("config")
"""
return cls.mapping["state"].pop(name, None)
registry = Registry()
| 9,503 | 29.267516 | 111 | py |
ocp | ocp-main/ocpmodels/common/typing.py | from typing import Optional, TypeVar, Type
_T = TypeVar("_T")
def assert_is_instance(obj: object, cls: Type[_T]) -> _T:
if not isinstance(obj, cls):
raise TypeError(f"obj is not an instance of cls: obj={obj}, cls={cls}")
return obj
def none_throws(x: Optional[_T], msg: Optional[str] = None) -> _T:
if x is None:
if msg:
raise ValueError(msg)
else:
raise ValueError("x cannot be None")
return x
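# Hedged sketch (illustrative): narrowing an Optional and checking a type in
# one pass, keeping static checkers satisfied.
def _example_typing_helpers() -> int:
    maybe_value: Optional[int] = 3
    value = none_throws(maybe_value, "maybe_value must be set")
    return assert_is_instance(value, int)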
| 464 | 23.473684 | 79 | py |
ocp | ocp-main/ocpmodels/common/utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import ast
import collections
import collections.abc
import copy
import importlib
import itertools
import json
import logging
import os
import sys
import time
from argparse import Namespace
from bisect import bisect
from contextlib import contextmanager
from dataclasses import dataclass
from functools import wraps
from itertools import product
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch_geometric
import yaml
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from torch_geometric.data import Data
from torch_geometric.utils import remove_self_loops
from torch_scatter import scatter, segment_coo, segment_csr
if TYPE_CHECKING:
from torch.nn.modules.module import _IncompatibleKeys
def pyg2_data_transform(data: Data):
"""
    If we're on the new PyG (2.0 or later) and the stored Data object is in the
    older format, convert it to the new format.
"""
if torch_geometric.__version__ >= "2.0" and "_store" not in data.__dict__:
return Data(
**{k: v for k, v in data.__dict__.items() if v is not None}
)
return data
def save_checkpoint(
state,
checkpoint_dir: str = "checkpoints/",
checkpoint_file: str = "checkpoint.pt",
) -> str:
filename = os.path.join(checkpoint_dir, checkpoint_file)
torch.save(state, filename)
return filename
class Complete:
def __call__(self, data):
device = data.edge_index.device
row = torch.arange(data.num_nodes, dtype=torch.long, device=device)
col = torch.arange(data.num_nodes, dtype=torch.long, device=device)
row = row.view(-1, 1).repeat(1, data.num_nodes).view(-1)
col = col.repeat(data.num_nodes)
edge_index = torch.stack([row, col], dim=0)
edge_attr = None
if data.edge_attr is not None:
idx = data.edge_index[0] * data.num_nodes + data.edge_index[1]
size = list(data.edge_attr.size())
size[0] = data.num_nodes * data.num_nodes
edge_attr = data.edge_attr.new_zeros(size)
edge_attr[idx] = data.edge_attr
edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
data.edge_attr = edge_attr
data.edge_index = edge_index
return data
def warmup_lr_lambda(current_step, optim_config):
"""Returns a learning rate multiplier.
Till `warmup_steps`, learning rate linearly increases to `initial_lr`,
and then gets multiplied by `lr_gamma` every time a milestone is crossed.
"""
# keep this block for older configs that have warmup_epochs instead of warmup_steps
# and lr_milestones are defined in epochs
if (
any(x < 100 for x in optim_config["lr_milestones"])
or "warmup_epochs" in optim_config
):
raise Exception(
"ConfigError: please define lr_milestones in steps not epochs and define warmup_steps instead of warmup_epochs"
)
if current_step <= optim_config["warmup_steps"]:
alpha = current_step / float(optim_config["warmup_steps"])
return optim_config["warmup_factor"] * (1.0 - alpha) + alpha
else:
idx = bisect(optim_config["lr_milestones"], current_step)
return pow(optim_config["lr_gamma"], idx)
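# Worked sketch (illustrative numbers): with warmup_steps=1000, warmup_factor=0.1,
# lr_milestones=[5000, 8000] and lr_gamma=0.1, the multiplier ramps linearly from
# 0.1 to 1.0 over the first 1000 steps, then decays at each milestone.
def _example_warmup_lr():
    optim_config = {
        "warmup_steps": 1000,
        "warmup_factor": 0.1,
        "lr_milestones": [5000, 8000],
        "lr_gamma": 0.1,
    }
    # multipliers at steps (500, 1000, 6000, 9000) -> [0.55, 1.0, 0.1, 0.01]
    return [warmup_lr_lambda(s, optim_config) for s in (500, 1000, 6000, 9000)]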
def print_cuda_usage() -> None:
print("Memory Allocated:", torch.cuda.memory_allocated() / (1024 * 1024))
print(
"Max Memory Allocated:",
torch.cuda.max_memory_allocated() / (1024 * 1024),
)
print("Memory Cached:", torch.cuda.memory_cached() / (1024 * 1024))
print("Max Memory Cached:", torch.cuda.max_memory_cached() / (1024 * 1024))
def conditional_grad(dec):
"Decorator to enable/disable grad depending on whether force/energy predictions are being made"
# Adapted from https://stackoverflow.com/questions/60907323/accessing-class-property-as-decorator-argument
def decorator(func):
@wraps(func)
def cls_method(self, *args, **kwargs):
f = func
if self.regress_forces and not getattr(self, "direct_forces", 0):
f = dec(func)
return f(self, *args, **kwargs)
return cls_method
return decorator
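# Hedged usage sketch (illustrative): models in this codebase typically wrap
# `forward` like this so gradients w.r.t. positions are tracked only when
# forces are computed via autograd; `_ExampleModel` is a hypothetical stand-in.
class _ExampleModel(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.regress_forces = True
        self.direct_forces = False
    @conditional_grad(torch.enable_grad())
    def forward(self, data):
        # grad is enabled here because regress_forces=True and direct_forces=False
        return data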
def plot_histogram(data, xlabel: str = "", ylabel: str = "", title: str = ""):
assert isinstance(data, list)
# Preset
fig = Figure(figsize=(5, 4), dpi=150)
canvas = FigureCanvas(fig)
ax = fig.gca()
# Plot
ax.hist(data, bins=20, rwidth=0.9, zorder=3)
# Axes
ax.grid(color="0.95", zorder=0)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
fig.tight_layout(pad=2)
# Return numpy array
canvas.draw()
image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
image_from_plot = image_from_plot.reshape(
fig.canvas.get_width_height()[::-1] + (3,)
)
return image_from_plot
# Override the collation method in `pytorch_geometric.data.InMemoryDataset`
def collate(data_list):
keys = data_list[0].keys
data = data_list[0].__class__()
for key in keys:
data[key] = []
slices = {key: [0] for key in keys}
for item, key in product(data_list, keys):
data[key].append(item[key])
if torch.is_tensor(item[key]):
s = slices[key][-1] + item[key].size(
item.__cat_dim__(key, item[key])
)
elif isinstance(item[key], int) or isinstance(item[key], float):
s = slices[key][-1] + 1
else:
raise ValueError("Unsupported attribute type")
slices[key].append(s)
if hasattr(data_list[0], "__num_nodes__"):
data.__num_nodes__ = []
for item in data_list:
data.__num_nodes__.append(item.num_nodes)
for key in keys:
if torch.is_tensor(data_list[0][key]):
data[key] = torch.cat(
data[key], dim=data.__cat_dim__(key, data_list[0][key])
)
else:
data[key] = torch.tensor(data[key])
slices[key] = torch.tensor(slices[key], dtype=torch.long)
return data, slices
def add_edge_distance_to_graph(
batch,
device="cpu",
dmin=0.0,
dmax=6.0,
num_gaussians=50,
):
# Make sure x has positions.
if not all(batch.pos[0][:] == batch.x[0][-3:]):
batch.x = torch.cat([batch.x, batch.pos.float()], dim=1)
# First set computations to be tracked for positions.
batch.x = batch.x.requires_grad_(True)
# Then compute Euclidean distance between edge endpoints.
pdist = torch.nn.PairwiseDistance(p=2.0)
distances = pdist(
batch.x[batch.edge_index[0]][:, -3:],
batch.x[batch.edge_index[1]][:, -3:],
)
# Expand it using a gaussian basis filter.
gdf_filter = torch.linspace(dmin, dmax, num_gaussians)
var = gdf_filter[1] - gdf_filter[0]
gdf_filter, var = gdf_filter.to(device), var.to(device)
gdf_distances = torch.exp(
-((distances.view(-1, 1) - gdf_filter) ** 2) / var**2
)
# Reassign edge attributes.
batch.edge_weight = distances
batch.edge_attr = gdf_distances.float()
return batch
def _import_local_file(path: Path, *, project_root: Path) -> None:
"""
Imports a Python file as a module
:param path: The path to the file to import
:type path: Path
:param project_root: The root directory of the project (i.e., the "ocp" folder)
:type project_root: Path
"""
path = path.resolve()
project_root = project_root.resolve()
module_name = ".".join(
path.absolute()
.relative_to(project_root.absolute())
.with_suffix("")
.parts
)
logging.debug(f"Resolved module name of {path} to {module_name}")
importlib.import_module(module_name)
def setup_experimental_imports(project_root: Path) -> None:
experimental_folder = (project_root / "experimental").resolve()
if not experimental_folder.exists() or not experimental_folder.is_dir():
return
experimental_files = [
f.resolve().absolute() for f in experimental_folder.rglob("*.py")
]
# Ignore certain directories within experimental
ignore_file = experimental_folder / ".ignore"
if ignore_file.exists():
with open(ignore_file, "r") as f:
for line in f.read().splitlines():
for ignored_file in (experimental_folder / line).rglob("*.py"):
experimental_files.remove(
ignored_file.resolve().absolute()
)
for f in experimental_files:
_import_local_file(f, project_root=project_root)
def _get_project_root() -> Path:
"""
Gets the root folder of the project (the "ocp" folder)
:return: The absolute path to the project root.
"""
from ocpmodels.common.registry import registry
# Automatically load all of the modules, so that
# they register with registry
root_folder = registry.get("ocpmodels_root", no_warning=True)
if root_folder is not None:
assert isinstance(root_folder, str), "ocpmodels_root must be a string"
root_folder = Path(root_folder).resolve().absolute()
assert root_folder.exists(), f"{root_folder} does not exist"
assert root_folder.is_dir(), f"{root_folder} is not a directory"
else:
root_folder = Path(__file__).resolve().absolute().parent.parent
    # root_folder is the "ocpmodels" folder, so we need to go up one more level
return root_folder.parent
# Copied from https://github.com/facebookresearch/mmf/blob/master/mmf/utils/env.py#L89.
def setup_imports(config: Optional[dict] = None) -> None:
from ocpmodels.common.registry import registry
skip_experimental_imports = (config or {}).get(
"skip_experimental_imports", None
)
# First, check if imports are already setup
has_already_setup = registry.get("imports_setup", no_warning=True)
if has_already_setup:
return
try:
project_root = _get_project_root()
logging.info(f"Project root: {project_root}")
importlib.import_module("ocpmodels.common.logger")
import_keys = ["trainers", "datasets", "models", "tasks"]
for key in import_keys:
for f in (project_root / "ocpmodels" / key).rglob("*.py"):
_import_local_file(f, project_root=project_root)
if not skip_experimental_imports:
setup_experimental_imports(project_root)
finally:
registry.register("imports_setup", True)
def dict_set_recursively(dictionary, key_sequence, val) -> None:
top_key = key_sequence.pop(0)
if len(key_sequence) == 0:
dictionary[top_key] = val
else:
if top_key not in dictionary:
dictionary[top_key] = {}
dict_set_recursively(dictionary[top_key], key_sequence, val)
def parse_value(value):
"""
Parse string as Python literal if possible and fallback to string.
"""
try:
return ast.literal_eval(value)
except (ValueError, SyntaxError):
# Use as string if nothing else worked
return value
def create_dict_from_args(args: list, sep: str = "."):
"""
Create a (nested) dictionary from console arguments.
Keys in different dictionary levels are separated by sep.
"""
return_dict = {}
for arg in args:
arg = arg.strip("--")
keys_concat, val = arg.split("=")
val = parse_value(val)
key_sequence = keys_concat.split(sep)
dict_set_recursively(return_dict, key_sequence, val)
return return_dict
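# Worked sketch (illustrative): command-line overrides of the form
# --optim.lr=0.001 --model.hidden=128 become a nested dict.
def _example_create_dict_from_args():
    args = ["--optim.lr=0.001", "--model.hidden=128"]
    # -> {"optim": {"lr": 0.001}, "model": {"hidden": 128}}
    return create_dict_from_args(args)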
def load_config(path: str, previous_includes: list = []):
path = Path(path)
if path in previous_includes:
raise ValueError(
f"Cyclic config include detected. {path} included in sequence {previous_includes}."
)
previous_includes = previous_includes + [path]
direct_config = yaml.safe_load(open(path, "r"))
# Load config from included files.
if "includes" in direct_config:
includes = direct_config.pop("includes")
else:
includes = []
if not isinstance(includes, list):
raise AttributeError(
"Includes must be a list, '{}' provided".format(type(includes))
)
config = {}
duplicates_warning = []
duplicates_error = []
for include in includes:
include_config, inc_dup_warning, inc_dup_error = load_config(
include, previous_includes
)
duplicates_warning += inc_dup_warning
duplicates_error += inc_dup_error
# Duplicates between includes causes an error
config, merge_dup_error = merge_dicts(config, include_config)
duplicates_error += merge_dup_error
# Duplicates between included and main file causes warnings
config, merge_dup_warning = merge_dicts(config, direct_config)
duplicates_warning += merge_dup_warning
return config, duplicates_warning, duplicates_error
def build_config(args, args_override):
config, duplicates_warning, duplicates_error = load_config(args.config_yml)
if len(duplicates_warning) > 0:
logging.warning(
f"Overwritten config parameters from included configs "
f"(non-included parameters take precedence): {duplicates_warning}"
)
if len(duplicates_error) > 0:
raise ValueError(
f"Conflicting (duplicate) parameters in simultaneously "
f"included configs: {duplicates_error}"
)
# Check for overridden parameters.
if args_override != []:
overrides = create_dict_from_args(args_override)
config, _ = merge_dicts(config, overrides)
# Some other flags.
config["mode"] = args.mode
config["identifier"] = args.identifier
config["timestamp_id"] = args.timestamp_id
config["seed"] = args.seed
config["is_debug"] = args.debug
config["run_dir"] = args.run_dir
config["print_every"] = args.print_every
config["amp"] = args.amp
config["checkpoint"] = args.checkpoint
config["cpu"] = args.cpu
# Submit
config["submit"] = args.submit
config["summit"] = args.summit
# Distributed
config["local_rank"] = args.local_rank
config["distributed_port"] = args.distributed_port
config["world_size"] = args.num_nodes * args.num_gpus
config["distributed_backend"] = args.distributed_backend
config["noddp"] = args.no_ddp
config["gp_gpus"] = args.gp_gpus
return config
def create_grid(base_config, sweep_file):
def _flatten_sweeps(sweeps, root_key: str = "", sep: str = "."):
flat_sweeps = []
for key, value in sweeps.items():
new_key = root_key + sep + key if root_key else key
            if isinstance(value, collections.abc.MutableMapping):  # collections.MutableMapping was removed in Python 3.10
flat_sweeps.extend(_flatten_sweeps(value, new_key).items())
else:
flat_sweeps.append((new_key, value))
return collections.OrderedDict(flat_sweeps)
def _update_config(config, keys, override_vals, sep: str = "."):
for key, value in zip(keys, override_vals):
key_path = key.split(sep)
child_config = config
for name in key_path[:-1]:
child_config = child_config[name]
child_config[key_path[-1]] = value
return config
sweeps = yaml.safe_load(open(sweep_file, "r"))
flat_sweeps = _flatten_sweeps(sweeps)
keys = list(flat_sweeps.keys())
values = list(itertools.product(*flat_sweeps.values()))
configs = []
for i, override_vals in enumerate(values):
config = copy.deepcopy(base_config)
config = _update_config(config, keys, override_vals)
config["identifier"] = config["identifier"] + f"_run{i}"
configs.append(config)
return configs
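# Hedged sketch (illustrative): the core of the sweep expansion above is a
# cross product over flattened (dotted) keys; the values here are hypothetical.
def _example_sweep_cross_product():
    flat_sweeps = {"optim.lr": [0.001, 0.0001], "optim.batch_size": [16, 32]}
    # -> 4 override tuples, one per generated config
    return list(itertools.product(*flat_sweeps.values()))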
def save_experiment_log(args, jobs, configs):
log_file = args.logdir / "exp" / time.strftime("%Y-%m-%d-%I-%M-%S%p.log")
log_file.parent.mkdir(exist_ok=True, parents=True)
with open(log_file, "w") as f:
for job, config in zip(jobs, configs):
print(
json.dumps(
{
"config": config,
"slurm_id": job.job_id,
"timestamp": time.strftime("%I:%M:%S%p %Z %b %d, %Y"),
}
),
file=f,
)
return log_file
def get_pbc_distances(
pos,
edge_index,
cell,
cell_offsets,
neighbors,
return_offsets=False,
return_distance_vec=False,
):
row, col = edge_index
distance_vectors = pos[row] - pos[col]
# correct for pbc
neighbors = neighbors.to(cell.device)
cell = torch.repeat_interleave(cell, neighbors, dim=0)
offsets = cell_offsets.float().view(-1, 1, 3).bmm(cell.float()).view(-1, 3)
distance_vectors += offsets
# compute distances
distances = distance_vectors.norm(dim=-1)
# redundancy: remove zero distances
nonzero_idx = torch.arange(len(distances), device=distances.device)[
distances != 0
]
edge_index = edge_index[:, nonzero_idx]
distances = distances[nonzero_idx]
out = {
"edge_index": edge_index,
"distances": distances,
}
if return_distance_vec:
out["distance_vec"] = distance_vectors[nonzero_idx]
if return_offsets:
out["offsets"] = offsets[nonzero_idx]
return out
def radius_graph_pbc(
data,
radius,
max_num_neighbors_threshold,
enforce_max_neighbors_strictly=False,
pbc=[True, True, True],
):
device = data.pos.device
batch_size = len(data.natoms)
if hasattr(data, "pbc"):
data.pbc = torch.atleast_2d(data.pbc)
for i in range(3):
if not torch.any(data.pbc[:, i]).item():
pbc[i] = False
elif torch.all(data.pbc[:, i]).item():
pbc[i] = True
else:
raise RuntimeError(
"Different structures in the batch have different PBC configurations. This is not currently supported."
)
# position of the atoms
atom_pos = data.pos
# Before computing the pairwise distances between atoms, first create a list of atom indices to compare for the entire batch
num_atoms_per_image = data.natoms
num_atoms_per_image_sqr = (num_atoms_per_image**2).long()
# index offset between images
index_offset = (
torch.cumsum(num_atoms_per_image, dim=0) - num_atoms_per_image
)
index_offset_expand = torch.repeat_interleave(
index_offset, num_atoms_per_image_sqr
)
num_atoms_per_image_expand = torch.repeat_interleave(
num_atoms_per_image, num_atoms_per_image_sqr
)
# Compute a tensor containing sequences of numbers that range from 0 to num_atoms_per_image_sqr for each image
# that is used to compute indices for the pairs of atoms. This is a very convoluted way to implement
# the following (but 10x faster since it removes the for loop)
# for batch_idx in range(batch_size):
# batch_count = torch.cat([batch_count, torch.arange(num_atoms_per_image_sqr[batch_idx], device=device)], dim=0)
num_atom_pairs = torch.sum(num_atoms_per_image_sqr)
index_sqr_offset = (
torch.cumsum(num_atoms_per_image_sqr, dim=0) - num_atoms_per_image_sqr
)
index_sqr_offset = torch.repeat_interleave(
index_sqr_offset, num_atoms_per_image_sqr
)
atom_count_sqr = (
torch.arange(num_atom_pairs, device=device) - index_sqr_offset
)
# Compute the indices for the pairs of atoms (using division and mod)
    # If the systems get too large this approach could run into numerical precision issues
index1 = (
torch.div(
atom_count_sqr, num_atoms_per_image_expand, rounding_mode="floor"
)
) + index_offset_expand
index2 = (
atom_count_sqr % num_atoms_per_image_expand
) + index_offset_expand
# Get the positions for each atom
pos1 = torch.index_select(atom_pos, 0, index1)
pos2 = torch.index_select(atom_pos, 0, index2)
# Calculate required number of unit cells in each direction.
# Smallest distance between planes separated by a1 is
# 1 / ||(a2 x a3) / V||_2, since a2 x a3 is the area of the plane.
# Note that the unit cell volume V = a1 * (a2 x a3) and that
# (a2 x a3) / V is also the reciprocal primitive vector
# (crystallographer's definition).
cross_a2a3 = torch.cross(data.cell[:, 1], data.cell[:, 2], dim=-1)
cell_vol = torch.sum(data.cell[:, 0] * cross_a2a3, dim=-1, keepdim=True)
if pbc[0]:
inv_min_dist_a1 = torch.norm(cross_a2a3 / cell_vol, p=2, dim=-1)
rep_a1 = torch.ceil(radius * inv_min_dist_a1)
else:
rep_a1 = data.cell.new_zeros(1)
if pbc[1]:
cross_a3a1 = torch.cross(data.cell[:, 2], data.cell[:, 0], dim=-1)
inv_min_dist_a2 = torch.norm(cross_a3a1 / cell_vol, p=2, dim=-1)
rep_a2 = torch.ceil(radius * inv_min_dist_a2)
else:
rep_a2 = data.cell.new_zeros(1)
if pbc[2]:
cross_a1a2 = torch.cross(data.cell[:, 0], data.cell[:, 1], dim=-1)
inv_min_dist_a3 = torch.norm(cross_a1a2 / cell_vol, p=2, dim=-1)
rep_a3 = torch.ceil(radius * inv_min_dist_a3)
else:
rep_a3 = data.cell.new_zeros(1)
# Take the max over all images for uniformity. This is essentially padding.
# Note that this can significantly increase the number of computed distances
# if the required repetitions are very different between images
# (which they usually are). Changing this to sparse (scatter) operations
# might be worth the effort if this function becomes a bottleneck.
max_rep = [rep_a1.max(), rep_a2.max(), rep_a3.max()]
# Tensor of unit cells
cells_per_dim = [
torch.arange(-rep, rep + 1, device=device, dtype=torch.float)
for rep in max_rep
]
unit_cell = torch.cartesian_prod(*cells_per_dim)
num_cells = len(unit_cell)
unit_cell_per_atom = unit_cell.view(1, num_cells, 3).repeat(
len(index2), 1, 1
)
unit_cell = torch.transpose(unit_cell, 0, 1)
unit_cell_batch = unit_cell.view(1, 3, num_cells).expand(
batch_size, -1, -1
)
# Compute the x, y, z positional offsets for each cell in each image
data_cell = torch.transpose(data.cell, 1, 2)
pbc_offsets = torch.bmm(data_cell, unit_cell_batch)
pbc_offsets_per_atom = torch.repeat_interleave(
pbc_offsets, num_atoms_per_image_sqr, dim=0
)
    # Expand the positions and indices over all candidate unit cells
pos1 = pos1.view(-1, 3, 1).expand(-1, -1, num_cells)
pos2 = pos2.view(-1, 3, 1).expand(-1, -1, num_cells)
index1 = index1.view(-1, 1).repeat(1, num_cells).view(-1)
index2 = index2.view(-1, 1).repeat(1, num_cells).view(-1)
# Add the PBC offsets for the second atom
pos2 = pos2 + pbc_offsets_per_atom
# Compute the squared distance between atoms
atom_distance_sqr = torch.sum((pos1 - pos2) ** 2, dim=1)
atom_distance_sqr = atom_distance_sqr.view(-1)
# Remove pairs that are too far apart
mask_within_radius = torch.le(atom_distance_sqr, radius * radius)
# Remove pairs with the same atoms (distance = 0.0)
mask_not_same = torch.gt(atom_distance_sqr, 0.0001)
mask = torch.logical_and(mask_within_radius, mask_not_same)
index1 = torch.masked_select(index1, mask)
index2 = torch.masked_select(index2, mask)
unit_cell = torch.masked_select(
unit_cell_per_atom.view(-1, 3), mask.view(-1, 1).expand(-1, 3)
)
unit_cell = unit_cell.view(-1, 3)
atom_distance_sqr = torch.masked_select(atom_distance_sqr, mask)
mask_num_neighbors, num_neighbors_image = get_max_neighbors_mask(
natoms=data.natoms,
index=index1,
atom_distance=atom_distance_sqr,
max_num_neighbors_threshold=max_num_neighbors_threshold,
enforce_max_strictly=enforce_max_neighbors_strictly,
)
if not torch.all(mask_num_neighbors):
# Mask out the atoms to ensure each atom has at most max_num_neighbors_threshold neighbors
index1 = torch.masked_select(index1, mask_num_neighbors)
index2 = torch.masked_select(index2, mask_num_neighbors)
unit_cell = torch.masked_select(
unit_cell.view(-1, 3), mask_num_neighbors.view(-1, 1).expand(-1, 3)
)
unit_cell = unit_cell.view(-1, 3)
edge_index = torch.stack((index2, index1))
return edge_index, unit_cell, num_neighbors_image
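# Hedged usage sketch (illustrative): building a periodic radius graph and the
# corresponding distances for a PyG batch that carries `pos`, `cell`, `natoms`
# (and optionally `pbc`) fields.
def _example_pbc_graph(data):
    edge_index, cell_offsets, neighbors = radius_graph_pbc(
        data, radius=6.0, max_num_neighbors_threshold=50
    )
    out = get_pbc_distances(
        data.pos, edge_index, data.cell, cell_offsets, neighbors
    )
    return out["edge_index"], out["distances"]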
def get_max_neighbors_mask(
natoms,
index,
atom_distance,
max_num_neighbors_threshold,
degeneracy_tolerance=0.01,
enforce_max_strictly=False,
):
"""
Give a mask that filters out edges so that each atom has at most
`max_num_neighbors_threshold` neighbors.
Assumes that `index` is sorted.
Enforcing the max strictly can force the arbitrary choice between
degenerate edges. This can lead to undesired behaviors; for
example, bulk formation energies which are not invariant to
unit cell choice.
A degeneracy tolerance can help prevent sudden changes in edge
existence from small changes in atom position, for example,
rounding errors, slab relaxation, temperature, etc.
"""
device = natoms.device
num_atoms = natoms.sum()
# Get number of neighbors
# segment_coo assumes sorted index
ones = index.new_ones(1).expand_as(index)
num_neighbors = segment_coo(ones, index, dim_size=num_atoms)
max_num_neighbors = num_neighbors.max()
num_neighbors_thresholded = num_neighbors.clamp(
max=max_num_neighbors_threshold
)
# Get number of (thresholded) neighbors per image
image_indptr = torch.zeros(
natoms.shape[0] + 1, device=device, dtype=torch.long
)
image_indptr[1:] = torch.cumsum(natoms, dim=0)
num_neighbors_image = segment_csr(num_neighbors_thresholded, image_indptr)
# If max_num_neighbors is below the threshold, return early
if (
max_num_neighbors <= max_num_neighbors_threshold
or max_num_neighbors_threshold <= 0
):
mask_num_neighbors = torch.tensor(
[True], dtype=bool, device=device
).expand_as(index)
return mask_num_neighbors, num_neighbors_image
# Create a tensor of size [num_atoms, max_num_neighbors] to sort the distances of the neighbors.
# Fill with infinity so we can easily remove unused distances later.
distance_sort = torch.full(
[num_atoms * max_num_neighbors], np.inf, device=device
)
# Create an index map to map distances from atom_distance to distance_sort
# index_sort_map assumes index to be sorted
index_neighbor_offset = torch.cumsum(num_neighbors, dim=0) - num_neighbors
index_neighbor_offset_expand = torch.repeat_interleave(
index_neighbor_offset, num_neighbors
)
index_sort_map = (
index * max_num_neighbors
+ torch.arange(len(index), device=device)
- index_neighbor_offset_expand
)
distance_sort.index_copy_(0, index_sort_map, atom_distance)
distance_sort = distance_sort.view(num_atoms, max_num_neighbors)
# Sort neighboring atoms based on distance
distance_sort, index_sort = torch.sort(distance_sort, dim=1)
# Select the max_num_neighbors_threshold neighbors that are closest
if enforce_max_strictly:
distance_sort = distance_sort[:, :max_num_neighbors_threshold]
index_sort = index_sort[:, :max_num_neighbors_threshold]
max_num_included = max_num_neighbors_threshold
else:
effective_cutoff = (
distance_sort[:, max_num_neighbors_threshold]
+ degeneracy_tolerance
)
is_included = torch.le(distance_sort.T, effective_cutoff)
# Set all undesired edges to infinite length to be removed later
distance_sort[~is_included.T] = np.inf
# Subselect tensors for efficiency
num_included_per_atom = torch.sum(is_included, dim=0)
max_num_included = torch.max(num_included_per_atom)
distance_sort = distance_sort[:, :max_num_included]
index_sort = index_sort[:, :max_num_included]
# Recompute the number of neighbors
num_neighbors_thresholded = num_neighbors.clamp(
max=num_included_per_atom
)
num_neighbors_image = segment_csr(
num_neighbors_thresholded, image_indptr
)
# Offset index_sort so that it indexes into index
index_sort = index_sort + index_neighbor_offset.view(-1, 1).expand(
-1, max_num_included
)
# Remove "unused pairs" with infinite distances
mask_finite = torch.isfinite(distance_sort)
index_sort = torch.masked_select(index_sort, mask_finite)
# At this point index_sort contains the index into index of the
# closest max_num_neighbors_threshold neighbors per atom
# Create a mask to remove all pairs not in index_sort
mask_num_neighbors = torch.zeros(len(index), device=device, dtype=bool)
mask_num_neighbors.index_fill_(0, index_sort, True)
return mask_num_neighbors, num_neighbors_image
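# Hedged sketch (illustrative): pruning an edge list with the mask computed
# above, mirroring the call site in radius_graph_pbc.
def _example_apply_neighbor_mask(natoms, index1, index2, atom_distance_sqr):
    mask, num_neighbors_image = get_max_neighbors_mask(
        natoms=natoms,
        index=index1,
        atom_distance=atom_distance_sqr,
        max_num_neighbors_threshold=12,
    )
    return index1[mask], index2[mask], num_neighbors_image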
def get_pruned_edge_idx(
    edge_index, num_atoms: int, max_neigh: int = int(1e9)  # slice bounds must be int
) -> torch.Tensor:
assert num_atoms is not None # TODO: Shouldn't be necessary
# removes neighbors > max_neigh
# assumes neighbors are sorted in increasing distance
_nonmax_idx_list = []
for i in range(num_atoms):
idx_i = torch.arange(len(edge_index[1]))[(edge_index[1] == i)][
:max_neigh
]
_nonmax_idx_list.append(idx_i)
return torch.cat(_nonmax_idx_list)
def merge_dicts(dict1: dict, dict2: dict):
"""Recursively merge two dictionaries.
Values in dict2 override values in dict1. If dict1 and dict2 contain a dictionary as a
value, this will call itself recursively to merge these dictionaries.
This does not modify the input dictionaries (creates an internal copy).
Additionally returns a list of detected duplicates.
Adapted from https://github.com/TUM-DAML/seml/blob/master/seml/utils.py
Parameters
----------
dict1: dict
First dict.
dict2: dict
Second dict. Values in dict2 will override values from dict1 in case they share the same key.
Returns
-------
return_dict: dict
Merged dictionaries.
"""
if not isinstance(dict1, dict):
raise ValueError(f"Expecting dict1 to be dict, found {type(dict1)}.")
if not isinstance(dict2, dict):
raise ValueError(f"Expecting dict2 to be dict, found {type(dict2)}.")
return_dict = copy.deepcopy(dict1)
duplicates = []
for k, v in dict2.items():
if k not in dict1:
return_dict[k] = v
else:
if isinstance(v, dict) and isinstance(dict1[k], dict):
return_dict[k], duplicates_k = merge_dicts(dict1[k], dict2[k])
duplicates += [f"{k}.{dup}" for dup in duplicates_k]
else:
return_dict[k] = dict2[k]
duplicates.append(k)
return return_dict, duplicates
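# Worked sketch (illustrative): nested values from dict2 win, and shared keys
# are reported back as dotted duplicates.
def _example_merge_dicts():
    merged, dups = merge_dicts(
        {"optim": {"lr": 0.1, "steps": 10}},
        {"optim": {"lr": 0.01}},
    )
    # merged == {"optim": {"lr": 0.01, "steps": 10}}; dups == ["optim.lr"]
    return merged, dups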
class SeverityLevelBetween(logging.Filter):
def __init__(self, min_level, max_level) -> None:
super().__init__()
self.min_level = min_level
self.max_level = max_level
def filter(self, record):
return self.min_level <= record.levelno < self.max_level
def setup_logging() -> None:
root = logging.getLogger()
# Perform setup only if logging has not been configured
if not root.hasHandlers():
root.setLevel(logging.INFO)
log_formatter = logging.Formatter(
"%(asctime)s (%(levelname)s): %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
# Send INFO to stdout
handler_out = logging.StreamHandler(sys.stdout)
handler_out.addFilter(
SeverityLevelBetween(logging.INFO, logging.WARNING)
)
handler_out.setFormatter(log_formatter)
root.addHandler(handler_out)
# Send WARNING (and higher) to stderr
handler_err = logging.StreamHandler(sys.stderr)
handler_err.setLevel(logging.WARNING)
handler_err.setFormatter(log_formatter)
root.addHandler(handler_err)
def compute_neighbors(data, edge_index):
# Get number of neighbors
# segment_coo assumes sorted index
ones = edge_index[1].new_ones(1).expand_as(edge_index[1])
num_neighbors = segment_coo(
ones, edge_index[1], dim_size=data.natoms.sum()
)
# Get number of neighbors per image
image_indptr = torch.zeros(
data.natoms.shape[0] + 1, device=data.pos.device, dtype=torch.long
)
image_indptr[1:] = torch.cumsum(data.natoms, dim=0)
neighbors = segment_csr(num_neighbors, image_indptr)
return neighbors
def check_traj_files(batch, traj_dir) -> bool:
if traj_dir is None:
return False
traj_dir = Path(traj_dir)
traj_files = [traj_dir / f"{id}.traj" for id in batch[0].sid.tolist()]
return all(fl.exists() for fl in traj_files)
@contextmanager
def new_trainer_context(*, config: Dict[str, Any], args: Namespace):
from ocpmodels.common import distutils, gp_utils
from ocpmodels.common.registry import registry
if TYPE_CHECKING:
from ocpmodels.tasks.task import BaseTask
from ocpmodels.trainers import BaseTrainer
@dataclass
class _TrainingContext:
config: Dict[str, Any]
task: "BaseTask"
trainer: "BaseTrainer"
setup_logging()
original_config = config
config = copy.deepcopy(original_config)
if args.distributed:
distutils.setup(config)
if config["gp_gpus"] is not None:
gp_utils.setup_gp(config)
try:
setup_imports(config)
trainer_cls = registry.get_trainer_class(
config.get("trainer", "energy")
)
assert trainer_cls is not None, "Trainer not found"
trainer = trainer_cls(
task=config["task"],
model=config["model"],
dataset=config["dataset"],
optimizer=config["optim"],
identifier=config["identifier"],
timestamp_id=config.get("timestamp_id", None),
run_dir=config.get("run_dir", "./"),
is_debug=config.get("is_debug", False),
print_every=config.get("print_every", 10),
seed=config.get("seed", 0),
logger=config.get("logger", "tensorboard"),
local_rank=config["local_rank"],
amp=config.get("amp", False),
cpu=config.get("cpu", False),
slurm=config.get("slurm", {}),
noddp=config.get("noddp", False),
)
task_cls = registry.get_task_class(config["mode"])
assert task_cls is not None, "Task not found"
task = task_cls(config)
start_time = time.time()
ctx = _TrainingContext(
config=original_config, task=task, trainer=trainer
)
yield ctx
distutils.synchronize()
if distutils.is_master():
logging.info(f"Total time taken: {time.time() - start_time}")
finally:
if args.distributed:
distutils.cleanup()
def _resolve_scale_factor_submodule(model: nn.Module, name: str):
from ocpmodels.modules.scaling.scale_factor import ScaleFactor
try:
scale = model.get_submodule(name)
if not isinstance(scale, ScaleFactor):
return None
return scale
except AttributeError:
return None
def _report_incompat_keys(
model: nn.Module,
keys: "_IncompatibleKeys",
strict: bool = False,
) -> Tuple[List[str], List[str]]:
# filter out the missing scale factor keys for the new scaling factor module
missing_keys: List[str] = []
for full_key_name in keys.missing_keys:
parent_module_name, _ = full_key_name.rsplit(".", 1)
scale_factor = _resolve_scale_factor_submodule(
model, parent_module_name
)
if scale_factor is not None:
continue
missing_keys.append(full_key_name)
# filter out unexpected scale factor keys that remain from the old scaling modules
unexpected_keys: List[str] = []
for full_key_name in keys.unexpected_keys:
parent_module_name, _ = full_key_name.rsplit(".", 1)
scale_factor = _resolve_scale_factor_submodule(
model, parent_module_name
)
if scale_factor is not None:
continue
unexpected_keys.append(full_key_name)
error_msgs = []
if len(unexpected_keys) > 0:
error_msgs.insert(
0,
"Unexpected key(s) in state_dict: {}. ".format(
", ".join('"{}"'.format(k) for k in unexpected_keys)
),
)
if len(missing_keys) > 0:
error_msgs.insert(
0,
"Missing key(s) in state_dict: {}. ".format(
", ".join('"{}"'.format(k) for k in missing_keys)
),
)
if len(error_msgs) > 0:
error_msg = "Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
if strict:
raise RuntimeError(error_msg)
else:
logging.warning(error_msg)
return missing_keys, unexpected_keys
def load_state_dict(
module: nn.Module,
state_dict: Mapping[str, torch.Tensor],
strict: bool = True,
) -> Tuple[List[str], List[str]]:
incompat_keys = module.load_state_dict(state_dict, strict=False) # type: ignore
return _report_incompat_keys(module, incompat_keys, strict=strict)
def scatter_det(*args, **kwargs):
from ocpmodels.common.registry import registry
if registry.get("set_deterministic_scatter", no_warning=True):
torch.use_deterministic_algorithms(mode=True)
out = scatter(*args, **kwargs)
if registry.get("set_deterministic_scatter", no_warning=True):
torch.use_deterministic_algorithms(mode=False)
return out
| 38,297 | 33.072954 | 128 | py |
ocp | ocp-main/ocpmodels/common/logger.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
from abc import ABC, abstractmethod
import torch
import wandb
from torch.utils.tensorboard import SummaryWriter
from ocpmodels.common.registry import registry
class Logger(ABC):
"""Generic class to interface with various logging modules, e.g. wandb,
tensorboard, etc.
"""
def __init__(self, config) -> None:
self.config = config
@abstractmethod
def watch(self, model):
"""
Monitor parameters and gradients.
"""
pass
def log(self, update_dict, step=None, split: str = ""):
"""
Log some values.
"""
assert step is not None
if split != "":
new_dict = {}
for key in update_dict:
new_dict["{}/{}".format(split, key)] = update_dict[key]
update_dict = new_dict
return update_dict
@abstractmethod
def log_plots(self, plots):
pass
@abstractmethod
def mark_preempting(self):
pass
@registry.register_logger("wandb")
class WandBLogger(Logger):
def __init__(self, config) -> None:
super().__init__(config)
project = (
self.config["logger"].get("project", None)
if isinstance(self.config["logger"], dict)
else None
)
wandb.init(
config=self.config,
id=self.config["cmd"]["timestamp_id"],
name=self.config["cmd"]["identifier"],
dir=self.config["cmd"]["logs_dir"],
project=project,
resume="allow",
)
def watch(self, model) -> None:
wandb.watch(model)
def log(self, update_dict, step=None, split: str = "") -> None:
update_dict = super().log(update_dict, step, split)
wandb.log(update_dict, step=int(step))
def log_plots(self, plots, caption: str = "") -> None:
assert isinstance(plots, list)
plots = [wandb.Image(x, caption=caption) for x in plots]
wandb.log({"data": plots})
def mark_preempting(self) -> None:
wandb.mark_preempting()
@registry.register_logger("tensorboard")
class TensorboardLogger(Logger):
def __init__(self, config) -> None:
super().__init__(config)
self.writer = SummaryWriter(self.config["cmd"]["logs_dir"])
# TODO: add a model hook for watching gradients.
def watch(self, model) -> bool:
logging.warning(
"Model gradient logging to tensorboard not yet supported."
)
return False
def log(self, update_dict, step=None, split: str = ""):
update_dict = super().log(update_dict, step, split)
for key in update_dict:
if torch.is_tensor(update_dict[key]):
self.writer.add_scalar(key, update_dict[key].item(), step)
else:
assert isinstance(update_dict[key], int) or isinstance(
update_dict[key], float
)
self.writer.add_scalar(key, update_dict[key], step)
def mark_preempting(self):
pass
def log_plots(self, plots):
pass
| 3,275 | 27 | 75 | py |
ocp | ocp-main/ocpmodels/common/flags.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import argparse
from pathlib import Path
class Flags:
def __init__(self) -> None:
self.parser = argparse.ArgumentParser(
description="Graph Networks for Electrocatalyst Design"
)
self.add_core_args()
def get_parser(self) -> argparse.ArgumentParser:
return self.parser
def add_core_args(self) -> None:
self.parser.add_argument_group("Core Arguments")
self.parser.add_argument(
"--mode",
choices=["train", "predict", "run-relaxations", "validate"],
required=True,
help="Whether to train the model, make predictions, or to run relaxations",
)
self.parser.add_argument(
"--config-yml",
required=True,
type=Path,
help="Path to a config file listing data, model, optim parameters.",
)
self.parser.add_argument(
"--identifier",
default="",
type=str,
help="Experiment identifier to append to checkpoint/log/result directory",
)
self.parser.add_argument(
"--debug",
action="store_true",
help="Whether this is a debugging run or not",
)
self.parser.add_argument(
"--run-dir",
default="./",
type=str,
help="Directory to store checkpoint/log/result directory",
)
self.parser.add_argument(
"--print-every",
default=10,
type=int,
help="Log every N iterations (default: 10)",
)
self.parser.add_argument(
"--seed", default=0, type=int, help="Seed for torch, cuda, numpy"
)
self.parser.add_argument(
"--amp", action="store_true", help="Use mixed-precision training"
)
self.parser.add_argument(
"--checkpoint", type=str, help="Model checkpoint to load"
)
self.parser.add_argument(
"--timestamp-id",
default=None,
type=str,
help="Override time stamp ID. "
"Useful for seamlessly continuing model training in logger.",
)
# Cluster args
self.parser.add_argument(
"--sweep-yml",
default=None,
type=Path,
help="Path to a config file with parameter sweeps",
)
self.parser.add_argument(
"--submit", action="store_true", help="Submit job to cluster"
)
self.parser.add_argument(
"--summit", action="store_true", help="Running on Summit cluster"
)
self.parser.add_argument(
"--logdir", default="logs", type=Path, help="Where to store logs"
)
self.parser.add_argument(
"--slurm-partition",
default="ocp",
type=str,
help="Name of partition",
)
self.parser.add_argument(
"--slurm-mem", default=80, type=int, help="Memory (in gigabytes)"
)
self.parser.add_argument(
"--slurm-timeout", default=72, type=int, help="Time (in hours)"
)
self.parser.add_argument(
"--num-gpus", default=1, type=int, help="Number of GPUs to request"
)
self.parser.add_argument(
"--distributed", action="store_true", help="Run with DDP"
)
self.parser.add_argument(
"--cpu", action="store_true", help="Run CPU only training"
)
self.parser.add_argument(
"--num-nodes",
default=1,
type=int,
help="Number of Nodes to request",
)
self.parser.add_argument(
"--distributed-port",
type=int,
default=13356,
help="Port on master for DDP",
)
self.parser.add_argument(
"--distributed-backend",
type=str,
default="nccl",
help="Backend for DDP",
)
self.parser.add_argument(
"--local_rank", default=0, type=int, help="Local rank"
)
self.parser.add_argument(
"--no-ddp", action="store_true", help="Do not use DDP"
)
self.parser.add_argument(
"--gp-gpus",
type=int,
default=None,
help="Number of GPUs to split the graph over (only for Graph Parallel training)",
)
flags = Flags()
| 4,659 | 31.137931 | 93 | py |
ocp | ocp-main/ocpmodels/common/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
| 178 | 24.571429 | 63 | py |
ocp | ocp-main/ocpmodels/common/gp_utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from typing import Any, Optional
import torch
from torch import distributed as dist
"""
Functions to support graph parallel training.
This is based on the Megatron-LM implementation:
https://github.com/facebookresearch/fairscale/blob/main/fairscale/nn/model_parallel/initialize.py
"""
########## INITIALIZATION ##########
_GRAPH_PARALLEL_GROUP = None
_DATA_PARALLEL_GROUP = None
def ensure_div(a: int, b: int) -> None:
assert a % b == 0
def divide_and_check_no_remainder(a: int, b: int) -> int:
ensure_div(a, b)
return a // b
def setup_gp(config) -> None:
gp_size = config["gp_gpus"]
backend = config["distributed_backend"]
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
gp_size = min(gp_size, world_size)
ensure_div(world_size, gp_size)
dp_size = world_size // gp_size
rank = dist.get_rank()
if rank == 0:
print("> initializing graph parallel with size {}".format(gp_size))
print("> initializing ddp with size {}".format(dp_size))
groups = torch.arange(world_size).reshape(dp_size, gp_size)
found = [x.item() for x in torch.where(groups == rank)]
global _DATA_PARALLEL_GROUP
assert (
_DATA_PARALLEL_GROUP is None
), "data parallel group is already initialized"
for j in range(gp_size):
group = dist.new_group(groups[:, j].tolist(), backend=backend)
if j == found[1]:
_DATA_PARALLEL_GROUP = group
global _GRAPH_PARALLEL_GROUP
assert (
_GRAPH_PARALLEL_GROUP is None
), "graph parallel group is already initialized"
for i in range(dp_size):
group = dist.new_group(groups[i, :].tolist(), backend=backend)
if i == found[0]:
_GRAPH_PARALLEL_GROUP = group
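# Worked example (comment added for clarity): with world_size = 4 and
# gp_gpus = 2, `groups` above is [[0, 1], [2, 3]]. Each row ({0, 1} and
# {2, 3}) becomes a graph-parallel group, while each column ({0, 2} and
# {1, 3}) becomes a data-parallel group.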
def cleanup_gp() -> None:
dist.destroy_process_group(_DATA_PARALLEL_GROUP)
dist.destroy_process_group(_GRAPH_PARALLEL_GROUP)
def initialized() -> bool:
return _GRAPH_PARALLEL_GROUP is not None
def get_dp_group():
return _DATA_PARALLEL_GROUP
def get_gp_group():
return _GRAPH_PARALLEL_GROUP
def get_dp_rank() -> int:
return dist.get_rank(group=get_dp_group())
def get_gp_rank() -> int:
return dist.get_rank(group=get_gp_group())
def get_dp_world_size() -> int:
return dist.get_world_size(group=get_dp_group())
def get_gp_world_size() -> int:
return (
1 if not initialized() else dist.get_world_size(group=get_gp_group())
)
########## DIST METHODS ##########
def pad_tensor(
tensor: torch.Tensor, dim: int = -1, target_size: Optional[int] = None
) -> torch.Tensor:
size = tensor.size(dim)
if target_size is None:
world_size = get_gp_world_size()
if size % world_size == 0:
pad_size = 0
else:
pad_size = world_size - size % world_size
else:
pad_size = target_size - size
if pad_size == 0:
return tensor
pad_shape = list(tensor.shape)
pad_shape[dim] = pad_size
padding = torch.empty(pad_shape, device=tensor.device, dtype=tensor.dtype)
return torch.cat([tensor, padding], dim=dim)
def trim_tensor(
tensor: torch.Tensor, sizes: Optional[torch.Tensor] = None, dim: int = 0
):
size = tensor.size(dim)
world_size = get_gp_world_size()
if size % world_size == 0:
return tensor, sizes
trim_size = size - size % world_size
if dim == 0:
tensor = tensor[:trim_size]
elif dim == 1:
tensor = tensor[:, :trim_size]
else:
raise ValueError
if sizes is not None:
sizes[-1] = sizes[-1] - size % world_size
return tensor, sizes
def _split_tensor(
tensor: torch.Tensor,
num_parts: int,
dim: int = -1,
contiguous_chunks: bool = False,
):
part_size = math.ceil(tensor.size(dim) / num_parts)
tensor_list = torch.split(tensor, part_size, dim=dim)
if contiguous_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
def _reduce(ctx: Any, input: torch.Tensor) -> torch.Tensor:
group = get_gp_group()
if ctx:
ctx.mark_dirty(input)
if dist.get_world_size(group) == 1:
return input
dist.all_reduce(input, group=group)
return input
def _split(input: torch.Tensor, dim: int = -1) -> torch.Tensor:
group = get_gp_group()
rank = get_gp_rank()
world_size = dist.get_world_size(group=group)
if world_size == 1:
return input
input_list = _split_tensor(input, world_size, dim=dim)
return input_list[rank].contiguous()
def _gather(input: torch.Tensor, dim: int = -1) -> torch.Tensor:
group = get_gp_group()
rank = get_gp_rank()
world_size = dist.get_world_size(group=group)
if world_size == 1:
return input
tensor_list = [torch.empty_like(input) for _ in range(world_size)]
tensor_list[rank] = input
dist.all_gather(tensor_list, input, group=group)
return torch.cat(tensor_list, dim=dim).contiguous()
def _gather_with_padding(input: torch.Tensor, dim: int = -1) -> torch.Tensor:
group = get_gp_group()
rank = get_gp_rank()
world_size = dist.get_world_size(group=group)
if world_size == 1:
return input
# Gather sizes
size_list = [
torch.empty(1, device=input.device, dtype=torch.long)
for _ in range(world_size)
]
size = torch.tensor(
[input.size(dim)], device=input.device, dtype=torch.long
)
size_list[rank] = size
dist.all_gather(size_list, size, group=group)
# Gather the inputs
max_size = int(max([size.item() for size in size_list]))
input = pad_tensor(input, dim, max_size)
shape = list(input.shape)
shape[dim] = max_size
tensor_list = [
torch.empty(shape, device=input.device, dtype=input.dtype)
for _ in range(world_size)
]
tensor_list[rank] = input
dist.all_gather(tensor_list, input, group=group)
# Trim and cat
if dim == 0:
tensor_list = [
tensor[:size] for tensor, size in zip(tensor_list, size_list)
]
elif dim == 1:
tensor_list = [
tensor[:, :size] for tensor, size in zip(tensor_list, size_list)
]
else:
raise ValueError
return torch.cat(tensor_list, dim=dim).contiguous()
class CopyToModelParallelRegion(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor) -> torch.Tensor:
return input
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
return _reduce(None, grad_output)
class ReduceFromModelParallelRegion(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor) -> torch.Tensor:
return _reduce(ctx, input)
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        # world_size is pinned to 1 here, so the backward pass applies no
        # gradient rescaling across the graph-parallel group.
        world_size = 1
return grad_output.mul_(world_size)
class ScatterToModelParallelRegion(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor, dim: int = -1) -> torch.Tensor:
result = _split(input, dim)
ctx.save_for_backward(torch.tensor(dim))
return result
@staticmethod
def backward(ctx, grad_output: torch.Tensor):
(dim,) = ctx.saved_tensors
        # world_size pinned to 1: gradients are gathered but not rescaled
        world_size = 1
return (
_gather_with_padding(grad_output, dim.item()).div_(world_size),
None,
)
class GatherFromModelParallelRegion(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor, dim: int = -1) -> torch.Tensor:
ctx.save_for_backward(torch.tensor(dim))
result = _gather_with_padding(input, dim)
return result
@staticmethod
def backward(ctx, grad_output: torch.Tensor):
(dim,) = ctx.saved_tensors
result = _split(grad_output, dim.item())
        # world_size pinned to 1: the split gradient is returned unscaled
        world_size = 1
return result.mul_(world_size), None
def copy_to_model_parallel_region(input: torch.Tensor) -> torch.Tensor:
return CopyToModelParallelRegion.apply(input)
def reduce_from_model_parallel_region(input: torch.Tensor) -> torch.Tensor:
return ReduceFromModelParallelRegion.apply(input)
def scatter_to_model_parallel_region(
input: torch.Tensor, dim: int = -1
) -> torch.Tensor:
return ScatterToModelParallelRegion.apply(input, dim)
def gather_from_model_parallel_region(
input: torch.Tensor, dim: int = -1
) -> torch.Tensor:
return GatherFromModelParallelRegion.apply(input, dim)
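# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). With an explicit
# `target_size`, pad_tensor needs no process group, and trim_tensor falls back
# to a world size of 1 when uninitialized, so both can be exercised on a
# single process. The autograd regions above additionally require setup_gp()
# to have been called under torch.distributed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    x = torch.arange(6.0).reshape(3, 2)
    padded = pad_tensor(x, dim=0, target_size=4)  # appends one uninitialized row
    assert padded.shape == (4, 2)
    trimmed, _ = trim_tensor(padded.clone())  # world size 1 -> returned as-is
    assert trimmed.shape == (4, 2)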
| 8,667 | 27.234528 | 97 | py |
ocp | ocp-main/ocpmodels/common/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
# Borrowed from https://github.com/rusty1s/pytorch_geometric/blob/master/torch_geometric/transforms/random_rotate.py
# with changes to keep track of the rotation / inverse rotation matrices.
import math
import numbers
import random
from typing import List
import torch
import torch_geometric
from torch_geometric.transforms import LinearTransformation
class RandomRotate:
r"""Rotates node positions around a specific axis by a randomly sampled
factor within a given interval.
Args:
degrees (tuple or float): Rotation interval from which the rotation
angle is sampled. If `degrees` is a number instead of a
tuple, the interval is given by :math:`[-\mathrm{degrees},
\mathrm{degrees}]`.
axes (int, optional): The rotation axes. (default: `[0, 1, 2]`)
"""
def __init__(self, degrees, axes: List[int] = [0, 1, 2]) -> None:
if isinstance(degrees, numbers.Number):
degrees = (-abs(degrees), abs(degrees))
assert isinstance(degrees, (tuple, list)) and len(degrees) == 2
self.degrees = degrees
self.axes = axes
def __call__(self, data):
if data.pos.size(-1) == 2:
degree = math.pi * random.uniform(*self.degrees) / 180.0
sin, cos = math.sin(degree), math.cos(degree)
matrix = [[cos, sin], [-sin, cos]]
else:
m1, m2, m3 = torch.eye(3), torch.eye(3), torch.eye(3)
if 0 in self.axes:
degree = math.pi * random.uniform(*self.degrees) / 180.0
sin, cos = math.sin(degree), math.cos(degree)
m1 = torch.tensor([[1, 0, 0], [0, cos, sin], [0, -sin, cos]])
if 1 in self.axes:
degree = math.pi * random.uniform(*self.degrees) / 180.0
sin, cos = math.sin(degree), math.cos(degree)
m2 = torch.tensor([[cos, 0, -sin], [0, 1, 0], [sin, 0, cos]])
if 2 in self.axes:
degree = math.pi * random.uniform(*self.degrees) / 180.0
sin, cos = math.sin(degree), math.cos(degree)
m3 = torch.tensor([[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]])
matrix = torch.mm(torch.mm(m1, m2), m3)
data_rotated = LinearTransformation(matrix)(data)
if torch_geometric.__version__.startswith("2."):
matrix = matrix.T
# LinearTransformation only rotates `.pos`; need to rotate `.cell` too.
if hasattr(data_rotated, "cell"):
data_rotated.cell = torch.matmul(data_rotated.cell, matrix)
return (
data_rotated,
matrix,
torch.inverse(matrix),
)
    def __repr__(self) -> str:
        return "{}({}, axes={})".format(
            self.__class__.__name__, self.degrees, self.axes
        )
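# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): rotate a toy graph by
# up to 30 degrees around the z-axis. The `Data` fields are minimal
# placeholders; real OCP data also carries `cell`, which is rotated above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch_geometric.data import Data

    data = Data(pos=torch.randn(5, 3))
    rotated, rot_mat, inv_rot_mat = RandomRotate(30, axes=[2])(data)
    # inv_rot_mat maps rotated positions (and predicted forces) back into
    # the original reference frame.
    print(rotated.pos.shape, rot_mat.shape)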
| 3,003 | 36.55 | 116 | py |
ocp | ocp-main/ocpmodels/common/hpo_utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
from ray import tune
def tune_reporter(
iters,
train_metrics,
val_metrics,
test_metrics=None,
metric_to_opt: str = "val_loss",
min_max: str = "min",
) -> None:
"""
Wrapper function for tune.report()
Args:
iters(dict): dict with training iteration info (e.g. steps, epochs)
train_metrics(dict): train metrics dict
val_metrics(dict): val metrics dict
test_metrics(dict, optional): test metrics dict, default is None
metric_to_opt(str, optional): str for val metric to optimize, default is val_loss
min_max(str, optional): either "min" or "max", determines whether metric_to_opt is to be minimized or maximized, default is min
"""
# labels metric dicts
train = label_metric_dict(train_metrics, "train")
val = label_metric_dict(val_metrics, "val")
    # Tolerate NaNs in the optimized metric; assumes the val set drives optimization.
if math.isnan(val[metric_to_opt]):
if min_max == "min":
val[metric_to_opt] = 100000.0
if min_max == "max":
val[metric_to_opt] = 0.0
if test_metrics:
test = label_metric_dict(test_metrics, "test")
else:
test = {}
# report results to Ray Tune
tune.report(**iters, **train, **val, **test)
def label_metric_dict(metric_dict, split):
    new_dict = {}
    for key in metric_dict:
        new_dict["{}_{}".format(split, key)] = metric_dict[key]
    return new_dict
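# Hedged usage sketch (not part of the original file): label_metric_dict is a
# pure helper, so it can be sanity-checked directly; tune_reporter itself must
# run inside a Ray Tune trial.
if __name__ == "__main__":
    assert label_metric_dict({"loss": 0.5, "mae": 0.1}, "train") == {
        "train_loss": 0.5,
        "train_mae": 0.1,
    }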
| 1,686 | 29.125 | 135 | py |
ocp | ocp-main/ocpmodels/common/relaxation/ase_utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
Utilities to interface OCP models/trainers with the Atomic Simulation
Environment (ASE)
"""
import copy
import logging
import os
import torch
import yaml
from ase import Atoms
from ase.calculators.calculator import Calculator
from ase.calculators.singlepoint import SinglePointCalculator as sp
from ase.constraints import FixAtoms
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
radius_graph_pbc,
setup_imports,
setup_logging,
)
from ocpmodels.datasets import data_list_collater
from ocpmodels.preprocessing import AtomsToGraphs
def batch_to_atoms(batch):
n_systems = batch.natoms.shape[0]
natoms = batch.natoms.tolist()
numbers = torch.split(batch.atomic_numbers, natoms)
fixed = torch.split(batch.fixed, natoms)
forces = torch.split(batch.force, natoms)
positions = torch.split(batch.pos, natoms)
tags = torch.split(batch.tags, natoms)
cells = batch.cell
energies = batch.y.tolist()
atoms_objects = []
for idx in range(n_systems):
atoms = Atoms(
numbers=numbers[idx].tolist(),
positions=positions[idx].cpu().detach().numpy(),
tags=tags[idx].tolist(),
cell=cells[idx].cpu().detach().numpy(),
constraint=FixAtoms(mask=fixed[idx].tolist()),
pbc=[True, True, True],
)
calc = sp(
atoms=atoms,
energy=energies[idx],
forces=forces[idx].cpu().detach().numpy(),
)
        atoms.calc = calc  # preferred over the deprecated set_calculator()
atoms_objects.append(atoms)
return atoms_objects
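# Note (comment added for clarity): batch_to_atoms is typically applied to
# relaxation outputs; each returned Atoms object carries a
# SinglePointCalculator holding the model's energy and forces, so e.g.
# atoms_objects[0].get_potential_energy() returns batch.y[0].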
class OCPCalculator(Calculator):
implemented_properties = ["energy", "forces"]
def __init__(
self,
config_yml=None,
checkpoint=None,
trainer=None,
cutoff=6,
max_neighbors=50,
cpu=True,
) -> None:
"""
OCP-ASE Calculator
Args:
config_yml (str):
Path to yaml config or could be a dictionary.
checkpoint (str):
Path to trained checkpoint.
trainer (str):
OCP trainer to be used. "forces" for S2EF, "energy" for IS2RE.
cutoff (int):
Cutoff radius to be used for data preprocessing.
max_neighbors (int):
                Maximum number of neighbors to store for a given atom.
cpu (bool):
Whether to load and run the model on CPU. Set `False` for GPU.
"""
setup_imports()
setup_logging()
Calculator.__init__(self)
# Either the config path or the checkpoint path needs to be provided
assert config_yml or checkpoint is not None
if config_yml is not None:
if isinstance(config_yml, str):
config = yaml.safe_load(open(config_yml, "r"))
if "includes" in config:
for include in config["includes"]:
# Change the path based on absolute path of config_yml
path = os.path.join(
config_yml.split("configs")[0], include
)
include_config = yaml.safe_load(open(path, "r"))
config.update(include_config)
else:
config = config_yml
# Only keeps the train data that might have normalizer values
if isinstance(config["dataset"], list):
config["dataset"] = config["dataset"][0]
elif isinstance(config["dataset"], dict):
config["dataset"] = config["dataset"].get("train", None)
else:
# Loads the config from the checkpoint directly (always on CPU).
config = torch.load(checkpoint, map_location=torch.device("cpu"))[
"config"
]
if trainer is not None: # passing the arg overrides everything else
config["trainer"] = trainer
else:
if "trainer" not in config: # older checkpoint
if config["task"]["dataset"] == "trajectory_lmdb":
config["trainer"] = "forces"
elif config["task"]["dataset"] == "single_point_lmdb":
config["trainer"] = "energy"
else:
logging.warning(
"Unable to identify OCP trainer, defaulting to `forces`. Specify the `trainer` argument into OCPCalculator if otherwise."
)
config["trainer"] = "forces"
if "model_attributes" in config:
config["model_attributes"]["name"] = config.pop("model")
config["model"] = config["model_attributes"]
        # for checkpoints with relaxation datasets defined, remove to avoid
        # unnecessarily trying to load that dataset
if "relax_dataset" in config["task"]:
del config["task"]["relax_dataset"]
# Calculate the edge indices on the fly
config["model"]["otf_graph"] = True
# Save config so obj can be transported over network (pkl)
self.config = copy.deepcopy(config)
self.config["checkpoint"] = checkpoint
if "normalizer" not in config:
del config["dataset"]["src"]
config["normalizer"] = config["dataset"]
self.trainer = registry.get_trainer_class(
config.get("trainer", "energy")
)(
task=config["task"],
model=config["model"],
dataset=None,
normalizer=config["normalizer"],
optimizer=config["optim"],
identifier="",
slurm=config.get("slurm", {}),
local_rank=config.get("local_rank", 0),
is_debug=config.get("is_debug", True),
cpu=cpu,
)
if checkpoint is not None:
self.load_checkpoint(checkpoint)
self.a2g = AtomsToGraphs(
max_neigh=max_neighbors,
radius=cutoff,
r_energy=False,
r_forces=False,
r_distances=False,
r_edges=False,
r_pbc=True,
)
def load_checkpoint(self, checkpoint_path: str) -> None:
"""
Load existing trained model
Args:
checkpoint_path: string
Path to trained model
"""
try:
self.trainer.load_checkpoint(checkpoint_path)
except NotImplementedError:
logging.warning("Unable to load checkpoint!")
def calculate(self, atoms, properties, system_changes) -> None:
Calculator.calculate(self, atoms, properties, system_changes)
data_object = self.a2g.convert(atoms)
batch = data_list_collater([data_object], otf_graph=True)
predictions = self.trainer.predict(
batch, per_image=False, disable_tqdm=True
)
if self.trainer.name == "s2ef":
self.results["energy"] = predictions["energy"].item()
self.results["forces"] = predictions["forces"].cpu().numpy()
elif self.trainer.name == "is2re":
self.results["energy"] = predictions["energy"].item()
| 7,362 | 33.406542 | 145 | py |
ocp | ocp-main/ocpmodels/common/relaxation/__init__.py | 0 | 0 | 0 | py |
|
ocp | ocp-main/ocpmodels/common/relaxation/ml_relaxation.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
from collections import deque
from pathlib import Path
import torch
from torch_geometric.data import Batch
from ocpmodels.common.registry import registry
from ocpmodels.datasets.lmdb_dataset import data_list_collater
from .optimizers.lbfgs_torch import LBFGS, TorchCalc
def ml_relax(
batch,
model,
steps,
fmax,
relax_opt,
save_full_traj,
device="cuda:0",
transform=None,
early_stop_batch=False,
):
"""
Runs ML-based relaxations.
Args:
batch: object
model: object
steps: int
Max number of steps in the structure relaxation.
fmax: float
Structure relaxation terminates when the max force
of the system is no bigger than fmax.
        relax_opt: dict
Optimizer and corresponding parameters to be used for structure relaxations.
save_full_traj: bool
Whether to save out the full ASE trajectory. If False, only save out initial and final frames.
"""
batches = deque([batch[0]])
relaxed_batches = []
while batches:
batch = batches.popleft()
oom = False
ids = batch.sid
calc = TorchCalc(model, transform)
# Run ML-based relaxation
traj_dir = relax_opt.get("traj_dir", None)
optimizer = LBFGS(
batch,
calc,
maxstep=relax_opt.get("maxstep", 0.04),
memory=relax_opt["memory"],
damping=relax_opt.get("damping", 1.0),
alpha=relax_opt.get("alpha", 70.0),
device=device,
save_full_traj=save_full_traj,
traj_dir=Path(traj_dir) if traj_dir is not None else None,
traj_names=ids,
early_stop_batch=early_stop_batch,
)
try:
relaxed_batch = optimizer.run(fmax=fmax, steps=steps)
relaxed_batches.append(relaxed_batch)
        except RuntimeError as e:
            oom = True
            torch.cuda.empty_cache()
            err = e  # `e` is unbound once the except block exits, so keep a reference
        if oom:
            # move OOM recovery code outside of except clause to allow tensors to be freed.
            data_list = batch.to_data_list()
            if len(data_list) == 1:
                raise err
logging.info(
f"Failed to relax batch with size: {len(data_list)}, splitting into two..."
)
mid = len(data_list) // 2
batches.appendleft(data_list_collater(data_list[:mid]))
batches.appendleft(data_list_collater(data_list[mid:]))
relaxed_batch = Batch.from_data_list(relaxed_batches)
return relaxed_batch
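# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): `data_batch` is assumed
# to be a torch_geometric Batch produced by an OCP dataloader and `model` a
# trained S2EF model; values below are illustrative.
#
#     relaxed = ml_relax(
#         batch=[data_batch],
#         model=model,
#         steps=300,
#         fmax=0.02,
#         relax_opt={"memory": 50, "maxstep": 0.04, "traj_dir": None},
#         save_full_traj=False,
#         device="cpu",
#     )
# ---------------------------------------------------------------------------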
| 2,786 | 29.626374 | 106 | py |