python_code | repo_name | file_path
---|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# NB: IMPORT utils FIRST SO THAT MATPLOTLIB DOESN'T GET MESSED UP!!!
from utils import (
generate_data, save_data_plot, OracleDiscriminator
)
from experiments.tfs.image import *
from experiments.train_scripts import (
flags, train_tan, train_end_model, assemble_tan
)
from experiments.utils import get_log_dir_path
from functools import partial
from tanda.discriminator import SimpleDiscriminator
from tanda.transformer import Transformer
#####################################################################
# Additional TF input flags
flags.DEFINE_integer("tfs", 1, "TF set to use")
# Synthetic dataset generation
flags.DEFINE_integer("synthetic_n", 1000,
"Number of training points to generate")
flags.DEFINE_integer("synthetic_dim", 2, "Dimension of synthetic data")
flags.DEFINE_float("synthetic_r", 1.0,
"Radius of ball around origin in which data is uniformly generated")
# Using an oracle discriminator
flags.DEFINE_boolean("oracle_disc", False,
"Optionally use a perfect discriminator, for testing")
FLAGS = flags.FLAGS
#####################################################################
#####################################################################
# Transformation functions
tf_sets = []
d = FLAGS.synthetic_dim
### TF SET 1: Small vs. Large
def TF_displace(x, d=0):
"""Displace point by vector d"""
return x + d
small_disps = [0.2 * (np.random.random(d) - 0.5) for _ in range(10)]
large_disps = [4.0 * (np.random.random(d) - 0.5) for _ in range(5)]
if FLAGS.is_test and FLAGS.tfs == 0:
for disp in small_disps + large_disps:
print(disp)
tfs_1 = [partial(TF_displace, d=disp) for disp in small_disps + large_disps]
tf_sets.append(tfs_1)
### TF SET 2: Medium displacements along the main axes
# Note that:
# * Two or more displacements along same axis = bad
# * We will have FLAGS.synthetic_dim number of displacements, each with
# magnitude 0.75
# * Should set sequence_length to be = FLAGS.synthetic_dim
medium_disps = []
for i in range(d):
x = np.zeros(d)
x[i] = 0.75
medium_disps.append(x)
tfs_2 = [partial(TF_displace, d=disp) for disp in medium_disps]
tf_sets.append(tfs_2)
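# Illustrative sketch (added for exposition, not used by the script): applying
# each TF in `medium_disps` exactly once moves a point by 0.75 along every
# axis, which is why the note above suggests sequence_length equal to
# FLAGS.synthetic_dim. Assumes numpy is available as `np` through the wildcard
# import at the top of this file.
def _demo_tf_set_2(point=None):
    p = np.zeros(d) if point is None else np.array(point, dtype=float)
    for disp in medium_disps:
        p = TF_displace(p, d=disp)
    return p  # e.g. for d == 2: array([0.75, 0.75])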
### TF SET 3: Non-commuting displacements
def TF_displace_decay(x, d=0, r=2.0):
"""Displace point by decay * d, where decay = min(1, r / |x|^2)"""
return x + min(1.0, r / np.linalg.norm(x)**2) * d
medium_disps_2 = []
for i in range(d):
x = np.zeros(d)
#x[i] = np.random.random() + 0.5
x[i] = 0.75
medium_disps_2.append(x)
medium_disps_2.append(-np.copy(x))
tfs_3 = [partial(TF_displace_decay, d=disp) for disp in medium_disps_2]
tf_sets.append(tfs_3)
### TF SET 4: Uniform-magnitude random vectors + non-commuting null zone
def TF_displace_stuck(x, d=0, r=1.5):
return x + d if np.linalg.norm(x) < r else x
r = 0.33
vecs = []
for _ in range(10):
# Pick an angle uniformly
theta = 2 * np.pi * np.random.random()
vecs.append(np.array([r * np.cos(theta), r * np.sin(theta)]))
tfs_4 = [partial(TF_displace_stuck, d=v) for v in vecs]
tf_sets.append(tfs_4)
tfs = tf_sets[FLAGS.tfs - 1]
#####################################################################
if __name__ == '__main__':
# Create log path: create it at this level (or one above, in the launch
# script) so that we use the same one for both steps
log_path = FLAGS.log_path if FLAGS.log_path is not None else \
get_log_dir_path(FLAGS.log_root, FLAGS.run_name)
# Note that the flags in this file control the dataset size, not the
# normal flags in train_scripts.py!
dims = [FLAGS.synthetic_dim]
if FLAGS.subsample_seed > 0:
np.random.seed(FLAGS.subsample_seed)
X = generate_data(
FLAGS.synthetic_n, d=FLAGS.synthetic_dim, r=FLAGS.synthetic_r)
# For testing, also include a discriminator which is perfectly correct
if FLAGS.oracle_disc:
d_class = OracleDiscriminator
else:
d_class = SimpleDiscriminator
###
### STEP 1: TRAIN TAN
###
if FLAGS.is_test:
print("STEP 1: Training TAN")
train_tan(X, dims, tfs, log_path, d_class=d_class,
t_class=Transformer, plotter=save_data_plot)
| tanda-master | experiments/synthetic/train.py |
| tanda-master | experiments/cifar10/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import os
import six
from functools import partial
from six.moves import cPickle
from skimage import img_as_float
def load_cifar10_batch(fpath, one_hot=True, as_float=True):
with open(fpath, 'rb') as f:
# https://stackoverflow.com/questions/11305790
if six.PY3:
data = cPickle.load(f, encoding='latin-1')
else:
data = cPickle.load(f)
X = np.copy(data['data']).reshape(-1, 32*32, 3, order='F')
X = X.reshape(-1, 32, 32, 3)
Y = np.array(data['labels'])
# Convert labels to one hot
if one_hot:
Y = to_one_hot(Y)
# CONVERT TO FLOAT [0,1] TYPE HERE to be consistent with skimage TFs!!!
# See: http://scikit-image.org/docs/dev/user_guide/data_types.html
if as_float:
X = img_as_float(X)
return X, Y
def to_one_hot(y, n_classes=10):
Y = np.zeros([y.shape[0], n_classes])
for i in range(y.shape[0]):
Y[i, y[i]] = 1
return Y
def load_cifar10_data(data_root, one_hot=True, as_float=True,
validation_set=True):
"""Load training (first 4 batches), validation (5th batch), and test set.
If validation_set=False, combines training and validation sets, and returns
test set as both validation and test.
"""
# Apply loading format uniformly
load_batch = partial(load_cifar10_batch, one_hot=one_hot, as_float=as_float)
# Load training data
X_train, Y_train = [], []
train_batches = 4 if validation_set else 5
for i in range(train_batches):
X, Y = load_batch(os.path.join(data_root, 'data_batch_%s' % (i+1,)))
X_train.append(X)
Y_train.append(Y)
X_train = np.vstack(X_train)
Y_train = np.concatenate(Y_train)
# Load test data
X_test, Y_test = load_batch(os.path.join(data_root, 'test_batch'))
# Load validation data
if validation_set:
X_valid, Y_valid = load_batch(os.path.join(data_root, 'data_batch_5'))
else:
X_valid, Y_valid = X_test, Y_test
return X_train, Y_train, X_valid, Y_valid, X_test, Y_test
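# Illustrative usage sketch (added for exposition, not part of the original
# module). Assumes the CIFAR-10 python batches have been extracted under
# `data_root`; with validation_set=True the loader above yields the first 4
# batches (40k images) for training, batch 5 for validation, and the test batch.
def _demo_load(data_root='experiments/cifar10/data/cifar-10-batches-py'):
    X_tr, Y_tr, X_va, Y_va, X_te, Y_te = load_cifar10_data(data_root)
    assert X_tr.shape == (40000, 32, 32, 3) and Y_tr.shape == (40000, 10)
    assert X_va.shape == (10000, 32, 32, 3) and X_te.shape == (10000, 32, 32, 3)
    return X_tr, Y_tr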
| tanda-master | experiments/cifar10/dataset.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from dataset import load_cifar10_data
from experiments.train_scripts import flags, select_fold, train
from experiments.tfs.image import *
from functools import partial
from itertools import chain
#####################################################################
flags.DEFINE_boolean("validation_set", True,
"If False, use validation set as part of training set")
FLAGS = flags.FLAGS
#####################################################################
#####################################################################
# Transformation functions
tfs = list(chain.from_iterable([
[partial(TF_rotate, angle=p) for p in [2.5, -2.5, 5, -5]],
[partial(TF_zoom, scale=p) for p in [0.9, 1.1, 0.75, 1.25]],
[partial(TF_shear, shear=p) for p in [0.1, -0.1, 0.25, -0.25]],
[partial(TF_swirl, strength=p) for p in [0.1, -0.1, 0.25, -0.25]],
[partial(TF_shift_hue, shift=p) for p in [0.1, -0.1, 0.25, -0.25]],
[partial(TF_enhance_contrast, p=p) for p in [0.75, 1.25, 0.5, 1.5]],
[partial(TF_enhance_brightness, p=p) for p in [0.75, 1.25, 0.5, 1.5]],
[partial(TF_enhance_color, p=p) for p in [0.75, 1.25, 0.5, 1.5]],
[TF_horizontal_flip]
]))
#####################################################################
if __name__ == '__main__':
# Load CIFAR10 data
dims = [32, 32, 3]
DATA_DIR = 'experiments/cifar10/data/cifar-10-batches-py'
X_train, Y_train, X_valid, Y_valid, X_test, Y_test = load_cifar10_data(
DATA_DIR, validation_set=FLAGS.validation_set)
if FLAGS.n_folds > 0:
X_train, Y_train = select_fold(X_train, Y_train)
# Run training scripts
train(X_train, dims, tfs, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid,
n_classes=10)
| tanda-master | experiments/cifar10/train.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from .discriminator import DCNN
ADAM = tf.train.AdamOptimizer
SGD = tf.train.GradientDescentOptimizer
def get_mse_loss(mse, mse_term, eps=1e-6, mean=False):
z = mse_term * (1.0 / (mse + eps))
return tf.reduce_mean(z) if mean else z
def per_image_std_map(X, img_dims, dims_out):
imgs = tf.reshape(X, [-1] + img_dims)
imgs = tf.map_fn(tf.image.per_image_standardization, imgs)
return tf.reshape(imgs, dims_out)
class TFQ(object):
def __init__(self, generator, size):
self.generator = generator
self.size = size
self._q = None
self._i = size
def init_q(self, session):
self._q = self.generator.get_action_sequence(session, self.size)
self._i = 0
def next(self, session):
if self._i >= self.size:
self.init_q(session)
self._i += 1
return self._q[self._i - 1, :]
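# Illustrative note (added for exposition): TFQ simply pre-samples `size`
# action sequences from the generator and hands them out one at a time via
# next(session), drawing a fresh block once the current one is exhausted;
# this amortizes session.run calls when transforming single data points.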
class TAN(object):
"""Transormation Adversarial Network"""
def __init__(self, discriminator, generator, transformer, d_lr, g_lr,
mse_term=1.0, mse_layer=None, d_trainer=ADAM, g_trainer=SGD,
reuse=False, gamma=0.0, per_img_std=False, train_disc=True,
tf_seq_queue_size=None):
self.discriminator = discriminator
self.generator = generator
self.transformer = transformer
# Note that we inherit the data dimensions from the discriminator
self.dims = self.discriminator.dims
self.d = np.prod(self.dims) # Flattened size
# We can optionally not train the disc e.g. if using an oracle disc
self.train_disc = train_disc
# Build training operations
self.d_train_op = None
self.g_train_op = None
self.batch_size = self.generator.batch_size
self.reuse = reuse
# Optionally initialize a TF seq queue for batch generation
if not tf_seq_queue_size:
self.tf_q = None
else:
self.tf_q = TFQ(generator, tf_seq_queue_size)
# Build model graph
self._build(d_lr, g_lr, mse_term, mse_layer, d_trainer, g_trainer,
gamma, per_img_std)
def _build(self, d_lr, g_lr, mse_term, mse_layer, d_trainer, g_trainer,
gamma, per_img_std):
"""Build the TAN computation graph"""
T = self.generator.seq_len
# Placeholders for basic input data
self.data = tf.placeholder(tf.float32, (None, self.d))
batch_size = tf.shape(self.data)[0]
# For each datapoint we expect the *original data point first*,
# then the T = seq_len incremental transformed versions
self.transformed_data = tf.placeholder(tf.float32, (None, T+1, self.d))
###
### DISCRIMINATOR LOSS
###
# Get discriminator logits over real data
with tf.variable_scope("discriminator", reuse=self.reuse):
D_real = self.discriminator.get_logits_op(self.data,
per_img_std=per_img_std, get_layers=(mse_layer is not None))
# Separate layers from loss, or use pixels as layers
if mse_layer is not None:
D_real, D_real_layers = D_real
data = D_real_layers[-(mse_layer + 1)]
else:
if per_img_std:
data = per_image_std_map(self.data, self.dims,
[batch_size, self.d])
data_t = per_image_std_map(self.transformed_data, self.dims,
[batch_size, T+1, self.d])
else:
data, data_t = self.data, self.transformed_data
# Get discriminator logits over *final* transform data
data_tf = self.transformed_data[:, -1, :]
with tf.variable_scope("discriminator", reuse=True):
D_tf = self.discriminator.get_logits_op(data_tf,
per_img_std=per_img_std, get_layers=False)
# Define discriminative loss
real_loss = self.discriminator.get_loss_op(D_real)
tf_loss = self.discriminator.get_loss_op(D_tf, positive=False)
self.D_loss = 0.5 * (real_loss + tf_loss)
###
### GENERATOR LOSS
###
# Get the logits for each incrementally-transformed datapoint
with tf.variable_scope("discriminator", reuse=True):
D_tf_g_array = tf.TensorArray(tf.float32, T + 1)
if mse_layer is not None:
data_t_array = tf.TensorArray(tf.float32, T + 1)
for i in range(T + 1):
# Note: Here we pass in train=False, which is passed to e.g.
# batch_norm and other operators in the discriminator that are
# stochastic during training
d_tf_g = self.discriminator.get_logits_op(
self.transformed_data[:, i, :], per_img_std=per_img_std,
train=False, get_layers=(mse_layer is not None))
# Separate loss and layer
if mse_layer is not None:
d_tf_g, d_tf_g_layers = d_tf_g
# Use negative index to retrieve layer to use
d_tf_g_layer = d_tf_g_layers[-(mse_layer + 1)]
data_t_array = data_t_array.write(i, d_tf_g_layer)
D_tf_g_array = D_tf_g_array.write(i, d_tf_g)
# D_tf_g is reshaped to [batch_size, T+1, dim]
D_tf_g = tf.transpose(D_tf_g_array.stack(), perm=[1, 0, 2])
if mse_layer is not None:
data_t = tf.transpose(data_t_array.stack(), perm=[1, 0, 2])
# Define generative loss for training
# G_loss_all is a batch_size x (T+1) matrix with the discriminator
# losses of all the incremental transformed data points
G_loss_all = self.discriminator.get_loss_op(D_tf_g, mean=False)
# Add MSE term to generator objective function here
shape = [batch_size, 1, tf.shape(data_t)[2]]
data_r = tf.tile(tf.reshape(data, shape), [1, T+1, 1])
mse = tf.reduce_mean(tf.square(data_r - data_t), 2)
G_loss_all = G_loss_all + get_mse_loss(mse, mse_term)
# Get the change in loss between each incremental transformation
# G_loss_deltas is a [batch_size, T] Tensor
self.G_loss_deltas = G_loss_all[:, 1:] - G_loss_all[:, :-1]
# Get the policy loss op
q = self.generator.get_policy_loss_op(self.G_loss_deltas, gamma)
###
### TRAINING OPS
###
# Define discriminative operation
self.d_train_op = None
if self.train_disc:
d_name = "discriminator"
d_vars = [
v for v in tf.trainable_variables() if v.name.startswith(d_name)
]
d_update_ops = [
u for u in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if u.name.startswith(d_name)
]
with tf.variable_scope(d_name, reuse=self.reuse):
d_step = tf.Variable(0, trainable=False)
# Note: This is necessary for batch_norm to be handled correctly
with tf.control_dependencies(d_update_ops):
self.d_train_op = d_trainer(d_lr).minimize(self.D_loss,
global_step=d_step, var_list=d_vars)
# Define generative operation
g_vars = [
v for v in tf.trainable_variables()
if v.name.startswith(self.generator.name)
]
g_update_ops = [
u for u in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if u.name.startswith(self.generator.name)
]
with tf.variable_scope(self.generator.name, reuse=self.reuse):
g_step = tf.Variable(0, trainable=False)
# Note: This is necessary for batch_norm to be handled correctly
with tf.control_dependencies(g_update_ops):
self.g_train_op = g_trainer(g_lr).minimize(q,
global_step=g_step, var_list=g_vars)
# Define predictions
self.d_pred = tf.to_int32(tf.greater(tf.sigmoid(D_real), 0.5))
self.g_pred = tf.to_int32(tf.greater(tf.sigmoid(D_tf), 0.5))
###
### LOGGING
###
# Create summary for D_loss
D_loss_summary = tf.summary.scalar("disc_loss", self.D_loss)
# Get discriminative loss over transformed image for generator loss
# Note a squeeze is performed automatically in the slice for MSE
msef = tf.reduce_mean(tf.square(data_r[:, -1, :] - data_t[:, -1, :]), 1)
self.G_loss = self.discriminator.get_loss_op(D_tf)
self.G_loss_mse = self.G_loss + get_mse_loss(msef, mse_term, mean=True)
# Create summaries for generator loss
G_loss_summary = tf.summary.scalar("gen_loss", self.G_loss)
G_loss_mse_summary = tf.summary.scalar("gen_mse_loss", self.G_loss_mse)
# Create alias summaries for random loss
self.R_loss = self.G_loss
self.R_loss_mse = self.G_loss_mse
R_loss_summary = tf.summary.scalar("rand_loss", self.R_loss)
R_loss_mse_summary = tf.summary.scalar("rand_mse_loss", self.R_loss_mse)
# Merge summaries
dg_summaries = [D_loss_summary, G_loss_summary, G_loss_mse_summary]
self.dg_summary = tf.summary.merge(dg_summaries)
r_summaries = [R_loss_summary, R_loss_mse_summary]
self.rand_summary = tf.summary.merge(r_summaries)
### Saver
# NOTE: We only save the generative model; this way it is compatible with a
# larger range of end models
vars_list = [
v for v in tf.trainable_variables()
if v.name.startswith(self.generator.name)
]
self.saver = tf.train.Saver(var_list=vars_list)
def get_transformed_data(self, session, data, emit_incremental=False,
n_seqs_per_example=1):
"""Transform data
@session: a TensorFlow session
@data: original training data batch
@emit_incremental: return incrementally transformed data points?
@n_seqs_per_example: number of sampled transformation sequences
applied to each data point
Returns a tuple of transformed data, the sequences applied, and the
original data repeated n_seqs_per_example times
"""
# Replicate n_seqs_per_example times
data_rep = np.tile(data, (n_seqs_per_example, 1))
# Get action sequences
tf_seqs = self.generator.get_action_sequence(session, data_rep.shape[0])
# Transform data
return (self.transformer.transform(
data_rep, tf_seqs, emit_incremental=emit_incremental
), tf_seqs, data_rep)
def transform(self, session, x):
"""Transform single data point
@session: a TensorFlow session
@x: original training data point
Returns a transformed data point; uses TF queue if initialized
"""
# Get action sequences
if self.tf_q is not None:
tf_seq = self.tf_q.next(session)
else:
tf_seq = self.generator.get_action_sequence(session, 1)[0, :]
# Transform data
return self.transformer(x, tf_seq)
def get_random_loss(self, session, data, gen_loss=None):
"""
Return loss with random transformations; if gen_loss provided
as scalar value, return summary for gen_loss / rand_loss as well.
"""
# Make sure data is in the proper dims
r_data_b = self.transformer.transform_basic(data)
# Get random sequence of transformations
seq_len = self.generator.seq_len
# Get both incremental and final transformed data points
rand_tf_data_inc = self.transformer.random_transform(
data, seq_len, emit_incremental=True
)
loss, summary = session.run([self.R_loss, self.rand_summary], {
self.data: r_data_b,
self.transformed_data: rand_tf_data_inc,
})
if gen_loss is not None:
ratio_val = tf.summary.Summary.Value(
tag="gen_rand_loss_ratio",
simple_value=float(loss) / gen_loss
)
ratio_summary = tf.summary.Summary(value=[ratio_val])
return loss, summary, ratio_summary
else:
return loss, summary
def train_step(self, session, data, n_disc_steps, n_gen_steps, n_sample=1):
# Optionally transform test data (to match transformed data)
# E.g. see PadCropTransformer class
d_data_test = self.transformer.transform_basic(data)
# Update discriminator
for _ in range(n_disc_steps):
# Get transformed data
tf_d_data, _, _ = self.get_transformed_data(session, data,
emit_incremental=True)
# Get loss and, if training the discriminator (default), execute the train op
fd = {self.data: d_data_test, self.transformed_data: tf_d_data}
if self.train_disc:
d_loss, _ = session.run([self.D_loss, self.d_train_op], fd)
else:
d_loss = session.run([self.D_loss], fd)
# Update generator
for _ in range(n_gen_steps):
# Get both incrementally-transformed data and final version
tf_g_data_inc, tf_seqs, data_rep = self.get_transformed_data(
session, data, emit_incremental=True,
n_seqs_per_example=n_sample
)
# Get the feed_dict for the training step
# Note that the get_feed method will make sure that the action
# sequences sampled are the same as the ones used to generate the
# transformed data above!
g_feed = self.generator.get_feed(tf_seqs)
g_feed.update({self.transformed_data: tf_g_data_inc})
# Optionally transform test data (to match transformed data)
# E.g. see PadCropTransformer class
data_rep = self.transformer.transform_basic(data_rep)
g_feed.update({self.data: data_rep})
# Define training op
g_loss, summary, _, g_loss_deltas = session.run(
[self.G_loss, self.dg_summary, self.g_train_op,
self.G_loss_deltas], g_feed
)
# Return losses
return d_loss, g_loss, summary, g_loss_deltas, tf_seqs
def get_transformed_data_and_predictions(self, session, data):
tf_data, _, _ = self.get_transformed_data(session, data, True)
d_y, g_y = session.run([self.d_pred, self.g_pred], {
self.data: data, self.transformed_data: tf_data
})
final_tf_data = np.squeeze(tf_data[:, -1, :])
return final_tf_data, np.ravel(d_y), np.ravel(g_y)
def save(self, session, save_path):
_ = self.saver.save(session, save_path)
def restore(self, session, save_path):
self.saver.restore(session, save_path)
def PretrainedTAN(G, T, dims, session, checkpoint_path, tf_seq_queue_size=5000):
# Build dummy discriminator
D = DCNN(dims=dims)
# Build TAN
tan = TAN(D, G, T, 0, 0, tf_seq_queue_size=tf_seq_queue_size)
tan.restore(session, checkpoint_path)
return tan
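# Illustrative training-loop sketch (added for exposition, not part of the
# module). `D`, `G`, `T`, and `batches` are placeholders: an already-built
# discriminator, generator, and transformer (e.g. DCNN, GRUGenerator,
# ImageTransformer) and an iterator over flattened [batch_size, d] arrays.
def _demo_tan_training(D, G, T, batches, n_steps=100):
    tan = TAN(D, G, T, d_lr=1e-3, g_lr=1e-3)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for _, data in zip(range(n_steps), batches):
            d_loss, g_loss, summary, deltas, seqs = tan.train_step(
                session, data, n_disc_steps=1, n_gen_steps=1)
    return d_loss, g_loss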
| tanda-master | tanda/tan.py |
| tanda-master | tanda/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import numpy as np
from skimage.util import crop, pad
class Transformer(object):
def __init__(self, tfs):
"""Transforms data points given a set of transformation functions (TFs)
@tfs: a list of TFs
Note that each element of `tfs` will usually be a base TF with a
specific parameter, e.g. `partial(TF_rotate, angle=2.5)`.
"""
self.tfs = tfs
# The number of available actions
# Note this is synonymous with len(self.tfs) in the current
# implementation, but might diverge in other ones, so we standardize
self.n_actions = len(self.tfs)
def pre_tf(self, x):
return x
def post_tf(self, x):
return x
def _apply(self, x, tf_seq, emit_incremental):
"""Apply a sequence of TFs to data point x
@tf_seq: a list of indices referencing `self.tfs`
@emit_incremental: If true, returns each incrementally-transformed
image, _including_ the original image
"""
# NOTE that we include the un-transformed datapoint as the first object!
xcs = [self.post_tf(self.pre_tf(copy.deepcopy(x)))]
# Apply the TFs, in the given order by default
xc = self.pre_tf(copy.deepcopy(x))
for i in tf_seq:
xc = self.tfs[i](xc)
xcs.append(self.post_tf(copy.deepcopy(xc)))
# Return either just the final transformed version, or all the
# incremental ones
if emit_incremental:
return np.vstack(xcs)
else:
return xcs[-1]
def transform(self, X, tf_seqs, emit_incremental=True):
"""Apply a sequence of TFs to a batch of data points X
@tf_seqs: A matrix representing one list of indices, referencing
`self.tfs`, for each data point x in X.
@emit_incremental: If true, returns each incrementally-transformed
image, _including_ the original image
"""
xcs = [self._apply(x, t, emit_incremental) for x, t in zip(X, tf_seqs)]
if emit_incremental:
return np.array(xcs)
else:
return np.vstack(xcs)
def random_transform(self, X, seq_len, emit_incremental=True, **kwargs):
"""Apply a random sequence of TFs to each x in X"""
rand_seqs = np.random.randint(self.n_actions, size=(len(X), seq_len))
return self.transform(
X, rand_seqs, emit_incremental=emit_incremental, **kwargs
)
def transform_basic(self, X, train=False):
return X
def __call__(self, x, tf_seq):
return self._apply(x, tf_seq, False).reshape(x.shape)
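# Illustrative usage sketch (added for exposition; `TF_shift` is a made-up TF,
# not one from experiments.tfs): each element of `tfs` is a parameterized
# function x -> x', and a transformation sequence is a list of indices into
# `tfs` applied in order.
def _demo_transformer():
    from functools import partial
    def TF_shift(x, d=0.0):
        return x + d
    t = Transformer([partial(TF_shift, d=v) for v in (-1.0, 1.0)])
    X = np.zeros((4, 3))                      # batch of 4 three-dim points
    seqs = np.random.randint(t.n_actions, size=(4, 5))
    out = t.transform(X, seqs, emit_incremental=True)
    return out.shape                          # (4, 6, 3): original + 5 steps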
class ImageTransformer(Transformer):
def __init__(self, tfs, dims):
self.dims = dims
self.size = np.prod(dims)
super(ImageTransformer, self).__init__(tfs)
def pre_tf(self, img):
return np.reshape(img, self.dims)
def post_tf(self, img):
return np.reshape(img, [self.size])
class PadCropTransformer(ImageTransformer):
"""Pad and then (randomly) crop back to same original size."""
def __init__(self, tfs, dims, pad_px=4, pad_mode='edge'):
self.pad_px = pad_px
self.pad_mode = pad_mode
super(PadCropTransformer, self).__init__(tfs, dims)
def transform_basic(self, X, train=False):
return np.vstack([
self.post_tf(self.pre_tf(copy.deepcopy(X[i])), train=train)
for i in range(X.shape[0])
])
def pre_tf(self, img):
img = np.reshape(img, self.dims)
# Pad image by n_pixels on each side
return pad(
img,
[(self.pad_px, self.pad_px) for _ in range(2)] + [(0,0)],
mode=self.pad_mode
)
def post_tf(self, img, train=True):
"""Note that we assume a square image and crop centered if not
training, randomly if training.
"""
assert self.dims[0] == self.dims[1]
if train:
# Take a random crop of the original size
crop_sizes = np.random.randint(0, 2*self.pad_px+1, [2])
crops = [(c, 2*self.pad_px-c) for c in crop_sizes]
else:
crops = [(self.pad_px, self.pad_px) for _ in range(2)]
# For channel dimension don't do any cropping
crops += [(0,0)]
return np.reshape(crop(img, crops, copy=True), [self.size])
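# Illustrative sketch (added for exposition): pre_tf pads each [h, w, c] image
# by pad_px on both spatial sides and post_tf crops back to the original size
# (randomly during training, centered otherwise), so shapes round-trip:
def _demo_pad_crop():
    pct = PadCropTransformer(tfs=[], dims=[32, 32, 3], pad_px=4)
    img = np.zeros(32 * 32 * 3)               # flattened input, as TAN uses
    padded = pct.pre_tf(img)                  # (40, 40, 3)
    flat = pct.post_tf(padded, train=False)   # centered crop -> (3072,)
    return padded.shape, flat.shape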
| tanda-master | tanda/transformer.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from tensorflow.python.framework import ops
from tensorflow.python.ops.rnn_cell_impl import RNNCell
def mean_field_cell(logits, state):
return logits, state
class GeneratorCellBuilder(object):
def __init__(self, cell_type, **kwargs):
self.c = cell_type
self.kw = kwargs
def _check_feed_actions(self, feed_actions):
if feed_actions:
raise Exception("Cannot feed actions, only logits!")
def _build_cell(self, **kwargs):
return self.c
def _init_state(self, cell, batch_size):
return None
def build_cell_and_init_state(self, batch_size, feed_actions):
self._check_feed_actions(feed_actions)
cell = self._build_cell(**self.kw)
return cell, self._init_state(cell, batch_size)
class GeneratorRNNCellBuilder(GeneratorCellBuilder):
def _check_feed_actions(self, feed_actions):
pass
def _build_cell(self, m, n_stack=1, wrappers=[]):
if n_stack == 1:
cell = self.c(m)
else:
cell = rnn.MultiRNNCell([self.c(m) for _ in range(n_stack)])
# Apply wrappers; use functools.partial to bind other arguments
for wrapper in wrappers:
cell = wrapper(cell)
return cell
def _init_state(self, cell, batch_size):
return cell.zero_state(batch_size, tf.float32)
class OutputRangeWrapper(RNNCell):
def __init__(self, cell, output_range, norm_op=None):
"""Rescales output range of @cell
@cell: an RNN cell
@output_range: range of outputs, e.g. 4 produces outputs in [-2, 2]
@norm_op: function to map @cell outputs to range [0, 1]
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if output_range < 0:
raise ValueError("Logit range must be > 0: %d." % output_range)
self._cell = cell
self._range = output_range
self._norm = norm_op
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, n, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[n]):
return self._cell.zero_state(n, dtype)
def __call__(self, inputs, state, scope=None):
output, res_state = self._cell(inputs, state)
if self._norm:
output = self._norm(output)
return self._range * (output - 0.5), res_state
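# Worked example (added for exposition): if `norm_op` maps the wrapped cell's
# output into [0, 1] (e.g. the 0.5 * (x + 1) norm used by LSTMGenerator), then
# output_range * (norm(output) - 0.5) lies in [-output_range/2, output_range/2];
# with output_range = 4 the emitted logits are confined to [-2, 2], as the
# constructor docstring notes.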
| tanda-master | tanda/generator/rnn_cell_util.py |
from .generator import GRUGenerator, LSTMGenerator, MeanFieldGenerator
| tanda-master | tanda/generator/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from .rnn_cell_util import (
GeneratorCellBuilder, GeneratorRNNCellBuilder, mean_field_cell,
OutputRangeWrapper
)
from functools import partial
from itertools import product
from tensorflow.python.ops import variable_scope
class RNNCellGenerator(object):
"""Train a model to emit action sequences"""
def __init__(self, m, seq_len, cell_builder, name='gen', reuse=False,
**kwargs):
self.m = m
self.seq_len = seq_len
self.cell_build = cell_builder
self.name = name
self.action_seq = None
self.logits = None
with tf.variable_scope(name, reuse=reuse):
self.batch_size = tf.placeholder(tf.int32, [])
self._build_generator(**kwargs)
def _build_generator(self, feed_actions=True, init_type='zeros'):
"""Build the RNN TF sequence generator
@feed_actions: Feed one-hot actions taken in previous step, rather than
logits (from which action was sampled)
@init_type: How to initialize the sequence generation:
* train: Train a variable to init with
* zeros: Send in an all-zeros vector
"""
# Build the cell
cell, state = self.cell_build.build_cell_and_init_state(
self.batch_size, feed_actions
)
# If train input, train a variable input. Otherwise, all zeros
if init_type.lower().startswith('train'):
input_var = tf.Variable(tf.zeros((1, self.m), dtype=tf.float32))
feed = tf.tile(input_var, (self.batch_size, 1))
elif init_type.lower().startswith('zero'):
feed = tf.zeros((self.batch_size, self.m), dtype=tf.float32)
else:
raise ValueError(
"RNN cell generator init_type %s not recognized" % init_type)
# Placeholders to recover policy for updates
self.rerun = tf.placeholder_with_default(False, [])
self.input_actions = tf.placeholder_with_default(
tf.zeros((1, self.seq_len), dtype=tf.int32), (None, self.seq_len)
)
self.coo_actions = tf.placeholder(tf.int32, (None, 3))
# Run loopy feed forward
actions_arr = tf.TensorArray(tf.int32, self.seq_len)
logits_arr = tf.TensorArray(tf.float32, self.seq_len)
for t in range(self.seq_len):
if t > 0:
variable_scope.get_variable_scope().reuse_variables()
# Compute logits for next action using RNN cell
logits, state = cell(feed, state)
# Samplers to draw actions
def sample():
return tf.to_int32(tf.multinomial(logits, 1))
def rerun_sample():
return self.input_actions[:, t]
# If rerunning to apply policy gradients, draw is the input
draw = tf.reshape(tf.cond(self.rerun, rerun_sample, sample), (-1,))
# Write to arrays
logits_arr = logits_arr.write(t, logits)
actions_arr = actions_arr.write(t, draw)
# Update feed- either with the action taken (default), or with
# the logits output at the previous timestep
if feed_actions:
feed = tf.one_hot(draw, self.m)
else:
feed = logits
# Reshape logits to [batch_size, seq_len, n_actions]
self.logits = tf.transpose(logits_arr.stack(), (1, 0, 2))
# Reshape action_seq to [batch_size, seq_len]
self.action_seq = tf.transpose(actions_arr.stack())
def _get_generated_probabilities(self):
"""Returns a [batch_size, seq_len] Tensor with probabilities for each
action that was drawn
"""
input_batch_size = tf.shape(self.input_actions)[0]
dists = tf.nn.softmax(self.logits)
r_dists = tf.gather_nd(dists, self.coo_actions)
return tf.reshape(r_dists, (input_batch_size, self.seq_len))
def _build_discounts_matrix(self, T, gamma):
"""Build lower-triangular matrix of discounts.
For example for T = 3: D = [[1, 0, 0]
[gamma, 1, 0]
[gamma^2, gamma, 1]]
Then with R, our N x T incremental rewards matrix, the discounted sum is
R * D
"""
power_ltri = tf.cumsum(
tf.sequence_mask(tf.range(T)+1, T, dtype=tf.float32), exclusive=True
)
gamma_ltri = tf.pow(gamma, power_ltri)
gamma_ltri *= tf.sequence_mask(tf.range(T)+1, T, dtype=tf.float32)
return gamma_ltri
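# Worked example (added for exposition): with gamma = 0.5 and T = 3,
#     D = [[1,    0,   0],
#          [0.5,  1,   0],
#          [0.25, 0.5, 1]]
# so for incremental rewards R = [[1, 1, 1]] the product R D is
# [[1.75, 1.5, 1.0]], i.e. the discounted sum of future rewards starting
# at each step.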
def get_policy_loss_op(self, incremental_rewards, gamma):
"""Input is a [batch_size, seq_len] Tensor where each entry represents
the incremental reward for an action on a data point
"""
T = tf.shape(incremental_rewards)[1]
# Form matrix of discounts to apply
gamma_ltri = self._build_discounts_matrix(T, gamma)
# Compute future discounted rewards as [batch_size x seq_len] matrix
future_rewards = tf.matmul(incremental_rewards, gamma_ltri)
# Compute baseline and advantage
baseline = tf.reduce_mean(future_rewards, axis=0)
advantages = future_rewards - baseline
# Apply advantage to policy
policy = self._get_generated_probabilities()
return tf.reduce_sum(tf.log(policy) * tf.stop_gradient(advantages))
def get_action_sequence(self, session, batch_size):
"""Sample action sequences"""
return session.run(self.action_seq, {self.batch_size: batch_size})
def get_feed(self, actions, **kwargs):
"""Get the feed_dict for the training step.
@actions: The sequences of actions taken to generate the transformed
data in this training step.
Note that we feed `actions` back in and set rerun=True to indicate
that the exact same sequence of actions should be used in all other
operations in this step!
"""
coord = product(range(actions.shape[0]), range(actions.shape[1]))
feed = {
self.batch_size : actions.shape[0],
self.input_actions: actions,
self.coo_actions: [[i, j, actions[i, j]] for i, j in coord],
self.rerun: True,
}
kwargs.update(feed)
return kwargs
class GRUGenerator(RNNCellGenerator):
def __init__(self, m, seq_len, name='gen', reuse=False, n_stack=1,
logit_range=4.0, **kwargs):
# Get GRU cell builder
range_wrapper = partial(OutputRangeWrapper, output_range=logit_range)
cb = GeneratorRNNCellBuilder(
rnn.GRUCell, m=m, n_stack=n_stack, wrappers=[range_wrapper]
)
# Super constructor
super(GRUGenerator, self).__init__(
m, seq_len, name=name, cell_builder=cb, reuse=reuse, **kwargs
)
class LSTMGenerator(RNNCellGenerator):
def __init__(self, m, seq_len, name='gen', reuse=False, n_stack=1,
logit_range=4.0, **kwargs):
# Get LSTM cell builder
def norm(x):
return 0.5 * (x + 1.)
range_wrapper = partial(
OutputRangeWrapper, output_range=logit_range, norm_op=norm
)
cb = GeneratorRNNCellBuilder(
rnn.BasicLSTMCell, m=m, n_stack=n_stack, wrappers=[range_wrapper]
)
# Super constructor
super(LSTMGenerator, self).__init__(
m, seq_len, name=name, cell_builder=cb, reuse=reuse, **kwargs
)
class MeanFieldGenerator(RNNCellGenerator):
def __init__(self, m, seq_len, name='gen', reuse=False, **kwargs):
# Get mean field cell builder
cb = GeneratorCellBuilder(mean_field_cell)
# Super constructor
super(MeanFieldGenerator, self).__init__(
m, seq_len, name=name, cell_builder=cb, reuse=reuse, **kwargs
)
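# Illustrative usage sketch (added for exposition, not part of the module):
# a generator over m TFs emits [batch_size, seq_len] integer action sequences,
# which a Transformer can then apply to data; assumes a fresh default graph.
def _demo_generator_sampling(m=10, seq_len=5, batch_size=8):
    gen = GRUGenerator(m, seq_len)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        seqs = gen.get_action_sequence(session, batch_size)
    return seqs.shape                         # (batch_size, seq_len)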
| tanda-master | tanda/generator/generator.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet model.
Related papers:
https://arxiv.org/pdf/1603.05027v2.pdf
https://arxiv.org/pdf/1512.03385v1.pdf
https://arxiv.org/pdf/1605.07146v1.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import six
import tensorflow as tf
from .discriminator import Discriminator
from collections import namedtuple
from functools import partial
HParams = namedtuple('HParams',
'batch_size, num_classes, min_lrn_rate, lrn_rate, '
'num_residual_units, use_bottleneck, weight_decay_rate, '
'relu_leakiness, optimizer')
class ResNet(Discriminator):
"""ResNet model."""
def __init__(self, hps, dims):
"""ResNet constructor.
Args:
@hps: Hyperparameters.
@mode: One of 'train' and 'eval'.
"""
self.hps = hps
self._extra_train_ops = []
super(ResNet, self).__init__(dims=dims)
def _get_logits_op(self, X, n_classes, train=True, reuse=False, **kwargs):
"""Build the core model within the graph."""
with tf.variable_scope('init'):
x = self._conv('init_conv', X, 3, 3, 16, self._stride_arr(1))
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
if self.hps.use_bottleneck:
res_func = partial(self._bottleneck_residual, train=train, reuse=reuse)
filters = [16, 64, 128, 256]
else:
res_func = partial(self._residual, train=train, reuse=reuse)
filters = [16, 16, 32, 64]
# Uncomment the following code to use the w28-10 wide residual network.
# It is more memory efficient than a very deep residual network and has
# comparably good performance.
# https://arxiv.org/pdf/1605.07146v1.pdf
# filters = [16, 160, 320, 640]
# Update hps.num_residual_units to 9
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
for i in six.moves.range(1, self.hps.num_residual_units):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
for i in six.moves.range(1, self.hps.num_residual_units):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
for i in six.moves.range(1, self.hps.num_residual_units):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x, train=train, reuse=reuse)
x = self._relu(x, self.hps.relu_leakiness)
x = self._global_avg_pool(x)
with tf.variable_scope('logit'):
logits = self._fully_connected(x, n_classes)
return logits
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _batch_norm(self, name, x, train=True, reuse=False):
# Note we replace the batch norm from the tensorflow/models code with the
# contrib.layers implementation here for better scoping behavior...
return tf.contrib.layers.batch_norm(
x,
decay=0.9,
center=True, # I.e. use beta
scale=True, # I.e. use gamma
epsilon=1e-5,
# Note: important to leave this unset!
# updates_collections=None,
variables_collections=[self.bn_vars_collection],
is_training=train,
reuse=reuse,
scope=name,
trainable=True
)
def _residual(self, x, in_filter, out_filter, stride,
activate_before_residual=False, train=True, reuse=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x, train=train, reuse=reuse)
x = self._relu(x, self.hps.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x, train=train, reuse=reuse)
x = self._relu(x, self.hps.relu_leakiness)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x, train=train, reuse=reuse)
x = self._relu(x, self.hps.relu_leakiness)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter-in_filter)//2, (out_filter-in_filter)//2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _bottleneck_residual(self, x, in_filter, out_filter, stride,
activate_before_residual=False, train=True,
reuse=False):
"""Bottleneck residual unit with 3 sub layers."""
if activate_before_residual:
with tf.variable_scope('common_bn_relu'):
x = self._batch_norm('init_bn', x, train=train, reuse=reuse)
x = self._relu(x, self.hps.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_bn_relu'):
orig_x = x
x = self._batch_norm('init_bn', x, train=train, reuse=reuse)
x = self._relu(x, self.hps.relu_leakiness)
with tf.variable_scope('sub1'):
# Use integer division so filter counts stay ints under `from __future__ import division`
x = self._conv('conv1', x, 1, in_filter, out_filter // 4, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x, train=train, reuse=reuse)
x = self._relu(x, self.hps.relu_leakiness)
x = self._conv('conv2', x, 3, out_filter // 4, out_filter // 4, [1, 1, 1, 1])
with tf.variable_scope('sub3'):
x = self._batch_norm('bn3', x, train=train, reuse=reuse)
x = self._relu(x, self.hps.relu_leakiness)
x = self._conv('conv3', x, 1, out_filter // 4, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride)
x += orig_x
tf.logging.info('image after unit %s', x.get_shape())
return x
def get_weight_decay_op(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find(r'DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0/n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
w = tf.get_variable(
'DW', [x.get_shape()[1], out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
hps_default = HParams(batch_size=100,
num_classes=10,
min_lrn_rate=0.0001,
lrn_rate=0.1,
num_residual_units=9,
use_bottleneck=False,
weight_decay_rate=0.0002,
relu_leakiness=0.1,
optimizer='mom')
ResNetDefault = partial(ResNet, hps=hps_default)
| tanda-master | tanda/discriminator/resnet_cifar.py |
from .dcnn import DCNN
from .discriminator import Discriminator
from .resnet_cifar import ResNetDefault
from .simple import SimpleDiscriminator
| tanda-master | tanda/discriminator/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
from .discriminator import Discriminator
from functools import partial
D_H = 2
D_W = 2
class DCNN(Discriminator):
"""
Discriminator from DCGAN paper
From https://github.com/carpedm20/DCGAN-tensorflow/blob/master/model.py
"""
def __init__(self, dims=[28, 28, 1], df_dim=64):
super(DCNN, self).__init__(dims=(dims if len(dims) == 3 else dims+[1]))
self.df_dim = df_dim
self.out_dim = self.last_layer_size
def _get_logits_op(self, X, n_classes=1, train=True, reuse=False,
get_layers=False, **kwargs):
"""Returns logits"""
batch_norm = partial(batch_norm_op,
bn_vars_collection=self.bn_vars_collection)
n_batch = tf.shape(X)[0]
# Apply convolutional layers
h0 = conv2d(X, self.dims[-1], self.df_dim, name='d_h0_conv')
h0_a = lrelu(h0)
h1 = conv2d(h0_a, self.df_dim, self.df_dim * 2, name='d_h1_conv')
h1_a = lrelu(batch_norm(h1, name='bn_1', train=train, reuse=reuse))
h2 = conv2d(h1_a, self.df_dim * 2, self.df_dim * 4, name='d_h2_conv')
h2_a = lrelu(batch_norm(h2, name='bn_2', train=train, reuse=reuse))
h3 = conv2d(h2_a, self.df_dim * 4, self.df_dim * 8, name='d_h3_conv')
h3_a = lrelu(batch_norm(h3, name='bn_3', train=train, reuse=reuse))
h_out = tf.reshape(h3_a, [n_batch, self.out_dim])
h4 = linear(h_out, self.out_dim, n_classes, scope='d_h3_lin')
# Check for get_layers
if get_layers:
layers = [tf.reshape(z, (n_batch, -1)) for z in [h0, h1, h2, h3]]
return h4, layers
return h4
@property
def last_layer_size(self):
n_convs, h, w = 4, D_H, D_W
z1, z2 = self.dims[0], self.dims[1]
for _ in range(n_convs):
z1, z2 = int(np.ceil(float(z1) / h)), int(np.ceil(float(z2) / w))
return int(z1 * z2 * self.df_dim * (2. ** (n_convs - 1)))
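# Worked example (added for exposition): for the default dims=[28, 28, 1] and
# df_dim=64, the four stride-2 convolutions shrink each spatial side as
# 28 -> 14 -> 7 -> 4 -> 2, so last_layer_size = 2 * 2 * 64 * 2**3 = 2048.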
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x)
def conv2d(X, in_dim, out_dim, k_h=5, k_w=5, d_h=D_H, d_w=D_W, stddev=0.02,
name="conv2d"):
# Note: dims is (h, w, n_channels)
with tf.variable_scope(name):
w = tf.get_variable('w', [k_h, k_w, in_dim, out_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
# Standard convolution
conv = tf.nn.conv2d(X, w, strides=[1, d_h, d_w, 1], padding='SAME')
# Add biases
biases = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer(0.0))
conv = tf.nn.bias_add(conv, biases)
return conv
def batch_norm_op(x, bn_vars_collection="BN_vars", train=True, reuse=False,
epsilon=1e-5, momentum=0.9, name="batch_norm"):
return tf.contrib.layers.batch_norm(
x,
decay=momentum,
scale=True,
epsilon=epsilon,
variables_collections=[bn_vars_collection],
is_training=train,
reuse=reuse,
scope=name
)
def linear(X, in_dim, out_size, scope=None, stddev=0.02, bias_start=0.0):
with tf.variable_scope(scope or "linear"):
w = tf.get_variable("w", [in_dim, out_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
b = tf.get_variable("bias", [out_size],
initializer=tf.constant_initializer(bias_start))
return tf.matmul(X, w) + b
| tanda-master | tanda/discriminator/dcnn.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from .discriminator import Discriminator
def nnet(input_tensor, n_hidden=4):
h = tf.layers.dense(input_tensor, n_hidden,
activation=tf.nn.sigmoid, name='h_0')
return tf.layers.dense(h, 1, name='h_1')
class SimpleDiscriminator(Discriminator):
"""A simple two-layer neural net"""
def get_logits_op(self, x_input, **kwargs):
"""Returns logits"""
return nnet(x_input)
| tanda-master | tanda/discriminator/simple.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
ADAM = tf.train.AdamOptimizer
class Discriminator(object):
"""
Parent class for discriminator in TAN module
Also includes methods to build a supervised version so it can be reused
as the end discriminative model.
"""
def __init__(self, dims=None):
self.dims = dims
# Placeholders for supervised version of discriminator
self.X = None
self.Y = None
self.loss = None
self.train_op = None
self.accuracy = None
self.bn_vars_collection = "BN_vars"
def _get_logits_op(self, X, n_classes, train=True, reuse=False,
get_layers=False, **kwargs):
"""Implement this method with sub-class; X has shape [-1] + self.dims"""
raise NotImplementedError()
def get_logits_op(self, X, n_classes=1, train=True, reuse=False,
per_img_std=False, get_layers=False, **kwargs):
"""Returns logits using self._get_logits_op, first preprocessing"""
X = tf.reshape(X, [-1] + self.dims)
if per_img_std:
X = tf.map_fn(tf.image.per_image_standardization, X)
out = self._get_logits_op(X, n_classes, train=train, reuse=reuse,
get_layers=get_layers, **kwargs)
if get_layers and len(out) != 2:
raise Exception("Specified get_layers but not available")
return out
@property
def last_layer_size(self):
raise NotImplementedError()
def get_loss_op(self, logits, name=None, positive=True, mean=True):
"""Loss op for use in TAN (n_classes=1)"""
y = tf.ones_like(logits) if positive else tf.zeros_like(logits)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y)
return tf.reduce_mean(loss, name=name) if mean else tf.squeeze(loss)
def _tr_term(self, logits_arr, Np):
"""Get the TR reg term given a loits_arr consisting of Np
different logits (number of classes = K) of transformations of batches
of size B. This term is just the average squared distance between the
logits of a pair of passes for a data point, averaged over the batch.
See https://papers.nips.cc/paper/6333-regularization-with-stochastic-
transformations-and-perturbations-for-deep-semi-supervised-learning.pdf
"""
# Reshape to [B, Np, K]
A = tf.transpose(logits_arr.stack(), [1, 0, 2])
# ||a_{ij}||_2^2; note element-wise multiply here
R = tf.reshape(tf.reduce_sum(A * A, 2), [-1, Np, 1])
# ||a_{ji}||_2^2
R_t = tf.transpose(R, [0, 2, 1])
# a_{ij}a_{ji}
S = tf.matmul(A, tf.transpose(A, [0, 2, 1]))
# Pairwise distance matrix (a_{ij} - a_{ji})^2
D = R - 2 * S + R_t
# Lower triangular part (don't double count)
D_lt = tf.matrix_band_part(D, -1, 0)
# Take mean over distinct pairs & batch size
return tf.reduce_mean(tf.reduce_sum(D_lt, axis=2))
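# Note (added for exposition): D = R - 2 S + R^T above is the standard
# pairwise squared-distance expansion
#     ||a_i - a_j||^2 = ||a_i||^2 - 2 a_i . a_j + ||a_j||^2,
# broadcast over the batch, so D[b, i, j] is the squared distance between the
# logits of passes i and j for batch element b; the lower-triangular mask then
# counts each unordered pair once.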
def build_supervised(self, n_classes, name, trainer=ADAM, lr_init=0.01,
per_img_std=True, weight_decay=0.0, ls_term=0.0, ls_term_n_passes=1):
"""Build model for supervised setting
@per_img_std: Per image normalization
@weight_decay: If > 0.0, adds `self.get_weight_decay_op()` to loss
@ls_term: Local smoothness term which adds the mean L2 norm of a batch
of unlabeled data and its transformed copy to minimize as well;
if > 0.0, adds this op to the graph & training step
"""
size = np.prod(self.dims)
summaries = []
# Note we take *flattened* data (just because that's how TAN uses it)
self.X = tf.placeholder(tf.float32, [None, size])
self.Y = tf.placeholder(tf.float32, [None, n_classes])
with tf.variable_scope(name):
logits = self.get_logits_op(self.X, n_classes=n_classes,
per_img_std=per_img_std)
# Loss function
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=self.Y))
# Add ="weight decay" term if applicable
if weight_decay > 0:
self.loss += weight_decay * self.get_weight_decay_op()
## TRANSFORMATION REGULARIZATION TERM
self.U_ts = None
if ls_term > 0.0:
# Note that U_ts also includes the un-transformed image, so we do
# an "extra" pass for this one
Np = ls_term_n_passes + 1
self.U_ts = tf.placeholder(tf.float32, [Np, None, size])
# Pass through the network
# NOTE: Any random ops e.g. dropout, etc. should be used here in
# train mode!
# NOTE: We regularize logits (not prediction vector); this works
# much better empirically
logits_u_t_arr = tf.TensorArray(tf.float32, Np)
with tf.variable_scope(name, reuse=True):
# Add several transformed versions' logits
for i in range(Np):
logits_u_t = self.get_logits_op(
self.U_ts[i, :, :],
n_classes=n_classes,
per_img_std=per_img_std,
train=True,
reuse=True
)
logits_u_t_arr = logits_u_t_arr.write(i, logits_u_t)
# Add TR reg term to loss
u_reg_loss = self._tr_term(logits_u_t_arr, Np)
summaries.append(tf.summary.scalar("U_loss", u_reg_loss))
self.loss += ls_term * u_reg_loss
# Learning rate- constant variable that we can overwrite
self.lr = tf.constant(lr_init, tf.float32)
summaries.append(tf.summary.scalar("learning_rate", self.lr))
# Get summaries
summaries.append(tf.summary.scalar("loss", self.loss))
self.train_summaries = tf.summary.merge(summaries)
# Training step
var_list = [v for v in tf.trainable_variables()
if v.name.startswith(name)]
# Note: This is necessary for batch_norm to be handled correctly
update_ops = [u for u in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if u.name.startswith(name)]
with tf.control_dependencies(update_ops):
self.train_op = trainer(self.lr).minimize(self.loss,
var_list=var_list)
# Accuracy
# Note: We need to get logits again because we need to set train=False
# for e.g. batch_norm, dropout, etc.
with tf.variable_scope(name, reuse=True):
logits_test = self.get_logits_op(self.X, n_classes=n_classes,
per_img_std=per_img_std, train=False, reuse=True)
# Precision computation ops + summary
correct = tf.equal(tf.argmax(logits_test, 1), tf.argmax(self.Y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
acc_summary = tf.summary.scalar("accuracy", self.accuracy)
self.acc_summary = tf.summary.merge([acc_summary])
# For returning marginal probabilities
self.probs = tf.nn.softmax(logits_test)
# For saving variables
vars = tf.trainable_variables()
vars_list = [v for v in vars if v.name.startswith(name)]
# Note: In order to save batch_norm moving averages--which are needed
# at test time--we need to save them in a collection (see README) with
# the name self.bn_vars_collection, otherwise they won't get saved!
vars_list += tf.get_collection_ref(self.bn_vars_collection)
self.saver = tf.train.Saver(var_list=vars_list)
def supervised_train_step(self, session, X, Y, U_ts=None, lr=None):
feed_dict = {self.X: X, self.Y: Y}
if self.U_ts is not None and U_ts is not None:
feed_dict.update({self.U_ts: U_ts})
if lr is not None:
feed_dict[self.lr] = lr
loss, summary, _ = session.run([self.loss, self.train_summaries,
self.train_op], feed_dict=feed_dict)
return loss, summary
def get_accuracy(self, session, X, Y, batch_size=100):
# NOTE: We do eval in minibatches otherwise too much memory!!
N = X.shape[0]
n_batches = int(np.floor(N / batch_size))
# Iterate over batches
accs_sum = 0.0
for i, b in enumerate(range(0, N, batch_size)):
# Get next batch
X_batch = X[b : b + batch_size, :]
Y_batch = Y[b : b + batch_size, :]
# Get accuracy
n_batch = X_batch.shape[0]
batch_acc = session.run(self.accuracy,
feed_dict={self.X: X_batch, self.Y: Y_batch})
accs_sum += n_batch * batch_acc
# Acc = (n_1 * acc_1 + ... + n_k * acc_k) / sum(n_i)
acc = accs_sum / float(N)
value = tf.summary.Summary.Value(tag="accuracy", simple_value=acc)
summary = tf.summary.Summary(value=[value])
return acc, summary
def get_probs(self, session, X):
return session.run(self.probs, {self.X: X})
def save(self, sess, path):
"""Note this saves _only_ the end model."""
_ = self.saver.save(sess, path)
print("End model saved.")
def restore(self, sess, path):
self.saver.restore(sess, path)
| tanda-master | tanda/discriminator/discriminator.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import matplotlib.pyplot as plt
from datasets import transformations
import torch
import numpy as np
def plot_x2_reconstructions(
pairs, model, indices, train_set, save_name,
):
"""
Plots sample x2 reconstructions based on indices
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
title = "Training Reconstruction" if train_set else "Test Reconstruction"
fig, axs = plt.subplots(len(indices), 3, figsize=(6, 12))
fig.suptitle(title, fontsize=16)
for i, sample_idx in enumerate(indices):
x1, x2, params = pairs[sample_idx]
n_pixels = x1.shape[1]
try:
# for weakly supervised autoencoder
x2_reconstruction = model(x1.unsqueeze(0), x2.unsqueeze(0), params)
except TypeError:
# for real autoencoder
x2_reconstruction = model(x1.unsqueeze(0), params)
axs[i][0].imshow(x1.squeeze())
axs[i][0].set_title("x1")
axs[i][1].imshow(x2.squeeze())
axs[i][1].set_title("x2")
axs[i][2].imshow(
x2_reconstruction.cpu().detach().numpy().reshape(n_pixels, n_pixels)
)
axs[i][2].set_title("x2 from tranformed z1")
if save_name:
plt.savefig(f"{save_name}.png", dpi=300, bbox_inches="tight")
plt.close()
else:
plt.show()
def plot_x1_reconstructions(pairs, model, indices, train_set, save_name):
"""
Plots sample x1 reconstructions based on indices
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
title = "Training Reconstructions" if train_set else "Test Reconstructions"
fig, axs = plt.subplots(len(indices), 2, figsize=(5, 12))
fig.suptitle(title, fontsize=16)
for i, sample_idx in enumerate(indices):
x1, x2, params = pairs[sample_idx]
n_pixels = x1.shape[1]
x1_reconstruction = model(x1.unsqueeze(0)).cpu().detach().numpy()
axs[i][0].imshow(x1.squeeze())
axs[i][0].set_title("x1")
axs[i][1].imshow(x1_reconstruction.reshape(n_pixels, n_pixels))
axs[i][1].set_title("x1 reconstruction")
if save_name:
plt.savefig(f"{save_name}.png", dpi=300, bbox_inches="tight")
plt.close()
else:
plt.show()
def plot_rotations(
X,
model,
n_transformations,
title,
save_name=None,
param_name="angle",
use_latent_op=True,
):
"""Plots all rotated reconstructions for given samples"""
font_size = 18
degree_sign = "\N{DEGREE SIGN}"
n_samples = X.shape[0]
fig, axs = plt.subplots(n_samples, n_transformations + 2, figsize=(16, 12))
fig.suptitle(title, fontsize=16)
for sample_i, x1 in enumerate(X):
axs[sample_i, 0].imshow(x1.squeeze())
axs[sample_i, 0].set_title("original", fontsize=font_size)
axs[sample_i, 0].set_xticks([])
axs[sample_i, 0].set_yticks([])
transformation_params = get_all_transformations(param_name, n_transformations)
for i, param in enumerate(transformation_params):
if use_latent_op:
x2_reconstruction = model.reconstruct_x2(x1.unsqueeze(1), param)
else:
x2_reconstruction = model.reconstruct_transformed_x1(
x1.unsqueeze(1), param
)
axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze())
if param_name == "angle":
axs[sample_i, i + 1].set_title(
f"{param.angle:0.0f}{degree_sign}", fontsize=font_size
)
axs[sample_i, i + 1].set_xticks([])
axs[sample_i, i + 1].set_yticks([])
if save_name:
plt.savefig(save_name, bbox_inches="tight", dpi=300)
plt.close()
else:
plt.show()
def plot_transformations_complex(
X,
model,
title,
save_name=None,
param_name="angle",
supervised=False,
):
"""Plots all rotated reconstructions for given samples"""
font_size = 18
degree_sign = "\N{DEGREE SIGN}"
n_samples = X.shape[0]
    transformation_params = transformations.get_transform_params(
        model.data.n_rotations, model.data.n_x_translations, model.data.n_y_translations, (1.0,)
    )
    n_transformations = sum(1 for _ in transformation_params)
    fig, axs = plt.subplots(
        n_samples, n_transformations + 1, figsize=(16, int(12 / 5.0 * len(X)))
    )
for sample_i, x1 in enumerate(X):
axs[sample_i, 0].imshow(x1.squeeze())
axs[sample_i, 0].set_title("original", fontsize=font_size)
axs[sample_i, 0].set_xticks([])
axs[sample_i, 0].set_yticks([])
x1 = x1.to(model.device)
z1 = model.encoder(x1)
        transformation_params = transformations.get_transform_params(
            model.data.n_rotations, model.data.n_x_translations, model.data.n_y_translations, (1.0,)
        )
for i, param in enumerate(transformation_params):
shifts = torch.LongTensor([[i]])
if supervised:
z_transformed = model.transform(z1, [shifts])
else:
z_transformed = model.transform(z1, torch.LongTensor([[i]]))
x2_reconstruction = model.decoder(z_transformed).detach().cpu().numpy()
axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze())
if param_name == "angle":
axs[sample_i, i + 1].set_title(
f"{param.angle:0.0f}{degree_sign}", fontsize=font_size
)
elif param_name == "tx":
axs[sample_i, i + 1].set_title(f"{param.shift_x:0.0f}", fontsize=font_size)
elif param_name == 'ty':
axs[sample_i, i + 1].set_title(f"{param.shift_y:0.0f}", fontsize=font_size)
else:
axs[sample_i, i + 1].set_title(f"{param.shift_x:0.0f},{param.shift_y:0.0f}",
fontsize=font_size)
axs[sample_i, i + 1].set_xticks([])
axs[sample_i, i + 1].set_yticks([])
if save_name:
plt.savefig(save_name, bbox_inches="tight", dpi=300)
plt.close()
else:
plt.show()
def get_all_transformations(param_name, n_transformations):
if param_name == "angle":
return transformations.get_transform_params(n_transformations, 0, 0, (1.0,))
elif param_name == "shift_x":
return transformations.get_transform_params(0, n_transformations, 0, (1.0,))
elif param_name == "shift_y":
return transformations.get_transform_params(0, 0, n_transformations, (1.0,))
def plot_rotations_translations(X, model, n_transformations, n_rot, n_x, n_y, save_name=None):
degree_sign = "\N{DEGREE SIGN}"
n_samples = X.shape[0]
fig, axs = plt.subplots(n_samples, n_transformations + 2, figsize=(16, int(12/5.*len(X))))
for sample_i, x1 in enumerate(X):
axs[sample_i, 0].imshow(x1.squeeze())
axs[sample_i, 0].set_title("original", fontsize=16)
axs[sample_i, 0].set_xticks([])
axs[sample_i, 0].set_yticks([])
x1 = x1.to(model.device)
transformation_params = [t for t in transformations.get_transform_params(n_rot, n_x, n_y, (1.0, ))]
z = model.encoder(x1)
angle = None
shift_x = None
shift_y = None
t_list = []
i = 0
        for t in range(n_transformations + 1):
            j = np.random.randint(len(transformation_params))
            param = transformation_params[j]
            if t not in t_list:
shifts = model.return_shifts([param])
z_transformed = model.transform(z, shifts)
x2_reconstruction = model.decoder(z_transformed).detach().cpu().numpy()
axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze())
axs[sample_i, i + 1].set_title(f"{param.angle:0.0f}{degree_sign}\n{param.shift_x:0.0f},{param.shift_y:0.0f}", fontsize=16)
axs[sample_i, i + 1].set_xticks([])
axs[sample_i, i + 1].set_yticks([])
angle = param.angle
shift_x = param.shift_x
shift_y = param.shift_y
i += 1
if i+1 >= n_transformations + 2:
break
if save_name:
plt.savefig(save_name, bbox_inches="tight", dpi=300)
plt.close()
else:
plt.show()
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
plot.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
"""
Launches experiments locally or on the cluster
python run_experiments.py [name] --cluster
OPTIONS:
python run_experiments.py linear-mnist-test --data mnist
python run_experiments.py cci-autoencoder-shapes --architecture CCI
"""
import argparse
import autoencoder
import cci_variational_autoencoder
import os
import itertools
from datasets import datasets
from functools import partial
import torch
import shutil
import submitit
BASE_PARAMS = {
"seed": [0, 10, 20, 30, 40],
"n_epochs": [30],
"learning_rate": [0.001, 0.0005],
}
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"running on {device}")
def run_cci_vae_shapes(
beta=1000.0,
c_max=36.0,
z_dim=30,
batch_size=16,
n_epochs=10,
learning_rate=0.0005,
seed=0,
folder=None,
n_classes=300,
architecture=None,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution="gaussian",
):
"""Runs CCI VAE and variants on Simple Shapes. Note architecture kwarg is not used"""
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
shapes = datasets.SimpleShapes(
batch_size,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
n_classes=n_classes,
seed=seed,
pairs=False,
)
train_cci_vae_variants(
shapes, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
)
def run_cci_vae_mnist(
beta=1000.0,
c_max=36.0,
z_dim=30,
batch_size=16,
n_epochs=10,
learning_rate=0.0005,
seed=0,
folder=None,
n_classes=300,
proportion=0.01,
architecture=None,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution="gaussian",
):
"""Runs CCI VAE and variants on MNIST. Note architecture kwarg is not used"""
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
mnist = datasets.ProjectiveMNIST(
batch_size,
seed=seed,
train_set_proportion=proportion,
test_set_proportion=1.0,
valid_set_proportion=proportion,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
pairs=False,
)
train_cci_vae_variants(
mnist, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
)
def run_cci_vae_single_digit_mnist(
beta=1000.0,
c_max=36.0,
z_dim=30,
batch_size=16,
n_epochs=10,
learning_rate=0.0005,
seed=0,
folder=None,
n_classes=300,
proportion=0.01,
architecture=None,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution="gaussian",
):
"""Runs CCI VAE and variants on MNIST. Note architecture kwarg is not used"""
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
mnist = datasets.ProjectiveSingleDigitMNIST(
batch_size,
seed=seed,
train_set_proportion=proportion,
test_set_proportion=1.0,
valid_set_proportion=proportion,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
pairs=False,
)
train_cci_vae_variants(
mnist, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
)
def train_cci_vae_variants(
data, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
):
"""Trains CCI, Beta, and standard VAE"""
print("Training CCI VAE")
cci_vae_folder = os.path.join(folder, "cci_vae")
train_cci_vae(
data,
beta,
c_max,
z_dim,
n_epochs,
learning_rate,
distribution,
seed,
cci_vae_folder,
)
print("Training Beta VAE")
beta_vae_folder = os.path.join(folder, "beta_vae")
train_cci_vae(
data,
beta,
0.0,
z_dim,
n_epochs,
learning_rate,
distribution,
seed,
beta_vae_folder,
)
print("Training VAE")
vae_folder = os.path.join(folder, "vae")
train_cci_vae(
data, 1.0, 0.0, z_dim, n_epochs, learning_rate, distribution, seed, vae_folder
)
def run_autoencoder_shapes(
z_dim=1000,
batch_size=16,
n_epochs=30,
learning_rate=0.0005,
seed=0,
folder=None,
architecture="Linear",
n_classes=300,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution=None,
use_latent_op=True,
):
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
shapes = datasets.SimpleShapes(
batch_size,
n_classes=n_classes,
seed=seed,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
)
if use_latent_op:
train_autoencoder(
shapes, z_dim, n_epochs, learning_rate, seed, folder, architecture
)
else:
train_standard_autoencoder(
shapes, z_dim, n_epochs, learning_rate, seed, folder, architecture
)
def run_autoencoder_mnist(
z_dim=1000,
batch_size=16,
n_epochs=2,
learning_rate=0.0005,
seed=0,
folder=None,
architecture="Linear",
proportion=0.01,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
distribution=None,
use_latent_op=True,
):
if folder is None:
raise ValueError("Please provide an experiment folder")
print("saving results to ", folder)
mnist = datasets.ProjectiveMNIST(
batch_size,
seed=seed,
train_set_proportion=proportion,
test_set_proportion=1.0,
valid_set_proportion=proportion,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
)
if use_latent_op:
print("using latent_op")
train_autoencoder(
mnist, z_dim, n_epochs, learning_rate, seed, folder, architecture
)
else:
train_standard_autoencoder(
mnist, z_dim, n_epochs, learning_rate, seed, folder, architecture
)
def train_standard_autoencoder(
data, z_dim, n_epochs, learning_rate, seed, folder, architecture
):
model = autoencoder.AutoEncoder(
data,
z_dim=z_dim,
n_epochs=n_epochs,
learning_rate=learning_rate,
encoder_type=architecture,
decoder_type=architecture,
device=device,
seed=seed,
)
model.run()
model.save_best_validation(os.path.join(folder, "standard-autoencoder"))
def train_autoencoder(data, z_dim, n_epochs, learning_rate, seed, folder, architecture):
model_disentangled_rotation = autoencoder.AutoEncoder(
data,
z_dim=z_dim,
n_epochs=n_epochs,
learning_rate=learning_rate,
latent_operator_name="DisentangledRotation",
encoder_type=architecture,
decoder_type=architecture,
device=device,
seed=seed,
)
model_disentangled_rotation.run()
model_disentangled_rotation.save_best_validation(
os.path.join(folder, "disentangled-operator")
)
model_shift_operator = autoencoder.AutoEncoder(
data,
z_dim=z_dim,
n_epochs=n_epochs,
learning_rate=learning_rate,
latent_operator_name="ShiftOperator",
encoder_type=architecture,
decoder_type=architecture,
device=device,
seed=seed,
)
model_shift_operator.run()
model_shift_operator.save_best_validation(os.path.join(folder, "shift-operator"))
def train_cci_vae(
data, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder
):
cci_vae = cci_variational_autoencoder.CCIVariationalAutoEncoder(
data,
beta=beta,
c_max=c_max,
z_dim=z_dim,
seed=seed,
learning_rate=learning_rate,
n_epochs=n_epochs,
distribution=distribution,
)
cci_vae.train()
cci_vae.save_best_validation(folder)
def launch_single_job(experiment, base_dir, results_dir, **kwargs):
log_folder = base_dir + "%j"
executor = submitit.AutoExecutor(folder=log_folder)
# executor.update_parameters(timeout_min=600, gpus_per_node=1)
executor.update_parameters(
timeout_min=240, gpus_per_node=1,
)
job = executor.submit(experiment, folder=results_dir, **kwargs)
print("job id", job.job_id)
print(f"logging to: {base_dir + job.job_id}")
print(f"results stored at: {results_dir}")
result = job.result()
print(f"job result: {result}")
def launch_sweep(experiment, params, base_dir, experiment_dir):
log_folder = base_dir + "%j"
executor = submitit.AutoExecutor(folder=log_folder)
# executor.update_parameters(timeout_min=600, gpus_per_node=1)
executor.update_parameters(
timeout_min=600, gpus_per_node=1,
)
jobs = []
with executor.batch():
for i, param in enumerate(params):
print("running with param ", param)
param["folder"] = os.path.join(experiment_dir, f"{i}")
job = executor.submit(experiment, **param)
jobs.append(job)
print(f"launched {len(params)} jobs")
print("sweep id", jobs[0].job_id)
print(f"logging to: {base_dir}{jobs[0].job_id}")
results = [job.result() for job in jobs]
print(f"job results: {results}")
def get_params(args):
params = BASE_PARAMS
if args.data == "mnist":
params["batch_size"] = [8, 16, 32, 64]
elif args.data == "shapes":
params["batch_size"] = [4, 8, 16, 32]
if args.model == "cci_vae":
params["n_epochs"] = [10, 20, 50]
params["beta"] = [4.0, 10.0, 100.0, 1000.0]
params["z_dim"] = [10, 30]
return params
def get_param_combinations(params):
"""Returns a list of dictionaries with all combinations"""
keys, values = zip(*params.items())
params_combinations = [dict(zip(keys, v)) for v in itertools.product(*values)]
return params_combinations
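# Illustrative sketch (editorial, not from the original source): get_param_combinations expands
# the grid with itertools.product, e.g.
#   >>> get_param_combinations({"seed": [0, 10], "learning_rate": [0.001, 0.0005]})
#   [{'seed': 0, 'learning_rate': 0.001}, {'seed': 0, 'learning_rate': 0.0005},
#    {'seed': 10, 'learning_rate': 0.001}, {'seed': 10, 'learning_rate': 0.0005}]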
def get_directories(args, cluster=False):
user = os.environ["USER"]
if cluster:
RESULTS_DIR = f"/checkpoint/{user}/Equivariance/"
base_dir = f"/checkpoint/{user}/jobs/{args.name}/"
else:
RESULTS_DIR = os.path.expanduser(
"~/Dropbox/FAIR/Projects/Equivariance/experiments/results"
)
base_dir = os.path.expanduser(
"~/Dropbox/FAIR/Projects/Equivariance/experiments/jobs/{args.name}/"
)
experiment_dir = os.path.join(RESULTS_DIR, args.name)
# clean experimental directory
if os.path.exists(experiment_dir):
shutil.rmtree(experiment_dir)
return base_dir, experiment_dir
def get_experiment_function(args):
experiments = {
"run_autoencoder_shapes": run_autoencoder_shapes,
"run_autoencoder_mnist": run_autoencoder_mnist,
"run_cci_vae_shapes": run_cci_vae_shapes,
"run_cci_vae_mnist": run_cci_vae_mnist,
"run_cci_vae_single_digit_mnist": run_cci_vae_mnist,
}
experiment = experiments[f"run_{args.model}_{args.data}"]
print(f"run_{args.model}_{args.data}")
if args.data == "shapes":
experiment = partial(experiment, n_classes=args.n_classes)
elif args.data in {"mnist", "single_digit_mnist"}:
experiment = partial(experiment, proportion=args.mnist_proportion)
else:
raise ValueError(f"dataset {args.data} not supported")
# standard autoencoder
if "autoencoder" == args.model and args.no_latent_op:
experiment = partial(experiment, use_latent_op=False)
n_rotations, n_x_translations, n_y_translations = get_n_transformations(args)
experiment = partial(
experiment,
n_rotations=n_rotations,
n_x_translations=n_x_translations,
n_y_translations=n_y_translations,
architecture=args.architecture,
z_dim=args.z_dim,
distribution=args.distribution,
)
return experiment
def get_n_transformations(args):
n_rotations, n_x_translations, n_y_translations = 0, 0, 0
n_transformations = 9
if args.transformation == "rotation":
n_rotations = n_transformations
if args.transformation == "shift_x":
n_x_translations = n_transformations
if args.transformation == "shift_y":
n_y_translations = n_transformations
return n_rotations, n_x_translations, n_y_translations
def init_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
usage="python run_experiments --cluster",
description="runs experiments with specified parameters",
)
parser.add_argument("name", help="name of experiment")
parser.add_argument(
"--model",
help="model for experiments. Example: autoencoder, cci_vae",
default="autoencoder",
)
parser.add_argument(
"--architecture", help="name of autoencoder architecture", default="Linear",
)
parser.add_argument(
"--data",
help="dataset used for training: mnist, single_digit_mnist",
default="shapes",
)
parser.add_argument(
"--mnist_proportion",
help="proportion of mnist to use",
default=0.01,
type=float,
)
parser.add_argument(
"--n_classes",
help="number of classes to use for simple shapes",
default=300,
type=int,
)
parser.add_argument(
"--z_dim", help="dataset used for training", default=1000, type=int
)
parser.add_argument(
"--transformation",
choices=["rotation", "shift_x", "shift_y"],
type=str.lower,
default="rotation",
)
parser.add_argument(
"--distribution",
help="likelihood distribution used for computing loss in CCI VAE",
choices=["gaussian", "bernoulli"],
type=str.lower,
default="gaussian",
)
parser.add_argument("--beta", help="beta used for CCI VAE", default=1000, type=int)
parser.add_argument(
"--no_latent_op",
help="use standard autoencoder without latent operators",
action="store_true",
)
parser.add_argument("--cluster", action="store_true")
parser.add_argument("--sweep", action="store_true")
return parser
if __name__ == "__main__":
parser = init_argparse()
args = parser.parse_args()
experiment = get_experiment_function(args)
base_dir, experiment_dir = get_directories(args, cluster=args.cluster)
if args.cluster and args.sweep:
params = get_params(args)
params_combinations = get_param_combinations(params)
launch_sweep(experiment, params_combinations, base_dir, experiment_dir)
elif args.cluster:
launch_single_job(
experiment, base_dir, experiment_dir,
)
else:
print("running single local job")
experiment(folder=experiment_dir)
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
run_experiments_real.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from collections import OrderedDict
from abc import ABC
class ResNetExplorer(nn.Module):
"""
Loads a pre-trained model and hook on one of its layer
"""
def __init__(self, path_to_model="pytorch/vision:v0.6.0", model="resnet152"):
super().__init__()
self.pretrained_model = torch.hub.load(path_to_model, model, pretrained=True)
def create_full_model(self, layer_to_explore, layer_to_explore_size, image_size):
all_layers = dict(list(self.pretrained_model.named_children()))
        # named_children() preserves registration order, so positional indexing is safe
        all_keys = list(all_layers.keys())
max_index = all_keys.index(layer_to_explore)
##### ENCODER
# take all layers up to the one we want to explore for the encoder
encoder_layers = [
(all_keys[i], all_layers[layer])
for i, layer in enumerate(all_layers)
if i <= max_index
]
layers = OrderedDict()
for layer in encoder_layers:
name = layer[0]
layers[name] = layer[1]
# create a new model with it (saves time during feed-forward if we take other layers than the last one)
self.fixed_encoder = nn.Sequential(layers)
##### Linear layer to learn the mapping
self.linear = nn.Linear(layer_to_explore_size, layer_to_explore_size)
##### DECODER
self.decoder = nn.Linear(layer_to_explore_size, image_size)
def forward(self, x):
z = self.fixed_encoder(x)
# feed flattened z to linear
z_prime = self.linear(z.view(x.size(0), -1))
x_dec = self.decoder(z_prime)
# sigmoid to have something between 0 and 1
x_dec = torch.sigmoid(x_dec)
# map to image shape
return x_dec.view(x.size())
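# Minimal usage sketch (editorial, not from the original source). Layer names come from
# torchvision's ResNet (conv1, bn1, relu, maxpool, layer1..layer4, avgpool, fc); exploring
# "avgpool" on resnet152 yields 2048 flattened features, so one might write:
#   explorer = ResNetExplorer()                      # downloads pretrained resnet152
#   explorer.create_full_model("avgpool", layer_to_explore_size=2048, image_size=224 * 224 * 3)
#   x = torch.rand(1, 3, 224, 224)
#   x_dec = explorer(x)                              # (1, 3, 224, 224), values in (0, 1)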
class LinearEncoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.fc1 = nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False)
def forward(self, x):
out = x.flatten(start_dim=1)
out = self.fc1(out)
return out
class LinearDecoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.n_pixels = n_pixels
self.n_channels = n_channels
self.fc1 = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False)
def forward(self, x):
out = self.fc1(x)
out = out.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels)
return out
class ComplexLinearEncoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.fc1r = torch.nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False)
self.fc1i = torch.nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False)
def forward(self, x):
out = x.flatten(start_dim=1)
outr = self.fc1r(out)
outi = self.fc1i(out)
return (outr, outi)
class ComplexLinearDecoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.n_pixels = n_pixels
self.n_channels = n_channels
self.fc1r = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False)
self.fc1i = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False)
def forward(self, x):
r1 = self.fc1r(x[0])
r2 = -self.fc1i(x[1])
out_r = r1 + r2
out_r = out_r.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels)
return out_r
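# Note (editorial): ComplexLinearEncoder/Decoder parameterize a complex linear map W = Wr + i*Wi
# acting on z = zr + i*zi; the decoder above returns only the real part,
#   Re(W z) = Wr @ zr - Wi @ zi,
# which is exactly fc1r(x[0]) - fc1i(x[1]) in forward(). Shape sketch (hypothetical values,
# not from the original source):
#   dec = ComplexLinearDecoder(n_pixels=28, n_channels=1, z_dim=10)
#   zr, zi = torch.rand(4, 10), torch.rand(4, 10)
#   out = dec((zr, zi))   # (4, 1, 28, 28)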
class CCIEncoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.encoder = nn.Sequential(
nn.Conv2d(n_channels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, 256, kernel_size=1, stride=1),
Lambda(lambda x: x.view(x.size(0), -1)),
nn.Linear(256, z_dim),
)
def forward(self, x):
out = self.encoder(x)
return out
class CCIDecoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.decoder = nn.Sequential(
nn.Linear(z_dim, 256),
nn.ReLU(),
Lambda(lambda x: x.view(-1, 256, 1, 1)),
nn.ConvTranspose2d(256, 64, 4),
nn.ReLU(),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(),
nn.ConvTranspose2d(64, n_pixels, 4, 2, 1),
nn.ReLU(),
nn.ConvTranspose2d(n_pixels, n_channels, 4, 2, 1),
Lambda(lambda x: x.view(x.size(0), -1)),
nn.Linear(32 * 32, n_pixels * n_pixels),
Lambda(lambda x: x.view(x.size(0), 1, n_pixels, n_pixels)),
)
def forward(self, x):
out = self.decoder(x)
return out
class NonLinearEncoder(nn.Module):
    def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.fc1 = nn.Linear(n_pixels ** 2, n_pixels // 2)
self.batch_norm = nn.BatchNorm1d(n_pixels // 2)
self.fc2 = nn.Linear(n_pixels // 2, z_dim)
def forward(self, x):
out = x.flatten(start_dim=1)
out = self.fc1(out)
out = self.batch_norm(out)
out = torch.relu(out)
out = self.fc2(out)
out = torch.relu(out)
return out
class NonLinearDecoder(nn.Module):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.n_channels = n_channels
self.n_pixels = n_pixels
self.fc1 = nn.Linear(z_dim, (n_pixels ** 2) // 2)
self.batch_norm = nn.BatchNorm1d((n_pixels ** 2) // 2)
self.fc2 = nn.Linear((n_pixels ** 2) // 2, n_pixels ** 2)
def forward(self, x):
out = self.fc1(x)
out = self.batch_norm(out)
out = torch.relu(out)
# reshape
out = self.fc2(out)
out = torch.relu(out)
out = out.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels)
return out
class VAEBase(ABC):
@staticmethod
def reparameterize(mu, log_var):
"""Returns z_sample from mu, var"""
std = torch.exp(log_var / 2)
# z_sample = torch.normal(mu, std)
# eps = Variable(torch.randn_like(std))
eps = torch.randn_like(std)
z_sample = mu + eps.mul(std)
return z_sample
@staticmethod
def latent_sample(mu, log_var, num_std):
"""Generates sample based on mu, var that's num_std away from mean"""
std = torch.exp(log_var / 2)
z_sample = (num_std * std).add(mu)
return z_sample
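# Note (editorial): reparameterize above is the standard reparameterization trick,
#   z = mu + eps * sigma,   eps ~ N(0, I),   sigma = exp(log_var / 2),
# so gradients flow through mu and log_var while the randomness stays in eps. Minimal sketch
# (hypothetical tensors, not from the original source):
#   mu, log_var = torch.zeros(4, 10), torch.zeros(4, 10)
#   z = VAEBase.reparameterize(mu, log_var)   # (4, 10), approximately N(0, I) samples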
class LinearCCIVAE(nn.Module, VAEBase):
def __init__(self, n_pixels, n_channels, z_dim):
super().__init__()
self.z_dim = z_dim
self.encoder = LinearEncoder(n_pixels, n_channels, 2 * z_dim)
self.decoder = LinearDecoder(n_pixels, n_channels, z_dim)
def forward(self, x):
z_dist = self.encoder(x)
mu, log_var = z_dist[:, : self.z_dim], z_dist[:, self.z_dim :]
# reparameterize
z_sample = LinearCCIVAE.reparameterize(mu, log_var)
out = self.decoder(z_sample)
return out, mu, log_var
class Lambda(nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
class CCIVAE(nn.Module, VAEBase):
"""Model Architecture from CCI-VAE paper
https://arxiv.org/abs/1804.03599
Encoder:
4 convolutional layers, each with 32 channels, 4x4 kernels, and a stride of 2.
Followed by 2 fully connected layers, each of 256 units
Latent Space: 20 units (10 for mean, 10 for variance)
Decoder:
transpose of encoder with ReLU activations
"""
def __init__(self, n_pixels, n_channels, z_dim, distribution="gaussian"):
super().__init__()
self.z_dim = z_dim
self.n_channels = n_channels
self.distribution = distribution
self.encoder = nn.Sequential(
nn.Conv2d(n_channels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(n_pixels, 256, kernel_size=1, stride=1),
nn.ReLU(),
Lambda(lambda x: x.view(x.size(0), -1)),
nn.Linear(256, 2 * z_dim),
)
self.decoder = nn.Sequential(
nn.Linear(z_dim, 256),
nn.ReLU(),
Lambda(lambda x: x.view(-1, 256, 1, 1)),
nn.ConvTranspose2d(256, 64, 4),
nn.ReLU(),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(),
nn.ConvTranspose2d(64, n_pixels, 4, 2, 1),
nn.ReLU(),
nn.ConvTranspose2d(n_pixels, n_channels, 4, 2, 1),
Lambda(lambda x: x.view(x.size(0), -1)),
nn.ReLU(),
nn.Linear(32 * 32, n_pixels * n_pixels),
Lambda(lambda x: x.view(x.size(0), 1, n_pixels, n_pixels)),
nn.Sigmoid(),
)
def forward(self, x):
z_dist = self.encoder(x)
mu, log_var = z_dist[:, : self.z_dim], z_dist[:, self.z_dim :]
# tanh log_var didn't seem to help
# log_var = torch.tanh(log_var)
z_sample = CCIVAE.reparameterize(mu, log_var)
out = self.decoder(z_sample)
return out, mu, log_var
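# Minimal usage sketch (editorial, not from the original source). The encoder flattens to a
# 256-dim vector only when the spatial size reduces to 1x1 after the three stride-2 convs
# (e.g. 28x28 single-channel inputs), so this assumes n_pixels=28, n_channels=1:
#   model = CCIVAE(n_pixels=28, n_channels=1, z_dim=10)
#   x = torch.rand(8, 1, 28, 28)
#   x_hat, mu, log_var = model(x)   # x_hat: (8, 1, 28, 28); mu, log_var: (8, 10)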
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
models.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import numpy as np
import functools
import pdb
class ShiftOperator:
"""Performs discrete shift based on n_rotations."""
def __init__(self, n_rotations, device):
self.n_rotations = n_rotations
self.device = device
self.translation_matrices = self.generate_shift_operator_matrices(
n_rotations + 1
)
def __call__(self, z_batch, angles):
"""Interface for Autoencoder"""
return self.translate_batch(z_batch, angles)
def translate_batch(self, z_batch, angles):
"""Applies shift operator to batch
Args:
angles (array of floats): counter-clockwise rotation in degrees.
"""
smallest_angle = 360 / (self.n_rotations + 1)
if angles.dim() > 1:
shifts = angles[:, 0] / smallest_angle
else:
shifts = angles / smallest_angle
try:
translated_batch = [
self.translate(z, shifts[i].long()) for i, z in enumerate(z_batch)
]
except IndexError as e:
print("===ANGLES ARE", angles)
raise e
return torch.stack(translated_batch)
def translate(self, z, shift):
"""Translate latent
Args:
z (1-dim tensor): latent vector
shift (int): amount by which to shift.
shift of 0 corresponds to the identity.
"""
# reshape into 2D tensor
z_2d = z.reshape(self.n_rotations + 1, -1)
translation_matrix = self.translation_matrices[shift]
# move to cpu if tensor is cpu. Used for eval
if not z_2d.is_cuda:
translation_matrix = translation_matrix.cpu()
# translation
z_2d_shifted = translation_matrix.matmul(z_2d)
# reshape back
z_shifted = z_2d_shifted.reshape(z.shape)
return z_shifted
def generate_shift_operator_matrices(self, n_rotations):
"""Generates family of shift operator matrices"""
translation_matrix = np.zeros((n_rotations, n_rotations))
for i in range(n_rotations):
translation_matrix[i, (i + 1) % n_rotations] = 1
translation_matrices = [np.eye(n_rotations, n_rotations)]
T = np.eye(n_rotations, n_rotations)
for i in range(n_rotations - 1):
T = np.dot(translation_matrix, T)
translation_matrices.append(T)
translation_matrices = np.array(translation_matrices)
_translation_matrices = torch.tensor(
translation_matrices, dtype=torch.float32, device=self.device,
)
return _translation_matrices
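# Illustrative sketch (editorial, not from the original source): for n_rotations=2 the
# constructor builds generate_shift_operator_matrices(3), i.e. the cyclic permutations
#   I, [[0,1,0],[0,0,1],[1,0,0]] (shift by 1), [[0,0,1],[1,0,0],[0,1,0]] (shift by 2),
# and translate() reshapes z into (n_rotations + 1) blocks and cyclically permutes them:
#   op = ShiftOperator(n_rotations=2, device="cpu")
#   z = torch.arange(6.0)           # blocks [0, 1], [2, 3], [4, 5]
#   op.translate(z, 1)              # tensor([2., 3., 4., 5., 0., 1.])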
class ComplexShiftOperator:
"""Performs discrete shift based on n_rotations"""
def __init__(self, cardinals, z_dim, device, unique_transfo=False, index=None):
self.cardinals = cardinals
self.z_dim = z_dim
self.device = device
self.translation_matrices = self.generate_translation_matrices(
self.cardinals, self.z_dim
)
        if unique_transfo:
            n_active = (np.array(cardinals) > 1).sum()
            if n_active == 1:
                self.index = int((np.array(cardinals) > 1).nonzero()[0])
            elif n_active > 1:
                if index is None:
                    raise ValueError("Must provide the index of the operator!")
                self.index = index
            self.translate_batch = self.translate_batch_unique
        else:
            self.translate_batch = self.translate_batch_multiple
def __call__(self, z_batch, shifts):
"""Interface for Autoencoder"""
z_batch_r, z_batch_i = z_batch
return self.translate_batch(z_batch_r, z_batch_i, shifts)
def translate_batch_unique(self, z_batch_r, z_batch_i, shifts):
"""Translates batch in the case of a unique transformations (Faster)"""
tr = self.translation_matrices[self.index][0][shifts[:, 0]]
ti = self.translation_matrices[self.index][1][shifts[:, 0]]
z_batch_r_shifted = tr * z_batch_r - ti * z_batch_i
z_batch_i_shifted = tr * z_batch_i + ti * z_batch_r
return (
z_batch_r_shifted,
z_batch_i_shifted,
)
def translate_batch_multiple(self, z_batch_r, z_batch_i, shifts):
"""Translates batch in the case of multiple transformations"""
(Mr, Mi) = self.build_multipliers(shifts)
z_batch_r_shifted = Mr * z_batch_r - Mi * z_batch_i
z_batch_i_shifted = Mr * z_batch_i + Mi * z_batch_r
return (
z_batch_r_shifted,
z_batch_i_shifted,
)
def build_multipliers(self, shifts):
size_batch, n_transfo = shifts.shape
Mr = torch.ones((size_batch, self.z_dim), device=self.device)
Mi = torch.zeros((size_batch, self.z_dim), device=self.device)
        for i in range(n_transfo):
            tr = self.translation_matrices[i][0][shifts[:, i]]
            ti = self.translation_matrices[i][1][shifts[:, i]]
            # complex multiply (Mr + i*Mi) * (tr + i*ti); update Mr and Mi together so the
            # imaginary part uses the pre-update Mr
            Mr, Mi = Mr * tr - Mi * ti, Mr * ti + Mi * tr
return (Mr, Mi)
def translate(self, zr, zi, shift):
"""Translate latent
Args:
z (1-dim tensor): latent vector
shift (int): amount by which to shift
"""
        for i in range(len(shift)):
            tr = self.translation_matrices[i][0][shift[i]]
            ti = self.translation_matrices[i][1][shift[i]]
            # complex multiply (zr + i*zi) * (tr + i*ti) without clobbering zr before zi is computed
            zr, zi = zr * tr - zi * ti, zi * tr + zr * ti
return (zr, zi)
def generate_translation_matrices(self, cardinals, z_dim):
"""Generates family of translation matrices"""
def DFT_matrix(cardinal, z_dim):
i, j = np.meshgrid(np.arange(cardinal), np.arange(cardinal))
omega = np.exp(2 * np.pi * 1j / cardinal)
W = np.power(omega, i * j)
return W
# Loop over all transformations that can happen to the sample
XYZ = []
for i, t in enumerate(cardinals):
K = self.cardinals[i]
X_i = np.arange(K)
if z_dim % K: # creates in shift operator an unfinished cycle
second_dim = (
int(np.floor(z_dim / K)) + 1
) # TODO: not sure this is the right way
else: # creates in shift operator a finished cycle
second_dim = int(z_dim / K)
X_i = np.tile(X_i.flatten(), (second_dim))[:z_dim]
XYZ.append(X_i)
_all_translation_matrices = list()
for i in range(len(cardinals)):
translation_matrices = DFT_matrix(cardinals[i], z_dim)
translation_matrices = translation_matrices[:, XYZ[i]]
translation_matrices_r = np.real(translation_matrices)
translation_matrices_i = np.imag(translation_matrices)
_translation_matrices_r = torch.tensor(
translation_matrices_r, dtype=torch.float32, device=self.device,
)
_translation_matrices_i = torch.tensor(
translation_matrices_i, dtype=torch.float32, device=self.device,
)
_all_translation_matrices.append(
(_translation_matrices_r, _translation_matrices_i,)
)
return _all_translation_matrices
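# Note (editorial): the "translation matrices" here are rows of a DFT matrix. For a single
# transformation of cardinality K, latent coordinate k is assigned a frequency f_k that cycles
# through 0..K-1, and shifting by s multiplies that coordinate by the unit phase
#   exp(2*pi*i * s * f_k / K),
# so composing shifts s1 and s2 equals a single shift by (s1 + s2) mod K. Minimal sketch
# (hypothetical values, not from the original source):
#   op = ComplexShiftOperator(cardinals=[4, 1, 1, 1], z_dim=8, device="cpu", unique_transfo=True)
#   zr, zi = torch.rand(2, 8), torch.rand(2, 8)
#   out_r, out_i = op((zr, zi), torch.LongTensor([[1], [3]]))   # per-sample phase shifts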
class DisentangledRotation:
"""Performs rotation using rotation matrix of the form:
[cos, -sin], [sin, cos]
Args:
n_rotations (int): discrete rotations needed before identity is reached
"""
def __init__(self, n_rotations, device):
self.n_rotations = n_rotations
self.device = device
def __call__(self, z, angles):
"""Interface for Autoencoder"""
return self.rotate_batch(z, angles)
def rotate_batch(self, x_batch, angles):
"""Rotates batch"""
rotated_batch = []
        if angles.dim() > 1:
            angles = angles[:, 0]
for i, x in enumerate(x_batch):
x_rotated = self.rotate(x, angles[i])
rotated_batch.append(x_rotated)
return torch.stack(rotated_batch)
def rotate(self, x, angle):
"""Clockwise rotation or translation
Args:
x (1D tensor): representing latent vector
angle (float): rotation angle in degrees
Returns: rotated tensor of same shape as x
"""
if x.dim() != 1:
raise ValueError(f"x must be a flattened 1D vector. Got shape {x.shape}")
rotation_matrix = self.get_rotation_matrix(angle, x.shape[0])
if not x.is_cuda:
rotation_matrix = rotation_matrix.cpu()
x_rotated = rotation_matrix.matmul(x)
return x_rotated
@functools.lru_cache()
def get_rotation_matrix(self, angle, dim):
"""Angle is the rotation angle in degrees.
Returns rotation matrix that operates on first two dimensions
"""
rotation_matrix = torch.diag(torch.ones(dim, device=self.device))
if angle == 0.0:
return rotation_matrix
radians = (angle / 360) * torch.tensor(2 * np.pi)
matrix_2d = torch.tensor(
[
[torch.cos(radians), -torch.sin(radians)],
[torch.sin(radians), torch.cos(radians)],
]
)
rotation_matrix[0][0] = matrix_2d[0][0]
rotation_matrix[0][1] = matrix_2d[0][1]
rotation_matrix[1][0] = matrix_2d[1][0]
rotation_matrix[1][1] = matrix_2d[1][1]
return rotation_matrix.to(device=self.device)
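# Illustrative sketch (editorial, not from the original source): only the first two latent
# coordinates are rotated; the rest pass through the identity block, e.g.
#   op = DisentangledRotation(n_rotations=3, device="cpu")
#   R = op.get_rotation_matrix(90.0, 4)
#   # R is approximately [[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]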
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
latent_operators.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
import models
import latent_operators
from datasets import datasets
from datasets.data_utils import x_to_image
import plot
import pdb
import os
import shutil
eps = 1e-20
class WeaklyComplexAutoEncoder:
"""Trains a weakly supervised shift operator.
Args:
data (AbstractDataset): contains train and test loaders with angles
z_dim (int): dimension of latent space
seed (int): for random number generation
        transformation_type (str): name of the latent operator class (e.g. "ComplexShiftOperator")
"""
def __init__(
self,
data,
z_dim=405,
seed=0,
encoder_type="ComplexLinear",
decoder_type="ComplexLinear",
transformation_type=None,
device="cpu",
temperature=1.0,
output_directory="output",
save_name="",
use_softmax=1,
        n_rotations=0,
        n_x_translations=0,
        n_y_translations=0,
        scaling_factors=(1,),
):
self.z_dim = z_dim
self.seed = seed
self.set_seed()
self.data = data
self.device = device
self.encoder = getattr(models, encoder_type + "Encoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.decoder = getattr(models, decoder_type + "Decoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
cardinals = [
n_rotations + 1,
n_x_translations + 1,
n_y_translations + 1,
len(scaling_factors),
]
self.cardinals = cardinals
        # This model currently supports exactly one transformation; identify which one below
        assert (np.array(cardinals) > 1).sum() == 1
for i, cardinal in enumerate(cardinals):
if cardinal > 1:
self.K = cardinal
self.transfo_index = i
# function used for transformation
self.use_softmax = use_softmax
self.transform = self.get_transformation(transformation_type)
self.temperature = temperature
self.output_dir = output_directory
self.save_name = save_name
self.best_epoch = 0
self.best_mse = 0
def set_seed(self):
"""Sets seed for random number generation"""
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# Generate Dataset
torch.autograd.set_detect_anomaly(True)
def get_transformation(self, name):
"""Returns function to performance transformation based name"""
if name is None:
return None
transformation = getattr(latent_operators, name)
return transformation(self.cardinals, self.z_dim, self.device, unique_transfo = True)
def train(self, loss_func, learning_rate, n_epochs, log_frequency):
self.encoder.train()
self.decoder.train()
params = list(self.encoder.parameters()) + list(self.decoder.parameters())
optimizer = torch.optim.Adam(params, lr=learning_rate)
train_losses = torch.FloatTensor(n_epochs)
valid_losses = torch.FloatTensor(n_epochs)
best_mse = np.inf
N_pairs = len(self.data.train_loader.dataset)
for epoch in range(n_epochs):
epoch_loss = 0
for i, (x1, x2, angles) in enumerate(self.data.train_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
optimizer.zero_grad()
loss = loss_func(x1, x2, angles)
loss.backward()
optimizer.step()
epoch_loss += loss.item() * x1.size(0)
epoch_loss = epoch_loss / N_pairs
print(f"Epoch {epoch} Train loss: {epoch_loss:0.3e}")
valid_mse = (
self.compute_mean_loss(loss_func, self.data.valid_loader)
.detach()
.item()
)
# train_mse = (
# self.compute_mean_loss(loss_func, self.data.train_loader)
# .detach()
# .item()
# )
# train_losses[epoch] = train_mse
train_losses[epoch] = epoch_loss
if valid_mse < best_mse:
self.update_state(mse=valid_mse, epoch=epoch)
best_mse = valid_mse
file_name = "checkpoint_{}.pth.tar".format(self.save_name)
self.save_best_checkpoint(
out_dir=self.output_dir,
file_name=file_name,
optimizer_state_dict=optimizer.state_dict(),
)
print(f"Epoch {epoch} validation loss: {valid_mse:0.3e}")
valid_losses[epoch] = valid_mse
return train_losses.detach().numpy(), valid_losses.detach().numpy()
def ifft(self, cps):
second_dim = cps.size(1)
K = len(self.transform.translation_matrices[self.transfo_index][0])
cps_r = cps[..., 0].to(device=self.device)
cps_i = cps[..., 1].to(device=self.device)
tr_r = self.transform.translation_matrices[self.transfo_index][0]
tr_i = self.transform.translation_matrices[self.transfo_index][1]
alternative = cps_r[:, None, ...] * tr_r - cps_i[:, None, ...] * tr_i
alternative = alternative.mean(2) # mean over frequencies
return alternative
def reconstruct_x1(self, x1):
"""Reconstructs x1 using model"""
self.encoder.eval()
self.decoder.eval()
x1 = x1.to(device=self.device)
with torch.no_grad():
z1 = self.encoder(x1)
x1_reconstruction_r = self.decoder(z1)
return x1_reconstruction_r
def reconstruct_x2(self, x1, x2, param=None):
"""Reconstructs x2 using model and latent transformation"""
self.encoder.eval()
self.decoder.eval()
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
batch_size = x1.size(0)
with torch.no_grad():
z1 = self.encoder(x1)
z2 = self.encoder(x2)
angles_probas = self.compute_angles_probas(x1, x2, z1, z2)
predicted_angle = angles_probas.detach().argmax(-1, keepdims=True)
z_transformed = self.transform(z1, predicted_angle)
x2_reconstruction_r = self.decoder(z_transformed)
return x2_reconstruction_r
def plot_multiple_transformations(self, param_name='angle', indices=None, train_set=False, save_name=None):
"""Plots all rotated reconstructions for given samples"""
if indices is None:
n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test))
indices = np.random.randint(low=0, high=n_samples, size=5)
X = (
self.data.X_orig_train[indices]
if train_set
else self.data.X_orig_test[indices]
).float()
        title = (
            "Rotations" if param_name == "angle" else "Translations"
        )
plot.plot_transformations_complex(
X,
self,
title,
save_name=save_name,
param_name=param_name,
supervised=False,
)
def plot_x1_reconstructions(
self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None
):
"""Plots x1 autoencoder reconstruction from z1.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
plot.plot_x1_reconstructions(
pairs, self.reconstruct_x1, indices, train_set, save_name
)
def plot_x2_reconstructions(
self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None
):
"""Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
plot.plot_x2_reconstructions(
pairs, self.reconstruct_x2, indices, train_set, save_name
)
def compute_angles_probas(self, x1, x2, z1, z2):
cps = self.computes_cross_power_spectrum(z1[0], z1[1], z2[0], z2[1])
invfs_alter = self.ifft(cps)
angles_probas = invfs_alter
return angles_probas
def reconstruction_mse_transformed_z1_weak(self, x1, x2, angles, use_argmax=False):
"""Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1), not using ground-truth angles"""
criterion = torch.nn.MSELoss(reduction="none")
batch_size = x1.size(0)
z1 = self.encoder(x1)
z2 = self.encoder(x2)
prod_size = np.prod(x1.size())
x1_reconstruction_r = self.decoder(z1)
x1_reconstruction_loss = criterion(x1_reconstruction_r, x1)
x1_reconstruction_loss = x1_reconstruction_loss.mean()
# TODO this is not adapted to product of shift operators, it's looking only at the 1st cardinal
# Transform according to all possible angles, weighted
angles_probas = self.compute_angles_probas(x1, x2, z1, z2)
if use_argmax:
predicted_angle = angles_probas.detach().argmax(
-1, keepdims=True
)
z_transformed = self.transform(z1, predicted_angle)
x2_reconstruction_r = self.decoder(z_transformed)
x2_reconstruction_loss = criterion(x2_reconstruction_r, x2)
x2_reconstruction_loss = x2_reconstruction_loss.mean()
else:
all_angles = torch.arange(self.K).repeat(1, batch_size).view(-1, 1)
temp = self.temperature
mask = torch.softmax(angles_probas / temp, dim=-1)
repeat_z1 = (
z1[0][:, None, :].repeat(1, self.K, 1).view(batch_size * self.K, -1),
z1[1][:, None, :].repeat(1, self.K, 1).view(batch_size * self.K, -1),
)
x2_repeat = (
x2[:, None, ...]
.repeat(1, self.K, 1, 1, 1)
.view(batch_size * self.K, x2.size(1), x2.size(2), x2.size(3))
)
z_transformed = self.transform(repeat_z1, all_angles)
x2_reconstruction_r = self.decoder(z_transformed)
x2_reconstruction_transformed_loss = (
criterion(x2_reconstruction_r, x2_repeat)
.sum((1, 2, 3)) # sums over image dim
.view(batch_size, -1)
)
x2_reconstruction_loss = (mask * x2_reconstruction_transformed_loss).sum() / prod_size
loss = x1_reconstruction_loss + x2_reconstruction_loss
return loss
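    # Note (editorial): in the soft branch above, the x2 term is a score-weighted mixture over
    # all K candidate shifts: per sample,
    #   L_x2 ~ sum_s softmax(score / temperature)_s * sum-squared-error(dec(T_s(z1)), x2),
    # normalized by the total number of elements in the batch. The ground-truth shift is never
    # used; the phase-correlation scores alone decide how much each candidate reconstruction
    # contributes to the gradient.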
def computes_cross_power_spectrum(
self, z_batch_r1, z_batch_i1, z_batch_r2, z_batch_i2
):
"""Computes Cross Power spectrum (no FFT) """
batch_size = z_batch_r1.size(0)
z1z2_batch_r = (
z_batch_r1 * z_batch_r2 + z_batch_i1 * z_batch_i2
) # recall we use the conjugate of z_batch_2, hence the + here
z1z2_batch_i = (
-z_batch_r1 * z_batch_i2 + z_batch_i1 * z_batch_r2
) # recall we use the conjugate of z_batch_2, hence the - in front here
norm_z1z2_batch = ((z1z2_batch_r ** 2 + z1z2_batch_i ** 2)) ** 0.5
cps_r = z1z2_batch_r / norm_z1z2_batch
cps_i = z1z2_batch_i / norm_z1z2_batch
cps = torch.cat([cps_r[..., None], cps_i[..., None]], -1)
return cps
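    # Note (editorial): computes_cross_power_spectrum + ifft implement phase correlation in the
    # latent space. If z2 is (approximately) z1 shifted by s0 under the ComplexShiftOperator,
    # then z1 * conj(z2) has phase exp(-2*pi*i * s0 * f_k / K) in coordinate k; after normalizing
    # to unit modulus, correlating against the K candidate phase patterns (the "ifft" above)
    # gives scores proportional to mean_k cos(2*pi*(s - s0)*f_k / K), which peak at s = s0.
    # That is why compute_angles_probas can recover the relative shift with no angle labels.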
def compute_test_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval()
self.decoder.eval()
losses = []
N = 0
with torch.no_grad():
for i, (x1, x2, angles) in enumerate(data_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
bs = x1.size(0)
loss_batch = loss_func(x1, x2, angles, True)*bs
N += bs
losses.append(loss_batch)
test_loss = torch.stack(losses).sum() / float(N)
self.encoder.train()
self.decoder.train()
return test_loss
def compute_mean_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval()
self.decoder.eval()
losses = []
with torch.no_grad():
for i, (x1, x2, angles) in enumerate(data_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
loss_batch = loss_func(x1, x2, angles, True)
losses.append(loss_batch)
mean_loss = torch.stack(losses).mean()
self.encoder.train()
self.decoder.train()
return mean_loss
def run(
self, learning_rate=0.0005, n_epochs=10, log_frequency=50
):
"""Runs experiment for autoencoder reconstruction."""
loss_func = self.reconstruction_mse_transformed_z1_weak
train_loss, valid_loss = self.train(
loss_func, learning_rate, n_epochs, log_frequency
)
train_mse = self.compute_mean_loss(loss_func, self.data.train_loader)
print(f"Train MSE: {train_mse}")
valid_mse = self.compute_mean_loss(loss_func, self.data.valid_loader)
print(f"Valid MSE: {valid_mse}")
test_mse = self.compute_test_loss(loss_func, self.data.test_loader_batch_100)
print(f"Test MSE: {test_mse}")
return train_loss, valid_loss, train_mse, valid_mse, test_mse
def update_state(self, mse, epoch):
self.best_mse = mse
self.best_epoch = epoch
def load_model(self, path_to_checkpoint):
checkpoint = torch.load(path_to_checkpoint)
self.best_epoch = checkpoint["best_epoch"]
self.encoder.load_state_dict(checkpoint["encoder_state_dict"])
self.decoder.load_state_dict(checkpoint["decoder_state_dict"])
self.best_mse = checkpoint["best_mse"]
return checkpoint["best_mse"], checkpoint["best_epoch"]
def get_current_state(self):
return {
"encoder_state_dict": self.encoder.state_dict(),
"decoder_state_dict": self.decoder.state_dict(),
"best_epoch": self.best_epoch,
"best_mse": self.best_mse,
}
def save_best_checkpoint(self, out_dir, file_name, optimizer_state_dict):
"""
:param file_name: filename to save checkpoint in.
:param optimizer_state_dict: state of the optimizer.
:return: str to path where the model is saved.
"""
state = self.get_current_state()
state["optimizer_state_dict"] = optimizer_state_dict
best_path = os.path.join(out_dir, "best_" + file_name)
torch.save(state, best_path)
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
weakly_complex_shift_autoencoder.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import copy
import torch
import json
import os
import random
import numpy as np
import models
import latent_operators
import plot
from datasets import datasets, transformations
class AutoEncoder:
"""Trains an autoencoder on rotated shapes.
Args:
data (AbstractDataset): contains train and test loaders with transformation params
z_dim (int): dimension of latent space
seed (int): for random number generation
        latent_operator_name (str): name of the latent operator ("ShiftOperator" or
            "DisentangledRotation"); None trains a standard autoencoder.
"""
def __init__(
self,
data,
z_dim=700,
seed=0,
encoder_type="Linear",
decoder_type="Linear",
latent_operator_name=None,
device="cpu",
learning_rate=0.0005,
n_epochs=5,
):
self.z_dim = z_dim
self.seed = seed
self.set_seed()
self.data = data
self.device = device
self.encoder_type = encoder_type
self.decoder_type = decoder_type
self.encoder = getattr(models, encoder_type + "Encoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.decoder = getattr(models, decoder_type + "Decoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.encoder_best_valid = self.encoder
self.decoder_best_valid = self.decoder
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.transformation_param_name = self.get_transformation_param_name()
# function used for latent transformation
self.use_latent_op = False if latent_operator_name is None else True
self.latent_operator_name = latent_operator_name
self.latent_operator = self.get_latent_operator(latent_operator_name)
self.train_losses = []
self.valid_losses = []
self.final_test_loss = None
def __repr__(self):
model = {
"encoder_type": self.encoder_type,
"decoder_type": self.decoder_type,
"z_dim": self.z_dim,
"latent_operator": self.latent_operator_name,
"batch_size": self.data.batch_size,
"learning_rate": self.learning_rate,
"n_epochs": self.n_epochs,
"data": str(self.data),
}
return json.dumps(model)
def save(self, path, indices=None):
os.makedirs(path, exist_ok=True)
self.save_model_configs(path)
self.save_models(path)
self.save_losses(path)
self.save_plots(path)
def save_model_configs(self, path):
model_configs_str = self.__repr__()
model_configs = json.loads(model_configs_str)
file_path = os.path.join(path, "model_configs.json")
with open(file_path, "w") as outfile:
json.dump(model_configs, outfile)
def save_models(self, path):
encoder_path = os.path.join(path, "encoder.pt")
torch.save(self.encoder.state_dict(), encoder_path)
decoder_path = os.path.join(path, "decoder.pt")
torch.save(self.decoder.state_dict(), decoder_path)
def load_models(self, path, device="cpu"):
self.encoder.load_state_dict(
torch.load(os.path.join(path, "encoder.pt"), map_location=device)
)
self.decoder.load_state_dict(
torch.load(os.path.join(path, "decoder.pt"), map_location=device)
)
def save_losses(self, path):
file_path = os.path.join(path, "train_losses.npy")
np.save(file_path, self.train_losses)
file_path = os.path.join(path, "valid_losses.npy")
np.save(file_path, self.valid_losses)
file_path = os.path.join(path, "test_loss.npy")
np.save(file_path, self.final_test_loss)
def save_plots(self, path):
for train_set in [True, False]:
set_name = "train" if train_set else "test"
x1_plot_path = os.path.join(path, f"x1_{set_name}_reconstructions")
self.plot_x1_reconstructions(save_name=x1_plot_path, train_set=train_set)
# store x2 reconstructions only when using supervised latent operator
if self.use_latent_op:
x2_plot_path = os.path.join(path, f"x2_{set_name}_reconstructions")
self.plot_x2_reconstructions(
save_name=x2_plot_path, train_set=train_set
)
transformation_name = (
"translations"
if self.transformation_param_name != "angle"
else "rotations"
)
multiple_rotations_path = os.path.join(
path, f"x_{set_name}_{transformation_name}"
)
self.plot_multiple_rotations(
save_name=multiple_rotations_path, train_set=train_set
)
def save_best_validation(self, path, indices=None):
self.encoder = self.encoder_best_valid
self.decoder = self.decoder_best_valid
self.save(path, indices=indices)
def set_seed(self):
"""Sets seed for random number generation"""
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# Generate Dataset
torch.autograd.set_detect_anomaly(True)
def get_transformation_param_name(self):
"""Returns the parameter used for transformation"""
if self.data.n_rotations > 1:
return "angle"
elif self.data.n_x_translations > 1:
return "shift_x"
elif self.data.n_y_translations > 1:
return "shift_y"
else:
raise ValueError("No transformation found")
def get_latent_operator(self, name):
"""Returns function to performance transformation based name"""
if name is None:
return None
latent_operator = getattr(latent_operators, name)
return latent_operator(self.n_transformations, self.device)
@property
def n_transformations(self):
if self.data.n_rotations > 1:
return self.data.n_rotations
elif self.data.n_x_translations > 1:
return self.data.n_x_translations
elif self.data.n_y_translations > 1:
return self.data.n_y_translations
else:
raise ValueError("No transformation found")
def train(self, loss_func, stop_early=False, log_frequency=None):
self.encoder.train().to(self.device)
self.decoder.train().to(self.device)
params = list(self.encoder.parameters()) + list(self.decoder.parameters())
optimizer = torch.optim.Adam(params, lr=self.learning_rate)
if log_frequency is None:
log_frequency = self.set_log_frequency()
for epoch in range(self.n_epochs):
running_loss = 0.0
print(f"Epoch {epoch}")
self.log_train_val_loss(loss_func)
for i, (x1, x2, params) in enumerate(self.data.train_loader):
print(f"Training batch {i}", end="\r")
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
angles = self.get_angles(params)
angles = angles.to(device=self.device)
optimizer.zero_grad()
loss = loss_func(x1, x2, angles)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % log_frequency == (log_frequency - 1):
print(f"Running loss: {running_loss / log_frequency:0.3e}")
running_loss = 0.0
if stop_early:
return None
train_loss, valid_loss = self.log_train_val_loss(loss_func)
self.copy_models_validation(valid_loss)
# test loss per sample (using batch size 1)
self.final_test_loss = self.compute_total_loss(
self.data.test_loader_batch_1, loss_func
)
print(f"Test Loss: {self.final_test_loss:0.3e}")
def set_log_frequency(self):
frequency = len(self.data.train_loader) // 10
return frequency
def copy_models_validation(self, valid_loss):
"""Copies models with best validation"""
if valid_loss < np.min(self.valid_losses):
self.encoder_best_valid = copy.deepcopy(self.encoder)
self.decoder_best_valid = copy.deepcopy(self.decoder)
def log_train_val_loss(self, loss_func, show_print=True):
train_loss = self.compute_total_loss(self.data.train_loader, loss_func)
valid_loss = self.compute_total_loss(self.data.valid_loader, loss_func)
self.train_losses.append(train_loss)
self.valid_losses.append(valid_loss)
if show_print:
print(f"Total loss train: {train_loss:0.3e} validation: {valid_loss:0.3e}")
return train_loss, valid_loss
def compute_total_loss(self, loader, loss_func):
self.encoder.eval()
self.decoder.eval()
losses = []
with torch.no_grad():
for x1, x2, params in loader:
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
angles = self.get_angles(params)
angles = angles.to(device=self.device)
losses.append(loss_func(x1, x2, angles).cpu())
mean_loss = torch.stack(losses).mean()
self.encoder.train()
self.decoder.train()
return mean_loss
def reconstruction_mse_x1(self, x1, x2, angles):
"""Computes MSE x1 reconstruction loss"""
criterion = torch.nn.MSELoss()
z = self.encoder(x1)
x1_reconstruction = self.decoder(z)
loss = criterion(x1_reconstruction, x1)
return loss
def reconstruction_mse_transformed_z1(self, x1, x2, angles):
"""Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1)"""
criterion = torch.nn.MSELoss()
z = self.encoder(x1)
x1_reconstruction = self.decoder(z)
x1_reconstruction_loss = criterion(x1_reconstruction, x1)
z_transformed = self.latent_operator(z, angles)
x2_reconstruction_loss = criterion(self.decoder(z_transformed), x2)
loss = x1_reconstruction_loss + x2_reconstruction_loss
return loss
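    # Note (editorial): the supervised objective above is
    #   L(x1, x2, theta) = MSE(dec(enc(x1)), x1) + MSE(dec(T_theta(enc(x1))), x2),
    # where T_theta is the latent operator (ShiftOperator or DisentangledRotation) applied with
    # the ground-truth transformation parameter, so the latent space is trained to commute with
    # the observed image-space transformation.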
def reconstruction_mse_frozen_z1(self, x1, x2, angles):
"""Reconstruction loss of x2 from x1 without transformations"""
criterion = torch.nn.MSELoss()
z = self.encoder(x1)
x2_reconstruction = self.decoder(z)
loss = criterion(x2_reconstruction, x2)
return loss
def compute_mean_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval().cpu()
self.decoder.eval().cpu()
losses = []
for x1, x2, params in data_loader:
angles = self.get_angles(params)
losses.append(loss_func(x1, x2, angles).cpu())
mean_loss = torch.stack(losses).mean()
return mean_loss
def get_angles(self, params):
"""Returns tensor of angles for translations in x or rotations."""
param_name = self.transformation_param_name
if param_name in ("shift_x", "shift_y"):
angles = torch.tensor(
[
transformations.shift_to_angle(
getattr(p, param_name), self.n_transformations,
)
for p in params
]
)
else:
angles = torch.tensor([p.angle for p in params])
return angles
def run(self, log_frequency=None, stop_early=False):
"""Runs experiment for autoencoder reconstruction.
Args:
log_frequency (int): number of batches after which to print loss
stop_early (bool): stop after a single log_frequency number of batches.
Useful for testing without waiting for long training.
"""
if self.latent_operator_name is None:
loss_func = self.reconstruction_mse_x1
elif self.latent_operator_name in ["ShiftOperator", "DisentangledRotation"]:
loss_func = self.reconstruction_mse_transformed_z1
# TODO: what is frozen_rotation?
elif self.latent_operator_name == "frozen_rotation":
loss_func = self.reconstruction_mse_frozen_z1
else:
raise ValueError(
f"transformation type {self.transformation_type} not supported"
)
self.train(
loss_func, log_frequency=log_frequency, stop_early=stop_early,
)
def reconstruct_x1(self, x1):
"""Reconstructs x1 using model"""
self.encoder.eval().cpu()
self.decoder.eval().cpu()
with torch.no_grad():
z = self.encoder(x1)
y = self.decoder(z)
return y
def reconstruct_transformed_x1(self, x1, param):
"""Reconstructs x1 transformed using model"""
self.encoder.eval().cpu()
self.decoder.eval().cpu()
with torch.no_grad():
x_transformed = transformations.transform(x1.squeeze(0), param)
z = self.encoder(x_transformed.unsqueeze(0))
y = self.decoder(z)
return y
def reconstruct_x2(self, x1, param):
"""Reconstructs x2 using model and latent transformation"""
self.encoder.eval().cpu()
self.decoder.eval().cpu()
with torch.no_grad():
z = self.encoder(x1)
angle = self.get_angles([param]).unsqueeze(0)
z_transformed = self.latent_operator(z, angle)
x2 = self.decoder(z_transformed)
return x2
def plot_x1_reconstructions(self, indices=None, train_set=False, save_name=None):
"""Plots x1 autoencoder reconstruction from z1.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
if indices is None:
indices = random.sample(range(len(pairs)), k=4)
plot.plot_x1_reconstructions(
pairs, self.reconstruct_x1, indices, train_set, save_name
)
def plot_x2_reconstructions(self, indices=None, train_set=False, save_name=None):
"""Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
if indices is None:
indices = random.sample(range(len(pairs)), k=4)
plot.plot_x2_reconstructions(
pairs, self.reconstruct_x2, indices, train_set, save_name
)
def plot_multiple_rotations(self, indices=None, train_set=False, save_name=None):
"""Plots all rotated reconstructions for given samples"""
if indices is None:
n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test))
indices = np.random.randint(low=0, high=n_samples, size=5)
X = (
self.data.X_orig_train[indices]
if train_set
else self.data.X_orig_test[indices]
).float()
title = (
"Translations" if self.transformation_param_name != "angle" else "Rotations"
)
plot.plot_rotations(
X,
self,
self.n_transformations,
title,
save_name=save_name,
param_name=self.transformation_param_name,
use_latent_op=self.use_latent_op,
)
def load_data(configs, path):
data_configs = json.loads(configs["data"])
if "shapes" and "2k-classes" in path:
data = datasets.SimpleShapes(
configs["batch_size"],
n_rotations=data_configs["n_rotations"],
n_x_translations=data_configs["n_x_translations"],
n_y_translations=data_configs["n_y_translations"],
n_classes=2000,
seed=0,
)
elif "mnist" in path:
data = datasets.ProjectiveMNIST(
configs["batch_size"],
n_rotations=data_configs["n_rotations"],
n_x_translations=data_configs["n_x_translations"],
n_y_translations=data_configs["n_y_translations"],
train_set_proportion=0.01,
valid_set_proportion=0.01,
test_set_proportion=1.0,
seed=0,
)
else:
raise ValueError("data not found")
return data
def load(path):
with open(os.path.join(path, "model_configs.json")) as f:
configs = json.load(f)
data = load_data(configs, path)
model_type = "CCI" if "cci" in path else "Linear"
model = AutoEncoder(
data,
z_dim=configs["z_dim"],
latent_operator_name=configs["latent_operator"],
encoder_type=model_type,
decoder_type=model_type,
)
model.load_models(path)
return model
if __name__ == "__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"running on {device}")
n_epochs = 2
simple_shapes = datasets.SimpleShapes(16)
print("Training Autoencder")
model = AutoEncoder(simple_shapes, device=device, n_epochs=n_epochs)
model.run()
print("Training Autoencder with Latent Translation")
model_with_rotation = AutoEncoder(
simple_shapes,
latent_operator_name="ShiftOperator",
device=device,
n_epochs=n_epochs,
)
model_with_rotation.run()
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
autoencoder.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
"""Implements CCI VAE
https://arxiv.org/abs/1804.03599
"""
import torch
import os
import numpy as np
import models
import json
import plot
import copy
import random
from datasets import datasets, transformations
from datasets.data_utils import x_to_image
from sklearn.decomposition import PCA
import matplotlib
import matplotlib.pyplot as plt
class CCIVariationalAutoEncoder:
"""Trains an autoencoder on rotated shapes.
Args:
data (AbstractDataset): contains train and test loaders with angles
model (CCIVAE model): contains forward funtion with encoder / decoder
beta (float): beta in beta-VAE model
c_max (float): maximum value for controlled capacity parameter in CCI VAE.
z_dim (int): dimension of latent space
seed (int): for random number generation
translation (bool): if true, uses an offset identity matrix for rotation
"""
def __init__(
self,
data,
model=models.CCIVAE,
beta=1000.0,
c_max=36.0,
z_dim=30,
seed=0,
device="cpu",
learning_rate=0.0005,
n_epochs=5,
distribution="gaussian",
):
self.beta, self.c_max = beta, c_max
self.c = 0.0
self.z_dim = z_dim
self.data = data
self.device = device
self.model_cls = model
self.model = model(
self.data.n_pixels, self.data.n_channels, z_dim, distribution=distribution
)
self.model.to(device=device)
self.model_best_valid = self.model
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.distribution = distribution
self.seed = seed
self.set_seed()
self.train_losses = []
self.kl_losses = []
self.reconstruction_losses = []
self.valid_losses = []
self.final_test_loss = None
def __repr__(self):
model = {
"model_class": str(self.model_cls),
"beta": self.beta,
"c_max": self.c_max,
"distribution": self.distribution,
"z_dim": self.z_dim,
"batch_size": self.data.batch_size,
"learning_rate": self.learning_rate,
"n_epochs": self.n_epochs,
"data": str(self.data),
}
return json.dumps(model)
def set_seed(self):
"""Sets seed for random number generation"""
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# Enable anomaly detection to help debug autograd issues
torch.autograd.set_detect_anomaly(True)
def compute_loss(self, x1):
"""Loss for controlled capacity beta vae (CCI VAE)
https://arxiv.org/abs/1804.03599
"""
if self.distribution == "gaussian":
criterion = torch.nn.MSELoss(reduction="sum")
elif self.distribution == "bernoulli":
criterion = torch.nn.BCELoss(reduction="sum")
else:
raise ValueError(f"distribution {self.distribution} not supported")
# assuming a Gaussian Distribution
out, mu, log_var = self.model(x1)
reconstruction_loss = criterion(out, x1)
# https://arxiv.org/abs/1312.6114
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
kl_divergence = (
-0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).mean(dim=0)
).sum()
return reconstruction_loss, kl_divergence
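# Note on the KL term above: for a diagonal Gaussian posterior N(mu, sigma^2) and a
# standard normal prior, KL(q || p) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2);
# here the expression is averaged over the batch (mean over dim 0) before summing
# over latent dimensions, so the returned value is a per-sample KL.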
def train(self, stop_early=False, log_frequency=None, track_losses=True):
"""Trains controlled capacity beta vae (CCI VAE)
https://arxiv.org/abs/1804.03599
Learning rate used in the paper is 5e-4
If verbose is False, previous loss print is overridden
If stop_early is True, training stops after first logged loss.
This is useful for testing.
"""
self.model.train().to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
c_step_size = (self.c_max - self.c) / self.n_epochs
if log_frequency is None:
log_frequency = self.set_log_frequency()
for epoch in range(self.n_epochs):
running_loss = 0.0
print(f"Epoch {epoch}")
if track_losses:
self.log_train_val_loss()
running_loss = 0.0
running_reconstruction_loss, running_kl_divergence = 0.0, 0.0
# update controlled capacity parameter
self.c += c_step_size
for i, (x1, _, _) in enumerate(self.data.train_loader):
x1 = x1.to(device=self.device)
optimizer.zero_grad()
reconstruction_loss, kl_divergence = self.compute_loss(x1)
loss = reconstruction_loss + self.beta * (kl_divergence - self.c).abs()
loss.backward()
optimizer.step()
running_loss += loss.item()
running_reconstruction_loss += (
reconstruction_loss.cpu().detach().numpy()
)
running_kl_divergence += kl_divergence.cpu().detach().numpy()
if i % log_frequency == (log_frequency - 1):
normalized_loss = running_loss / log_frequency
normalized_reconstruction_loss = (
running_reconstruction_loss / log_frequency
)
normalized_kl_divergence = running_kl_divergence / log_frequency
print(f"Running Total Loss: {normalized_loss:0.3e}")
print(
f"Running Reconstruction Loss: {normalized_reconstruction_loss:0.3e}"
f" KL Divergence: {normalized_kl_divergence:0.3e}"
)
self.kl_losses.append(normalized_kl_divergence)
self.reconstruction_losses.append(normalized_reconstruction_loss)
running_loss = 0.0
running_reconstruction_loss = 0.0
running_kl_divergence = 0.0
if stop_early:
return None
if track_losses:
train_loss, valid_loss = self.log_train_val_loss()
self.copy_models_validation(valid_loss)
# compute test loss per sample
self.final_test_loss = self.compute_total_loss(
self.data.test_loader_batch_1
)
print(f"Test Loss: {self.final_test_loss:0.3e}")
def set_log_frequency(self):
frequency = len(self.data.train_loader) // 10
return frequency
def copy_models_validation(self, valid_loss):
"""Keeps a copy of the model with the best validation loss so far"""
if valid_loss <= np.min(self.valid_losses):
self.model_best_valid = copy.deepcopy(self.model)
def log_train_val_loss(self, show_print=True):
train_loss = self.compute_total_loss(self.data.train_loader)
valid_loss = self.compute_total_loss(self.data.valid_loader)
self.train_losses.append(train_loss)
self.valid_losses.append(valid_loss)
if show_print:
print(f"Total loss train: {train_loss:0.3e} validation: {valid_loss:0.3e}")
return train_loss, valid_loss
def compute_total_loss(self, loader):
"""Computes total average loss on given loader"""
self.model.eval()
losses = []
with torch.no_grad():
for x1, x2, params in loader:
x1 = x1.to(device=self.device)
reconstruction_loss, kl_divergence = self.compute_loss(x1)
loss = reconstruction_loss + self.beta * (kl_divergence - self.c).abs()
losses.append(loss.item())
mean_loss = np.mean(losses)
self.model.train()
return mean_loss
def reconstruct_x1(self, x1):
"""Reconstructs x1 using model"""
self.model.eval().cpu()
with torch.no_grad():
y, _, _ = self.model(x1)
return y
def reconstruct_mean(self, x1):
self.model.eval().cpu()
with torch.no_grad():
_, mu, _ = self.model(x1)
out = self.model.decoder(mu)
return out
def save_best_validation(self, path, indices=None):
"""Saves results best for model with best validation loss"""
self.model = self.model_best_valid
self.save(path, indices=indices)
def save(self, path, indices=None):
os.makedirs(path, exist_ok=True)
self.save_model_configs(path)
self.save_model(path)
self.save_losses(path)
self.save_plots(path)
def save_model_configs(self, path):
model_configs_str = self.__repr__()
model_configs = json.loads(model_configs_str)
file_path = os.path.join(path, "model_configs.json")
with open(file_path, "w") as outfile:
json.dump(model_configs, outfile)
def load_model(self, path):
device = torch.device("cpu")
model = self.model_cls(self.data.n_pixels, self.data.n_channels, self.z_dim)
model.load_state_dict(torch.load(path, map_location=device))
self.model = model
self.model.to(device=device)
def save_model(self, path):
full_path = os.path.join(path, "model.pt")
torch.save(self.model.state_dict(), full_path)
def save_losses(self, path):
file_path = os.path.join(path, "kl_divergence.npy")
np.save(file_path, self.kl_losses)
file_path = os.path.join(path, "reconstruction_losses.npy")
np.save(file_path, self.reconstruction_losses)
file_path = os.path.join(path, "train_losses.npy")
np.save(file_path, self.train_losses)
file_path = os.path.join(path, "valid_losses.npy")
np.save(file_path, self.valid_losses)
file_path = os.path.join(path, "test_loss.npy")
np.save(file_path, self.final_test_loss)
def save_plots(self, path):
matplotlib.use("Agg")
for train_set in [True, False]:
set_name = "train" if train_set else "test"
x1_plot_path = os.path.join(path, f"x1_{set_name}_reconstructions")
self.plot_x1_reconstructions(save_name=x1_plot_path, train_set=train_set)
latent_traversal_path = os.path.join(path, f"x_{set_name}_latent_traversal")
self.plot_latent_traversal(
save_name=latent_traversal_path, train_set=train_set
)
def plot_x1_reconstructions(self, indices=None, train_set=False, save_name=None):
"""Plots x1 autoencoder reconstruction from z1.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
if indices is None:
indices = random.sample(range(len(pairs)), k=4)
plot.plot_x1_reconstructions(
pairs, self.reconstruct_mean, indices, train_set, save_name
)
def plot_latent_traversal(
self,
indices=None,
num_std=6.0,
train_set=True,
save_name=None,
fixed_range=True,
):
"""Traverses latent space from [mu - 3 * std, mu + 3 * std] for given indices.
If fixed_range is True, then [-num_std, num_std] is the interval.
"""
self.model.eval().cpu()
pairs = self.data.X_train if train_set else self.data.X_test
if indices is None:
indices = random.sample(range(len(pairs)), k=3)
for index in indices:
sample_save_name = save_name
if save_name is not None:
sample_save_name = save_name + "_sample_" + str(index)
self._plot_latent_traversal_helper(
pairs, index, num_std, train_set, sample_save_name, fixed_range
)
def plot_single_latent_traversal(
self, index=3, train_set=True, latent_dim=0, save_name=None, num_std=6.0,
):
self.model.eval().cpu()
pairs = self.data.X_train if train_set else self.data.X_test
sample_save_name = save_name
if save_name is not None:
sample_save_name = save_name + "_sample_" + str(index)
x1, x2, p = pairs[index]
title = "Training" if train_set else "Test"
traversal_path = CCIVariationalAutoEncoder.get_std_path(num_std)
num_subplots = len(traversal_path) + 1
fig, axs = plt.subplots(1, num_subplots, figsize=(12, 16))
axs[0].imshow(x1.squeeze())
axs[0].set_title(f"{title}: x1, latent {latent_dim}")
axs[0].set_xticks([])
axs[0].set_yticks([])
with torch.no_grad():
_, mu, log_var = self.model(x1.unsqueeze(0))
z = mu
for i, step in enumerate(traversal_path):
z_shifted = z.clone().cpu().detach()
z_shifted[0][latent_dim] = step
with torch.no_grad():
reconstruction = self.model.decoder(z_shifted)
axs[i + 1].imshow(reconstruction.squeeze().detach().numpy())
axs[i + 1].set_xticks([])
axs[i + 1].set_yticks([])
fig.tight_layout()
if save_name:
# close figure to speed up saving
plt.savefig(sample_save_name, bbox_inches="tight", dpi=100)
plt.close(fig)
@staticmethod
def get_std_path(num_std):
"""Returns list of std steps.
[-3, -2, -1, 0, 1, 2, 3]
"""
step_size = num_std / 3.0
positive_steps = [i * step_size for i in range(1, 4)]
negative_steps = sorted(list(-1 * np.array(positive_steps)))
path = negative_steps + [0] + positive_steps
return path
def _plot_latent_traversal_helper(
self, X, index, num_std, train_set, save_name, fixed_range
):
title = "Training" if train_set else "Test"
traversal_path = CCIVariationalAutoEncoder.get_std_path(num_std)
num_subplots = len(traversal_path) + 1
x1, x2, p = X[index]
fig, axs = plt.subplots(self.z_dim, num_subplots, figsize=(20, 60))
for dim in range(self.z_dim):
axs[dim, 0].imshow(x1.squeeze())
axs[dim, 0].set_title(f"{title}: x1, latent {dim}")
axs[dim, 0].set_xticks([])
axs[dim, 0].set_yticks([])
with torch.no_grad():
_, mu, log_var = self.model(x1.unsqueeze(0))
z = mu
for i, step in enumerate(traversal_path):
if not fixed_range:
z_shifted = CCIVariationalAutoEncoder.shift_latent(
z, dim, step, log_var
)
else:
z_shifted = z.clone().cpu().detach()
z_shifted[0][dim] = step
with torch.no_grad():
reconstruction = self.model.decoder(z_shifted)
axs[dim, i + 1].imshow(reconstruction.squeeze().detach().numpy())
if not fixed_range:
axs[dim, i + 1].set_title(f"std {step:.1f}")
else:
axs[dim, i + 1].set_title(f"{step:.1f}")
axs[dim, i + 1].set_xticks([])
axs[dim, i + 1].set_yticks([])
fig.tight_layout()
if save_name:
# close figure to speed up saving
plt.savefig(save_name, bbox_inches="tight", dpi=100)
plt.close(fig)
@staticmethod
def shift_latent(z, dim, num_std, log_var):
"""Shifts latent by num_std along index of latent dimension"""
std = torch.exp(log_var / 2.0)
z_shifted = z.clone().cpu().detach()
z_shifted[0][dim] += num_std * std[0][dim]
return z_shifted
def get_latents(self, train_set=False, num_batches=1000):
"""Returns latent representation for random indices"""
self.model.eval().cpu()
loader = self.data.train_loader if train_set else self.data.test_loader
Z = []
for i, (x1, x2, p) in enumerate(loader):
z = self.get_latent(x1)
Z.append(z)
if i == num_batches:
break
Z = torch.cat(Z)
return Z
def get_latent(self, x):
with torch.no_grad():
_, mu, var = self.model(x)
z = self.model.reparameterize(mu, var)
return z
def compute_latent_variances(self, n_samples=None):
"""Computes variance of latents across transformations of a sample"""
if n_samples is None:
n_samples = len(self.data.X_orig_test)
variances = []
for i in range(n_samples):
x1 = self.data.X_orig_test[i]
self.model.eval().cpu()
with torch.no_grad():
sample_latents = []
for param in self.data.transform_params:
x_transformed = transformations.transform(x1, param)
_, mu, log_var = self.model(x_transformed.unsqueeze(0))
# use mean of latent
z = mu
sample_latents.append(z)
sample_latents = torch.cat(sample_latents)
sample_var = sample_latents.var(dim=0)
variances.append(sample_var)
variances = torch.stack(variances).numpy()
return variances
def compute_latents_per_shape(self, n_samples=None):
"""Computes variance of latents across transformations of a sample"""
if n_samples is None:
n_samples = len(self.data.X_orig_test)
latents = []
for i in range(n_samples):
x1 = self.data.X_orig_test[i]
self.model.eval().cpu()
with torch.no_grad():
sample_latents = []
for param in self.data.transform_params:
x_transformed = transformations.transform(x1, param)
_, mu, log_var = self.model(x_transformed.unsqueeze(0))
# use mean of latent
z = mu
sample_latents.append(z)
sample_latents = torch.cat(sample_latents)
latents.append(sample_latents)
latents = torch.stack(latents).numpy()
return latents
def pca_ranked_eigenvalues(self, n_samples=None):
"""Returns average of ranked normalized eigenvalues for latents"""
latents = self.compute_latents_per_shape(n_samples=n_samples)
n_components = self.data.n_rotations + 1
aggregate_ranked_normalized_eigenvalues = []
for latent in latents:
pca = PCA(n_components=n_components)
pca.fit(latent)
ranked_normalized_eigenvalues = np.sort(pca.explained_variance_ratio_)[::-1]
aggregate_ranked_normalized_eigenvalues.append(
ranked_normalized_eigenvalues
)
aggregate_ranked_normalized_eigenvalues = np.stack(
aggregate_ranked_normalized_eigenvalues
)
average_var_explained = np.mean(aggregate_ranked_normalized_eigenvalues, axis=0)
return average_var_explained
def compute_mutual_info(variances):
"""Variances is a numpy array with shape (n_samples, z_dim)"""
n = variances.shape[0]
m_info = np.log(2 * np.pi * variances).sum(0) / (2.0 * n)
return m_info
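# Interpretation (informal): each term 0.5 * log(2 * pi * var), averaged over samples,
# is the differential entropy of a Gaussian with that variance up to a constant of 0.5,
# so smaller values indicate latent dimensions that barely change across
# transformations of the same shape.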
def load_data(configs, path):
data_configs = json.loads(configs["data"])
if "shapes" and "2k-classes" in path:
data = datasets.SimpleShapes(
configs["batch_size"],
n_rotations=data_configs["n_rotations"],
n_x_translations=data_configs["n_x_translations"],
n_y_translations=data_configs["n_y_translations"],
n_classes=2000,
seed=0,
)
elif "mnist" in path:
data = datasets.ProjectiveSingleDigitMNIST(
configs["batch_size"],
n_rotations=data_configs["n_rotations"],
n_x_translations=data_configs["n_x_translations"],
n_y_translations=data_configs["n_y_translations"],
train_set_proportion=0.1,
valid_set_proportion=0.1,
test_set_proportion=1.0,
seed=0,
)
else:
raise ValueError("data not found")
return data
def load(path):
with open(os.path.join(path, "model_configs.json")) as f:
configs = json.load(f)
data = load_data(configs, path)
model = CCIVariationalAutoEncoder(
data,
z_dim=configs["z_dim"],
beta=configs["beta"],
c_max=configs["c_max"],
distribution=configs["distribution"],
learning_rate=configs["learning_rate"],
n_epochs=configs["n_epochs"],
)
model.load_model(os.path.join(path, "model.pt"))
return model
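# Example usage (hypothetical path, shown for illustration): the directory must contain
# the "model_configs.json" and "model.pt" files written by save():
# vae = load("output/shapes-2k-classes/cci_vae")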
if __name__ == "__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"running on {device}")
n_epochs = 2
batch_size = 16
simple_shapes = datasets.SimpleShapes(batch_size)
vae = CCIVariationalAutoEncoder(
simple_shapes, beta=0.0, c_max=0.0, device=device, n_epochs=n_epochs
)
vae.train()
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
cci_variational_autoencoder.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
import models
import latent_operators
from datasets import datasets
from datasets.data_utils import x_to_image
import plot
import pdb
import os
import shutil
eps = 1e-20
class ComplexAutoEncoder:
"""Trains a shift operator.
Args:
data (AbstractDataset): contains train and test loaders with angles
z_dim (int): dimension of latent space
seed (int): for random number generation
translation (bool): if true, uses an offset identity matrix for rotation
"""
def __init__(
self,
data,
z_dim=405,
seed=0,
encoder_type="ComplexLinear",
decoder_type="ComplexLinear",
transformation_types=None,
indexes=None,
device="cpu",
output_directory="output",
save_name="",
n_rotations=0,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1,),
):
self.z_dim = z_dim
self.seed = seed
self.set_seed()
self.data = data
self.device = device
self.encoder = getattr(models, encoder_type + "Encoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.decoder = getattr(models, decoder_type + "Decoder")(
self.data.n_pixels, self.data.n_channels, z_dim
).to(self.device)
self.transformation_types = transformation_types
self.W_r = torch.nn.ModuleList()
self.W_i = torch.nn.ModuleList()
for i in range(len(self.transformation_types)-1):
self.W_r.append(torch.nn.Linear(z_dim, z_dim, bias=False).to(self.device))
self.W_i.append(torch.nn.Linear(z_dim, z_dim, bias=False).to(self.device))
cardinals = [
n_rotations + 1,
n_x_translations + 1,
n_y_translations + 1,
len(scaling_factors),
]
self.cardinals = cardinals
# function used for transformation
# indexes 0, 1, 2
self.transforms = []
for i in range(len(transformation_types)):
self.transforms.append(self.get_transformation(transformation_types[i], indexes[i]))
self.output_dir = output_directory
self.save_name = save_name
self.best_epoch = 0
self.best_mse = 0
def set_seed(self):
"""Sets seed for random number generation"""
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# Enable anomaly detection to help debug autograd issues
torch.autograd.set_detect_anomaly(True)
def get_transformation(self, name, index):
"""Returns function to performance transformation based name"""
if name is None:
return None
transformation = getattr(latent_operators, name)
return transformation(self.cardinals, self.z_dim, self.device, unique_transfo = True, index=index)
def return_shifts(self, params):
smallest_angle = 360 / (self.data.n_rotations + 1)
int_x = round(self.data.n_pixels / (self.data.n_x_translations + 1))
int_y = round(self.data.n_pixels / (self.data.n_y_translations + 1))
shifts_x = torch.LongTensor([[param.shift_x/int_x for param in params]]).t()
shifts_y = torch.LongTensor([[param.shift_y/int_y for param in params]]).t()
shifts_r = torch.LongTensor([[int(param.angle/smallest_angle) for param in params]]).t()
shifts = []
if self.data.n_rotations > 0:
shifts.append(shifts_r)
if self.data.n_x_translations > 0:
shifts.append(shifts_x)
if self.data.n_y_translations > 0:
shifts.append(shifts_y)
return shifts
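# Worked example (hypothetical configuration): with n_rotations=3, n_pixels=28 and
# n_x_translations=3, smallest_angle=90 and int_x=7, so a Params with angle=180 and
# shift_x=14 contributes shift indices 2 (rotation) and 2 (x-translation).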
def transform(self, z1, shifts):
"""Applies the composed latent operators, with learned complex mixing in between"""
N_transfo = len(self.transforms)
# shifts is a tuple with one entry per transformation
z_r = z1[0]
z_i = z1[1]
for i in range(N_transfo - 1):
z_transformed = self.transforms[i]((z_r, z_i), shifts[i])
z_r = z_transformed[0]
z_i = z_transformed[1]
# keep the pre-update real part so the imaginary update uses the original z_r
z_r_new = self.W_r[i](z_r) - self.W_i[i](z_i)
z_i = self.W_r[i](z_i) + self.W_i[i](z_r)
z_r = z_r_new
z_transformed = self.transforms[N_transfo - 1]((z_r, z_i), shifts[N_transfo - 1])
return z_transformed
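# Between consecutive latent operators the latent z = z_r + i * z_i is mixed by a
# learned complex linear map W = W_r + i * W_i, i.e.
# W z = (W_r z_r - W_i z_i) + i * (W_r z_i + W_i z_r), as in the loop above.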
def train(self, loss_func, learning_rate, n_epochs, log_frequency):
self.encoder.train()
self.decoder.train()
params = list(self.encoder.parameters()) + list(self.decoder.parameters()) + \
list(self.W_r.parameters()) + list(self.W_i.parameters())
optimizer = torch.optim.Adam(params, lr=learning_rate)
train_losses = torch.FloatTensor(n_epochs)
valid_losses = torch.FloatTensor(n_epochs)
best_mse = np.inf
N_pairs = len(self.data.train_loader.dataset)
for epoch in range(n_epochs):
epoch_loss = 0
for i, (x1, x2, angles) in enumerate(self.data.train_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
optimizer.zero_grad()
loss = loss_func(x1, x2, angles)
loss.backward()
optimizer.step()
epoch_loss += loss.item() * x1.size(0)
epoch_loss = epoch_loss / N_pairs
print(f"Epoch {epoch} Train loss: {epoch_loss:0.3e}")
valid_mse = (
self.compute_mean_loss(loss_func, self.data.valid_loader)
.detach()
.item()
)
train_losses[epoch] = epoch_loss
if valid_mse < best_mse:
self.update_state(mse=valid_mse, epoch=epoch)
best_mse = valid_mse
file_name = "checkpoint_{}.pth.tar".format(self.save_name)
self.save_best_checkpoint(
out_dir=self.output_dir,
file_name=file_name,
optimizer_state_dict=optimizer.state_dict(),
)
print(f"Epoch {epoch} validation loss: {valid_mse:0.3e}")
valid_losses[epoch] = valid_mse
return train_losses.detach().numpy(), valid_losses.detach().numpy()
def reconstruct_x1(self, x1):
"""Reconstructs x1 using model"""
self.encoder.eval()
self.decoder.eval()
x1 = x1.to(device=self.device)
with torch.no_grad():
z1 = self.encoder(x1)
x1_reconstruction_r = self.decoder(z1)
return x1_reconstruction_r
def reconstruct_x2(self, x1, param):
"""Reconstructs x2 using model and latent transformation"""
self.encoder.eval()
self.decoder.eval()
x1 = x1.to(device=self.device)
batch_size = x1.size(0)
with torch.no_grad():
z1 = self.encoder(x1)
shifts = self.return_shifts([param])
z_transformed = self.transform(z1, shifts)
x2_reconstruction_r = self.decoder(z_transformed)
return x2_reconstruction_r
def plot_x1_reconstructions(
self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None
):
"""Plots x1 autoencoder reconstruction from z1.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
plot.plot_x1_reconstructions(
pairs, self.reconstruct_x1, indices, train_set, save_name
)
def plot_x2_reconstructions(
self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None
):
"""Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated.
Args:
pairs (datasets.Pairs): contains x1, x2, and params.
model (function): callable f(x1) = x1_reconstruction
indices (list of ints): indices for samples to plot
train_set (bool): if true title is plotted with train otherwise test.
save_name (str): indicates path where images should be saved.
"""
pairs = self.data.X_train if train_set else self.data.X_test
plot.plot_x2_reconstructions(
pairs, self.reconstruct_x2, indices, train_set, save_name
)
def reconstruction_mse_transformed_z1(self, x1, x2, params):
"""Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1), not using ground-truth angles"""
criterion = torch.nn.MSELoss(reduction="none")
batch_size = x1.size(0)
z1 = self.encoder(x1)
x1_reconstruction_r = self.decoder(z1)
x1_reconstruction_loss = criterion(x1_reconstruction_r, x1)
x1_reconstruction_loss = x1_reconstruction_loss.mean()
shifts = self.return_shifts(params)
z_transformed = self.transform(z1, shifts)
x2_reconstruction_r = self.decoder(z_transformed)
x2_reconstruction_loss = criterion(x2_reconstruction_r, x2)
x2_reconstruction_loss = x2_reconstruction_loss.mean()
loss = x1_reconstruction_loss + x2_reconstruction_loss
return loss
def compute_test_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval()
self.decoder.eval()
losses = []
N = 0
with torch.no_grad():
for i, (x1, x2, angles) in enumerate(data_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
bs = x1.size(0)
loss_batch = loss_func(x1, x2, angles)*bs
N += bs
losses.append(loss_batch)
test_loss = torch.stack(losses).sum() / float(N)
self.encoder.train()
self.decoder.train()
return test_loss
def compute_mean_loss(self, loss_func, data_loader):
"""Computes RMSE based on given loss function."""
self.encoder.eval()
self.decoder.eval()
losses = []
with torch.no_grad():
for i, (x1, x2, angles) in enumerate(data_loader):
x1 = x1.to(device=self.device)
x2 = x2.to(device=self.device)
loss_batch = loss_func(x1, x2, angles)
losses.append(loss_batch)
mean_loss = torch.stack(losses).mean()
self.encoder.train()
self.decoder.train()
return mean_loss
def run(
self, learning_rate=0.0005, n_epochs=10, log_frequency=50
):
"""Runs experiment for autoencoder reconstruction."""
loss_func = self.reconstruction_mse_transformed_z1
train_loss, valid_loss = self.train(
loss_func, learning_rate, n_epochs, log_frequency
)
train_mse = self.compute_mean_loss(loss_func, self.data.train_loader)
print(f"Train MSE: {train_mse}")
valid_mse = self.compute_mean_loss(loss_func, self.data.valid_loader)
print(f"Valid MSE: {valid_mse}")
test_mse = self.compute_test_loss(loss_func, self.data.test_loader_batch_100)
print(f"Test MSE: {test_mse}")
return train_loss, valid_loss, train_mse, valid_mse, test_mse
def update_state(self, mse, epoch):
self.best_mse = mse
self.best_epoch = epoch
def load_model(self, path_to_checkpoint):
checkpoint = torch.load(path_to_checkpoint)
self.best_epoch = checkpoint["best_epoch"]
self.encoder.load_state_dict(checkpoint["encoder_state_dict"])
self.decoder.load_state_dict(checkpoint["decoder_state_dict"])
for t in range(len(self.transformation_types) - 1):
self.W_r[t].load_state_dict(checkpoint["W_r"][t])
self.W_i[t].load_state_dict(checkpoint["W_i"][t])
self.best_mse = checkpoint["best_mse"]
return checkpoint["best_mse"], checkpoint["best_epoch"]
def get_current_state(self):
W_r = {}
W_i = {}
for t in range(len(self.transformation_types)-1):
W_r[t] = self.W_r[t].state_dict()
W_i[t] = self.W_i[t].state_dict()
return {
"encoder_state_dict": self.encoder.state_dict(),
"decoder_state_dict": self.decoder.state_dict(),
"W_r": W_r,
"W_i": W_i,
"best_epoch": self.best_epoch,
"best_mse": self.best_mse,
}
def save_best_checkpoint(self, out_dir, file_name, optimizer_state_dict):
"""
:param file_name: filename to save checkpoint in.
:param optimizer_state_dict: state of the optimizer.
:return: str to path where the model is saved.
"""
state = self.get_current_state()
state["optimizer_state_dict"] = optimizer_state_dict
best_path = os.path.join(out_dir, "best_" + file_name)
torch.save(state, best_path)
def plot_multiple_transformations_stacked(self, indices, n_plots, train_set=False, save_name=None):
degree_sign = "\N{DEGREE SIGN}"
if indices is None:
n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test))
indices = np.random.randint(low=0, high=n_samples, size=5)
X = (
self.data.X_orig_train[indices]
if train_set
else self.data.X_orig_test[indices]
).float()
plot.plot_rotations_translations(
X,
self,
n_plots,
self.data.n_rotations,
self.data.n_x_translations,
self.data.n_y_translations,
save_name=save_name
)
def plot_multiple_transformations(self, param_name='angle', indices=None, train_set=False, save_name=None):
"""Plots all rotated reconstructions for given samples"""
if indices is None:
n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test))
indices = np.random.randint(low=0, high=n_samples, size=5)
X = (
self.data.X_orig_train[indices]
if train_set
else self.data.X_orig_test[indices]
).float()
title = (
"Translations" if param_name=='angle' != "angle" else "Rotations"
)
plot.plot_transformations_complex(
X,
self,
title,
save_name=save_name,
param_name=param_name,
supervised=True,
)
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
complex_shift_autoencoder.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
---
Saves model/plots for best validation MSE
"""
import math
import numpy as np
import os
from distutils.dir_util import copy_tree
def save_best_validation_helper(folder, operator):
min_valid_loss = math.inf
for sweep in os.listdir(folder):
if sweep.startswith("best") or sweep.startswith(".DS_Store"):
continue
path = os.path.join(folder, sweep, operator)
try:
valid_loss = np.min(np.load(os.path.join(path, "valid_losses.npy")))
except FileNotFoundError:
print(f"run {sweep} missing for {operator}")
continue
if min_valid_loss >= valid_loss:
min_valid_loss = valid_loss
destination = os.path.join(folder, "best-validation", operator)
copy_tree(path, destination)
def save_all_best_validation(parent_folder):
for experiment in os.listdir(parent_folder):
experiment_path = os.path.join(parent_folder, experiment)
if experiment.endswith("-sweep") and "autoencoder" in experiment and "standard" not in experiment:
save_best_validation_helper(experiment_path, "disentangled-operator")
save_best_validation_helper(experiment_path, "shift-operator")
elif experiment.endswith("-sweep") and "standard-autoencoder" in experiment:
save_best_validation_helper(experiment_path, "standard-autoencoder")
elif experiment.endswith("-sweep") and "cci-vae" in experiment:
save_best_validation_helper(experiment_path, "cci_vae")
save_best_validation_helper(experiment_path, "beta_vae")
save_best_validation_helper(experiment_path, "vae")
if __name__ == "__main__":
user = os.environ["USER"]
parent_folder = f"/checkpoint/{user}/Equivariance/"
save_all_best_validation(parent_folder)
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
save_best_validation.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
"""
Transformations applied to the input images
"""
import torch
import itertools
import numpy as np
import skimage.transform
from dataclasses import dataclass
# TODO: set automatically based on n_pixels
TRANSLATION_INTERVAL = [0, 28]
@dataclass
class Params:
"""
angle (float): counter-clockwise rotation angle in degrees
shift_x (float): shift value to the right
shift_y (float): shift value upwards
scale (float): scaling factor
"""
angle: float = 0.0
shift_x: float = 0.0
shift_y: float = 0.0
scale: float = 1.0
def transform(image, params):
"""
Applies transformations on a single image based on params.
Order of transformation is: rotate, translate, scale
Args:
image (np.array or torch.tensor): of shape [n_pixels, n_pixels]
params (Params): contains parameters for rotations, scaling etc.
Returns: image with transformations applied
"""
assert (
image.ndim == 3
), f"image must be of shape [n_channels, n_pixels, n_pixels] not {image.shape}"
image_transformed = image.squeeze()
# Rotate
if params.angle not in (0.0, 360.0):
# cval is the fill value.
image_transformed = skimage.transform.rotate(
image_transformed, params.angle, cval=image_transformed.min()
)
# Translate
# if edge is reached cut-off portion appears on other side
if params.shift_x != 0.0:
image_transformed = np.roll(image_transformed, int(params.shift_x), axis=1)
if params.shift_y != 0.0:
image_transformed = np.roll(image_transformed, -int(params.shift_y), axis=0)
# Scale
if params.scale != 1.0:
image_transformed = rescale(image_transformed, params.scale)
image_transformed = to_torch(image, image_transformed)
return image_transformed
def rescale(image, scale):
"""Rescales images based on given scale factor"""
scale_transform = skimage.transform.SimilarityTransform(scale=scale)
image = skimage.transform.warp(
image, scale_transform.inverse, mode="constant", cval=image.min(),
)
return image
def to_torch(image, image_transformed):
"""Converts numpy matrix to torch tensor with correct shape"""
image_transformed = image_transformed.reshape(image.shape)
if torch.is_tensor(image_transformed):
return image_transformed.float()
if torch.is_tensor(image):
image_transformed = torch.from_numpy(image_transformed).float()
return image_transformed
def get_transform_params(
n_rotations, n_x_translations, n_y_translations, scaling_factors,
):
"""Returns transform params corresponding given values.
Translations subdivide translation interval.
Args:
n_rotations (int): number of subdivisions of 360 to apply.
n_x_translations (int): number of shifts along x-axis
n_y_translations (int): number of shifts along y-axis
scaling_factors (list or tuple floats): representing the scaling factors to use
Returns: Params object
"""
shifts_x = get_shifts(n_x_translations, TRANSLATION_INTERVAL)
shifts_y = get_shifts(n_y_translations, TRANSLATION_INTERVAL)
for angle in get_rotation_angles(n_rotations):
for shift_x, shift_y in itertools.product(shifts_x, shifts_y):
for scale in scaling_factors:
params = Params(
angle=angle, shift_x=shift_x, shift_y=shift_y, scale=scale
)
yield params
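# Minimal example: get_transform_params(1, 0, 0, (1.0,)) yields two Params,
# Params(angle=0.0, shift_x=0, shift_y=0, scale=1.0) and
# Params(angle=180.0, shift_x=0, shift_y=0, scale=1.0).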
def get_shifts(n_translations, interval):
"""Returns shifts along given axis by dividing interval.
Args:
interval (list of ints): [0, n_pixels]
n_translations (int): should be divisible by n_pixels
"""
if n_translations == 0:
return [0]
elif n_translations == 1:
return [0, interval[1] // 2]
min_shift = round(interval[1] / (n_translations + 1))
steps = [n * min_shift for n in range(n_translations + 1)]
return steps
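# Example: get_shifts(3, [0, 28]) -> [0, 7, 14, 21] and get_shifts(1, [0, 28]) -> [0, 14].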
def get_rotation_angles(n_rotations):
"""Yields rotation angles based on subdivisions given.
Example:
>>> get_rotation_angles(2) => [0.0, 120.0, 240.0]
"""
min_angle = 360.0 / (n_rotations + 1)
for n in range(n_rotations + 1):
yield min_angle * n
def shift_to_angle(shift_val, n_transformations):
"""Returns the angle corresponding to the shift_val.
Example: [0, 32], shift_val = 4, we should get 4 / 32 * 360
"""
if shift_val == TRANSLATION_INTERVAL[1]:
return 0.0
shift_ratio = float(shift_val) / TRANSLATION_INTERVAL[1]
angle = 360.0 * shift_ratio
return angle
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
datasets/transformations.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from sklearn.model_selection import StratifiedShuffleSplit
import torchvision
from . import data_utils
from abc import ABC, abstractmethod
from datasets import transformations
import numpy as np
import random
import json
class AbstractDataset(ABC):
"""
Defines common fields needed for datasets
Attributes:
batch_size (int): batch size used for dataloaders
train_load (torch.utils.data.Dataset): X1, X2, Angle(s)
test_load (torch.utils.data.Dataset): X1, X2, Angle(s)
pairs (bool): indicates whether to use Pairs dataset where both x1 and x2 are transformed.
Otherwise, Single dataset is used where only x1 is transformed.
"""
def __init__(
self,
batch_size,
n_rotations=0,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1.0,),
seed=0,
pairs=True,
):
AbstractDataset.set_seed(seed)
self.batch_size = batch_size
self.n_x_translations, self.n_y_translations = (
n_x_translations,
n_y_translations,
)
self.n_rotations, self.scaling_factors = n_rotations, scaling_factors
self.X_orig_train, self.X_orig_valid, self.X_orig_test = self.get_original()
self.transform_params = list(
transformations.get_transform_params(
n_rotations=self.n_rotations,
n_x_translations=self.n_x_translations,
n_y_translations=self.n_y_translations,
scaling_factors=self.scaling_factors,
)
)
data_cls = Pairs if pairs else Single
self.X_train = data_cls(self.X_orig_train, self.transform_params)
self.train_loader = torch.utils.data.DataLoader(
self.X_train,
batch_size=self.batch_size,
shuffle=True,
collate_fn=Pairs.collate,
)
# For validation and test, use shuffle = False to have SequentialSampler(dataset) by default
# (see https://github.com/pytorch/pytorch/blob/bfa94487b968ccb570ef8cd9547029b967e76ed0/torch/utils/data/dataloader.py#L257)
self.X_valid = data_cls(self.X_orig_valid, self.transform_params)
self.valid_loader = torch.utils.data.DataLoader(
self.X_valid,
batch_size=self.batch_size,
shuffle=False,
collate_fn=Pairs.collate,
)
self.X_test = data_cls(self.X_orig_test, self.transform_params)
self.test_loader = torch.utils.data.DataLoader(
self.X_test,
batch_size=self.batch_size,
shuffle=False,
collate_fn=Pairs.collate,
)
self.test_loader_batch_1 = torch.utils.data.DataLoader(
self.X_test, batch_size=1, shuffle=False, collate_fn=Pairs.collate,
)
self.test_loader_batch_100 = torch.utils.data.DataLoader(
self.X_test, batch_size=100, shuffle=False, collate_fn=Pairs.collate,
)
def __repr__(self):
attributes = {
"n_rotations": self.n_rotations,
"n_x_translations": self.n_x_translations,
"n_y_translations": self.n_y_translations,
"scaling_factors": self.scaling_factors,
}
return json.dumps(attributes)
@abstractmethod
def get_original(self):
"""Sets X_train and X_test to images in original dataset"""
pass
@property
def total_n_transformations(self):
"""Computes the total number of transformations"""
n_translations = (1 + self.n_x_translations) * (1 + self.n_y_translations)
n = n_translations * (1 + self.n_rotations) * len(self.scaling_factors)
return n
@staticmethod
def set_seed(seed):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
@classmethod
def __subclasshook__(cls, C):
"""Verifies dataset has loader of correct type"""
for loader in ["train_loader", "test_loader"]:
is_valid = hasattr(cls, loader) and isinstance(
(getattr(cls, loader)), Dataset
)
if not is_valid:
return False
return True
class ProjectiveMNIST(AbstractDataset):
"""Builds MNIST dataset with transformations applied lazly.
Loader contains: (digit, rotated_digit, angle)
Shape of Data: (batch_size, 1, 28, 28)
Args:
batch_size (int): batch size to user for dataloaders
n_rotations (int): number discrete rotations per image
train_set_proportion (float): proportion of training set to keep
valid_set_proportion (float): proportion of training set to keep
test_set_proportion (float): proportion of training set to keep
"""
def __init__(
self,
batch_size,
n_rotations=4,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1.0,),
train_set_proportion=0.1,
valid_set_proportion=1.0,
test_set_proportion=1.0,
seed=0,
pairs=True,
):
self.train_set_proportion = train_set_proportion
self.valid_set_proportion = valid_set_proportion
self.test_set_proportion = test_set_proportion
super().__init__(
batch_size,
n_rotations,
n_x_translations,
n_y_translations,
scaling_factors,
seed,
pairs,
)
self.n_pixels = self.X_orig_train[0].shape[1]
self.n_channels = 1
def get_original(self):
"""Returns original training and test images"""
mnist_train, mnist_val, mnist_test = self.download_mnist()
# normalize MNIST so values are between [0, 1]
x_train = mnist_train.data.unsqueeze(1) / 255.0
x_val = mnist_val.data.unsqueeze(1) / 255.0
x_test = mnist_test.data.unsqueeze(1) / 255.0
return x_train, x_val, x_test
@staticmethod
def stratified_sample(X, y, size):
"""Returns a stratified sample"""
if size == 1.0:
return X
test_size = 1 - size
sampler = StratifiedShuffleSplit(
n_splits=1, test_size=test_size, random_state=0
)
indices, _ = next(sampler.split(X, y))
X_sample = X[indices]
return X_sample
@staticmethod
def split_train_valid(train_set, split=10000):
num_train = len(train_set)
indices = list(range(num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_data = train_set.data[train_idx]
valid_data = train_set.data[valid_idx]
train_targets = train_set.targets[train_idx]
valid_targets = train_set.targets[valid_idx]
return train_data, train_targets, valid_data, valid_targets
def download_mnist(self):
"""Skips download if cache is available"""
train_set = torchvision.datasets.MNIST(
"/tmp/",
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
test_set = torchvision.datasets.MNIST(
"/tmp/",
train=False,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
(
train_data,
train_targets,
valid_data,
valid_targets,
) = ProjectiveMNIST.split_train_valid(train_set)
# stratified samples
train_data = ProjectiveMNIST.stratified_sample(
train_data, train_targets, self.train_set_proportion
)
valid_data = ProjectiveMNIST.stratified_sample(
valid_data, valid_targets, self.valid_set_proportion
)
test_data = ProjectiveMNIST.stratified_sample(
test_set.data, test_set.targets, self.test_set_proportion
)
return train_data, valid_data, test_data
class ProjectiveSingleDigitMNIST(AbstractDataset):
"""Builds MNIST dataset with transformations applied lazly.
Loader contains: (digit, rotated_digit, angle)
Shape of Data: (batch_size, 1, 28, 28)
Args:
batch_size (int): batch size to user for dataloaders
n_rotations (int): number discrete rotations per image
train_set_proportion (float): proportion of training set to keep
valid_set_proportion (float): proportion of training set to keep
test_set_proportion (float): proportion of training set to keep
"""
def __init__(
self,
batch_size,
n_rotations=4,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1.0,),
train_set_proportion=0.1,
valid_set_proportion=1.0,
test_set_proportion=1.0,
seed=0,
pairs=True,
digit=4,
):
self.train_set_proportion = train_set_proportion
self.valid_set_proportion = valid_set_proportion
self.test_set_proportion = test_set_proportion
self.digit = digit
super().__init__(
batch_size,
n_rotations,
n_x_translations,
n_y_translations,
scaling_factors,
seed,
pairs,
)
self.n_pixels = self.X_orig_train[0].shape[1]
self.n_channels = 1
def get_original(self):
"""Returns original training and test images"""
mnist_train, mnist_val, mnist_test = self.download_mnist()
# normalize MNIST so values are between [0, 1]
x_train = mnist_train.data.unsqueeze(1) / 255.0
x_val = mnist_val.data.unsqueeze(1) / 255.0
x_test = mnist_test.data.unsqueeze(1) / 255.0
return x_train, x_val, x_test
@staticmethod
def split_train_valid(train_set, split=10000):
num_train = len(train_set)
indices = list(range(num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_data = train_set.data[train_idx]
valid_data = train_set.data[valid_idx]
train_targets = train_set.targets[train_idx]
valid_targets = train_set.targets[valid_idx]
return train_data, train_targets, valid_data, valid_targets
def sample_single_digit(self, x, targets, proportion):
idx = targets == self.digit
x_digit = x[idx]
sample_size = int(len(idx) * proportion)
return x_digit[:sample_size]
def download_mnist(self):
"""Skips download if cache is available"""
train_set = torchvision.datasets.MNIST(
"/tmp/",
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
test_set = torchvision.datasets.MNIST(
"/tmp/",
train=False,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
(
train_data,
train_targets,
valid_data,
valid_targets,
) = ProjectiveMNIST.split_train_valid(train_set)
# stratified samples
train_data = self.sample_single_digit(
train_data, train_targets, self.train_set_proportion
)
valid_data = self.sample_single_digit(
valid_data, valid_targets, self.valid_set_proportion
)
test_data = self.sample_single_digit(
test_set.data, test_set.targets, self.test_set_proportion
)
return train_data, valid_data, test_data
class SimpleShapes(AbstractDataset):
def __init__(
self,
batch_size,
n_pixels=28,
n_classes=300,
n_points=5,
n_rotations=9,
n_x_translations=0,
n_y_translations=0,
scaling_factors=(1.0,),
n_channels=1,
seed=0,
pairs=True,
):
self.n_pixels, self.n_classes = n_pixels, n_classes
self.n_points, self.n_channels = n_points, n_channels
super().__init__(
batch_size,
n_rotations,
n_x_translations,
n_y_translations,
scaling_factors,
seed,
pairs,
)
@staticmethod
def normalize(X):
return torch.clamp(X + 1, 0.0, 1.0)
def get_original(self):
np.random.seed(1) # Sets seed
data = data_utils.generate_dataset(self.n_pixels, self.n_classes, self.n_points)
(X_train, _), (X_test, _) = data
X_trainvalid = torch.from_numpy(X_train).unsqueeze(1).float()
N = X_trainvalid.size(0)
Nvalid = int(N * 0.2) # Keeps 20% for validation
X_valid = SimpleShapes.normalize(X_trainvalid[:Nvalid, ...])
X_train = SimpleShapes.normalize(X_trainvalid[Nvalid:, ...])
X_test = SimpleShapes.normalize(torch.from_numpy(X_test).unsqueeze(1).float())
return X_train, X_valid, X_test
class Single(Dataset):
"""Contains x1 transformed with parameters.
Total number of samples == x1 transformed
"""
def __init__(self, X, params):
self.X = X
self.params = params
def __len__(self):
return self.X.shape[0] * len(self.params)
@staticmethod
def collate(batch):
"""Used for dataloader"""
X1 = torch.stack([item[0] for item in batch])
X2 = torch.stack([item[1] for item in batch])
params = [item[2] for item in batch]
return X1, X2, params
def get_x_idx(self, idx):
"""Returns the idx of the original image x."""
return idx // len(self.params)
def get_x1(self, idx, x_idx):
x = self.X[x_idx]
p = len(self.params)
x1_params_idx = idx % p
x1_params = self.params[x1_params_idx]
x1 = transformations.transform(x, x1_params)
return x1, x1_params
def __getitem__(self, idx):
x_idx = self.get_x_idx(idx)
x1, x1_params = self.get_x1(idx, x_idx)
x2 = self.X[x_idx]
return x1, x2, x1_params
class Pairs(Dataset):
"""Contains x1, x2, and transformation params.
Total of n_samples * num_params^2 pairs:
(x0, t0) => x1
(x1, t0) => x2
(x0, t0) => x1
(x1, t1) => x2
Args:
X (original images): [n_samples, n_pixels, n_pixels]
params (list of transformations.Params): parameters for transformations
"""
def __init__(self, X, params):
self.X = X
self.params = params
def __len__(self):
return self.X.shape[0] * (len(self.params) ** 2)
@staticmethod
def collate(batch):
"""Used for dataloader"""
X1 = torch.stack([item[0] for item in batch])
X2 = torch.stack([item[1] for item in batch])
params = [item[2] for item in batch]
return X1, X2, params
def get_x_idx(self, idx):
"""Returns the idx of the original image x."""
return idx // (len(self.params) ** 2)
def get_x1(self, idx, x_idx):
x = self.X[x_idx]
p = len(self.params)
x1_params_idx = (idx - (x_idx) * p * p) // p
x1_params = self.params[x1_params_idx]
x1 = transformations.transform(x, x1_params)
return x1
def get_x2_params(self, idx, x_idx):
p = len(self.params)
x1_params_idx = (idx - (x_idx) * p * p) // p
x2_params_idx = idx - ((x_idx * p * p) + (x1_params_idx * p))
return self.params[x2_params_idx]
def __getitem__(self, idx):
x_idx = self.get_x_idx(idx)
x1 = self.get_x1(idx, x_idx)
x2_params = self.get_x2_params(idx, x_idx)
x2 = transformations.transform(x1, x2_params)
return x1, x2, x2_params
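# Worked example of the indexing above: with len(params) == 2 and idx == 5,
# get_x_idx gives x_idx = 5 // 4 = 1, x1 uses params[(5 - 4) // 2] = params[0],
# and x2 applies params[5 - (4 + 0)] = params[1] on top of x1.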
class ShapeNet(AbstractDataset):
pass
class ShapeNetIterator(Dataset):
"""ShapeNet Iterator"""
def __init__(self, V, transform=None):
self.V = V
self.preprocess = transforms.Compose(
[
# transforms.Resize(256),
# transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
def __len__(self):
return len(self.V[0])
def __getitem__(self, idx):
return tuple([self.preprocess(self.V[v][idx]) for v in range(len(self.V))])
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
datasets/datasets.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
datasets/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
"""Script demonstrating drawing of anti-aliased lines using Xiaolin Wu's line
algorithm
usage: python xiaolinwu.py [output-file]
"""
from __future__ import division
import sys
from PIL import Image
def _fpart(x):
return x - int(x)
def _rfpart(x):
return 1 - _fpart(x)
def putpixel(img, xy, color, alpha=1):
"""Paints color over the background at the point xy in img.
Use alpha for blending. alpha=1 means a completely opaque foreground.
"""
c = tuple(map(lambda bg, fg: int(round(alpha * fg + (1-alpha) * bg)),
img.getpixel(xy), color))
img.putpixel(xy, c)
def draw_line(img, p1, p2, color):
"""Draws an anti-aliased line in img from p1 to p2 with the given color."""
x1, y1 = p1
x2, y2 = p2
dx, dy = x2-x1, y2-y1
steep = abs(dx) < abs(dy)
p = lambda px, py: ((px,py), (py,px))[steep]
if steep:
x1, y1, x2, y2, dx, dy = y1, x1, y2, x2, dy, dx
if x2 < x1:
x1, x2, y1, y2 = x2, x1, y2, y1
grad = dy/dx
intery = y1 + _rfpart(x1) * grad
def draw_endpoint(pt):
x, y = pt
xend = round(x)
yend = y + grad * (xend - x)
xgap = _rfpart(x + 0.5)
px, py = int(xend), int(yend)
putpixel(img, p(px, py), color, _rfpart(yend) * xgap)
putpixel(img, p(px, py+1), color, _fpart(yend) * xgap)
return px
xstart = draw_endpoint(p(*p1)) + 1
xend = draw_endpoint(p(*p2))
for x in range(xstart, xend):
y = int(intery)
putpixel(img, p(x, y), color, _rfpart(intery))
putpixel(img, p(x, y+1), color, _fpart(intery))
intery += grad
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
datasets/xiaolinwu.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
from .xiaolinwu import draw_line
blue = (0, 0, 255)
yellow = (255, 255, 0)
white = (255, 255, 255)
black = (0, 0, 0)
def generate_images_from_coords(NPX, NP, C, cols):
images = list()
for c in range(C.shape[2]):
img = Image.new("RGB", (NPX, NPX), white)
for p in range(NP - 1):
if (C[0, p + 1, c] != C[0, p, c]) or (C[1, p + 1, c] != C[1, p, c]):
draw_line(
img,
(C[0, p + 1, c], C[1, p + 1, c]),
(C[0, p, c], C[1, p, c]),
cols[c],
)
draw_line(
img,
(C[0, p, c], C[1, p, c]),
(C[0, p + 1, c], C[1, p + 1, c]),
cols[c],
)
if (C[0, p + 1, c] != C[0, 0, c]) or (C[1, p + 1, c] != C[1, 0, c]):
draw_line(
img, (C[0, p + 1, c], C[1, p + 1, c]), (C[0, 0, c], C[1, 0, c]), cols[c]
)
draw_line(
img, (C[0, 0, c], C[1, 0, c]), (C[0, p + 1, c], C[1, p + 1, c]), cols[c]
)
images.append(np.array(img))
return images
# Draw images corresponding to different classes
def plot_and_save_grid(NPX, images, margin=1, name="FIGS/junk.png"):
n_images = len(images)
grid = np.zeros((NPX + 2 * margin, NPX * n_images + margin * n_images + margin, 3))
pointer = 0
for img in images:
grid[
margin : NPX + margin, 0 + pointer + margin : NPX + pointer + margin, :
] = img
pointer += NPX + margin
im = Image.fromarray(np.uint8((grid)))
im.save(name)
return im
class MyDataset(Dataset):
"""Wraps a tuple of aligned sequences as a torch Dataset.
Args:
V (tuple of sequences): each V[v] is indexed by the same sample index.
transform (callable, optional): optional transform to be applied on a sample.
"""
def __init__(self, V, transform=None):
self.V = V
self.transform = transform
def __len__(self):
return len(self.V[0])
def __getitem__(self, idx):
return tuple([self.V[v][idx] for v in range(len(self.V))])
def pytorch_dataset(V, batch_size):
ts = MyDataset(V)
loader = DataLoader(ts, batch_size=batch_size, shuffle=True)
return loader
def generate_dataset(NPX, NC, NP):
NS = NC * 2 # number of samples
# coordinates of each classes of objects
C = np.random.randint(0 + NPX / 6, NPX - 1 - NPX / 6, (2, NP, NC))
cols = np.zeros((NS, 3))
# Generate images corresponding to different classes using Xiaolin Wu's line algorithm for anti-aliasing
X = np.array(
generate_images_from_coords(NPX, NP, C[:, :, :].reshape((2, NP, NC)), cols)
)
X = 1 - np.mean(X, axis=3)
# normalize (negative sign ensures the background is the minimum value)
X = X / -X.mean()
y = np.arange(NC)
y = y.flatten()
Y = y.astype(int)
split = NS // 4
Xtrain = X[:split]
Ytrain = Y[:split]
Xtest = X[split:]
Ytest = Y[split:]
return ((Xtrain, Ytrain), (Xtest, Ytest))
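# Illustrative shapes (assuming the defaults used by SimpleShapes, NPX=28, NC=300, NP=5):
# X holds NC grayscale images of size NPX x NPX, and the split gives
# Xtrain.shape == (150, 28, 28) and Xtest.shape == (150, 28, 28).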
def generate_angles(NT1, NT2, NC):
# create pairs of shape with all angles
NT = NT1 * NT2 ** 2
[ind1, ind2] = np.meshgrid(range(NT), range(NT))
s1 = ind1.flatten()
s2 = ind2.flatten()
alphas = (s1 - s2) % (NT1)
sangle1 = np.floor(s1 / NT2 ** 2)
sangle2 = np.floor(s2 / NT2 ** 2)
strans1 = s1 % NT2 ** 2
strans2 = s2 % NT2 ** 2
stransx1 = np.floor(strans1 / NT2)
stransx2 = np.floor(strans2 / NT2)
stransy1 = strans1 % NT2
stransy2 = strans2 % NT2
alphas1 = (sangle1 - sangle2) % (NT1)
alphas2 = (stransx1 - stransx2) % (NT2)
alphas3 = (stransy1 - stransy2) % (NT2)
s1_all_shapes = (
np.tile(s1, (int(NC / 2)))
+ NT * np.tile(np.arange(int(NC / 2)).T, (NT * NT, 1)).T.flatten()
)
s2_all_shapes = (
np.tile(s2, (int(NC / 2)))
+ NT * np.tile(np.arange(int(NC / 2)).T, (NT * NT, 1)).T.flatten()
)
alphas_all_shapes1 = np.tile(alphas1, int(NC / 2))
alphas_all_shapes2 = np.tile(alphas2, int(NC / 2))
alphas_all_shapes3 = np.tile(alphas3, int(NC / 2))
alphas = (alphas1, alphas2, alphas3)
alphas_all_shapes = (alphas_all_shapes1, alphas_all_shapes2, alphas_all_shapes3)
return s1, s2, s1_all_shapes, s2_all_shapes, alphas, alphas_all_shapes
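# Informally: generate_angles enumerates every ordered pair (s1, s2) of the
# NT = NT1 * NT2**2 transformations of a shape and returns the relative rotation
# index (mod NT1) and relative x/y translation indices (mod NT2) for each pair,
# plus the same quantities tiled over NC / 2 shapes.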
def x_to_image(x):
"""Takes a single input x and transforms it into image for im.show"""
if x.dim() == 2:
n_channels = 1
else:
n_channels = x.shape[0]
n_pixels = x.shape[1]
x_image = x.reshape(n_channels, n_pixels, n_pixels)
x_image = x_image.permute(1, 2, 0)
    # squeeze to drop the channel dimension in the single-channel case
x_image = x_image.squeeze()
return x_image
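# Illustrative usage sketch (not part of the original file): it only uses the
# functions defined above; the argument values (NPX, NC, NP, batch size) are
# arbitrary small settings chosen for demonstration.
def _example_shape_loader(NPX=32, NC=4, NP=3, batch_size=8):
    """Generate a tiny synthetic shape dataset and wrap the training split in a
    shuffled DataLoader via MyDataset / pytorch_dataset."""
    (Xtrain, Ytrain), (Xtest, Ytest) = generate_dataset(NPX, NC, NP)
    loader = pytorch_dataset((Xtrain, Ytrain), batch_size)
    for x_batch, y_batch in loader:
        # x_batch: grayscale (batch, NPX, NPX) images, y_batch: integer labels
        break
    return loader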
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
datasets/data_utils.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import pytest
from datasets import datasets
from cci_variational_autoencoder import CCIVariationalAutoEncoder
BATCH_SIZE = 16
@pytest.fixture(scope="module")
def rotated_mnist():
rotated_mnist = datasets.ProjectiveMNIST(
BATCH_SIZE,
n_rotations=9,
train_set_proportion=0.001,
test_set_proportion=0.001,
valid_set_proportion=0.001,
)
return rotated_mnist
@pytest.fixture(scope="module")
def simple_shapes():
batch_size = 16
return datasets.SimpleShapes(batch_size, n_classes=10)
class TestCCIVariationalAutoEncoder:
def test_vae(self, simple_shapes):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
simple_shapes,
beta=1.0,
c_max=0.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train()
def test_beta_vae(self, simple_shapes):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
simple_shapes,
beta=1.0,
c_max=0.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train()
def test_cci_vae(self, simple_shapes):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
simple_shapes,
beta=100.0,
c_max=36.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train()
class TestProjectiveMNISTVAE:
def test_vae(self, rotated_mnist):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
rotated_mnist,
beta=1.0,
c_max=0.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train(stop_early=True)
def test_cci_vae(self, rotated_mnist):
n_epochs, learning_rate = 1, 0.001
model = CCIVariationalAutoEncoder(
rotated_mnist,
beta=100.0,
c_max=36.0,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
)
model.train(stop_early=True)
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
tests/test_cci_vae.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import math
from datasets import transformations
from datasets import datasets
class TestSimpleShapes:
def test_train_loader(self):
simple_shapes = datasets.SimpleShapes(16, n_classes=3)
assert hasattr(simple_shapes, "train_loader")
assert hasattr(simple_shapes, "test_loader")
assert len(simple_shapes.train_loader) > 0
assert len(simple_shapes.test_loader) > 0
def test_transformations(self):
simple_shapes = datasets.SimpleShapes(
16,
n_classes=3,
n_rotations=9,
n_x_translations=5,
n_y_translations=10,
scaling_factors=(1.0, 1.2),
)
assert simple_shapes.total_n_transformations > 50
class TestProjectiveMNIST:
def test_creation(self):
"""Verifies rotated mnist is created properly"""
n_rotations = 9
batch_size = 16
train_size = 5000
rotated_mnist = datasets.ProjectiveMNIST(batch_size, n_rotations=n_rotations)
expected_n_batches = math.ceil(
(rotated_mnist.total_n_transformations ** 2) * train_size / batch_size
)
assert len(rotated_mnist.train_loader) == expected_n_batches
# test shape of x2
assert rotated_mnist.X_train[3][1].shape == torch.Size([1, 28, 28])
def test_proportion(self):
n_rotations = 9
batch_size = 16
train_proportion = 0.001
test_proportion = 0.005
# 10k for validation
full_train_size = 50000
full_test_size = 10000
rotated_mnist = datasets.ProjectiveMNIST(
batch_size,
n_rotations=n_rotations,
train_set_proportion=train_proportion,
valid_set_proportion=train_proportion,
test_set_proportion=test_proportion,
)
expected_train_size = (
full_train_size * train_proportion * (n_rotations + 1) ** 2
)
expected_test_size = full_test_size * test_proportion * (n_rotations + 1) ** 2
assert len(rotated_mnist.X_train) == expected_train_size
assert len(rotated_mnist.X_test) == expected_test_size
class TestTransformations:
def test_transform(self):
shape = (1, 30, 30)
image = torch.rand(shape)
params = transformations.Params(angle=45.0)
rotated_X = transformations.transform(image, params)
assert torch.is_tensor(rotated_X)
assert rotated_X.shape == image.shape
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
tests/test_datasets.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import pytest
from datasets import datasets
from autoencoder import AutoEncoder
class TestAutoencoder:
@pytest.fixture(scope="module")
def simple_shapes(self):
batch_size = 4
return datasets.SimpleShapes(batch_size, n_classes=10, n_rotations=3)
def test_autoencoder(self, simple_shapes):
n_epochs, learning_rate = 1, 0.001
model = AutoEncoder(
simple_shapes, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate
)
model.run(stop_early=True)
def test_autoencoder_with_shift_operator(self, simple_shapes):
"""Tests autoencoder with latent rotation"""
n_epochs, learning_rate = 1, 0.001
model = AutoEncoder(
simple_shapes,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
latent_operator_name="ShiftOperator",
)
model.run(stop_early=True)
def test_autoencoder_with_disentangled_rotation(self, simple_shapes):
"""Tests autoencoder with latent rotation"""
n_epochs, learning_rate = 1, 0.001
model = AutoEncoder(
simple_shapes,
device="cpu",
n_epochs=n_epochs,
learning_rate=learning_rate,
latent_operator_name="DisentangledRotation",
)
model.run(stop_early=True)
class TestProjectiveMnistAutoencoder:
    # NB: pytest skips test classes that define __init__, so these are class
    # attributes rather than instance attributes.
    n_epochs = 1
    learning_rate = 0.01
def test_standard_autoencoder(self, rotated_mnist):
model = AutoEncoder(
rotated_mnist, n_epochs=self.n_epochs, learning_rate=self.learning_rate
)
model.run(stop_early=True)
def test_rotated_autoencoder(self, rotated_mnist):
model = AutoEncoder(
rotated_mnist,
z_dim=400,
latent_operator_name="DisentangledRotation",
n_epochs=self.n_epochs,
learning_rate=self.learning_rate,
)
model.run(stop_early=True)
def test_shift_operator_autoencoder(self, rotated_mnist):
model = AutoEncoder(
rotated_mnist,
z_dim=400,
latent_operator_name="ShiftOperator",
n_epochs=self.n_epochs,
learning_rate=self.learning_rate,
)
model.run(stop_early=True)
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
tests/test_autoencoder.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
complex_shift_operator/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import argparse
import torch
import sys
sys.path.append("..")
from datasets import datasets
from weakly_complex_shift_autoencoder import WeaklyComplexAutoEncoder
from complex_shift_autoencoder import ComplexAutoEncoder
import os
import numpy as np
import random
import torch.backends.cudnn as cudnn
use_cuda = True if torch.cuda.is_available() else False
parser = argparse.ArgumentParser(
description="Fully/Weakly supervised version of shift operator"
)
# General arguments
parser.add_argument("--seed", type=int, default=0)
parser.add_argument(
"--output_directory",
type=str,
default="output",
help="In this directory the models will be "
"saved. Will be created if doesn't exist.",
)
parser.add_argument("--n_epochs", type=int, default="10", help="Number of epochs.")
parser.add_argument("--lr", type=float, default="0.001", help="Learning rate.")
parser.add_argument("--bs", type=int, default="16", help="Batch size.")
parser.add_argument(
"--n_rot", type=int, default="9", help="Number of rotations (for the model)."
)
parser.add_argument(
"--data_n_rot", type=int, default="9", help="Number of rotations (for the data)."
)
parser.add_argument(
"--n_x",
type=int,
default="0",
help="Number of x translations in x (for the model).",
)
parser.add_argument(
"--data_n_x",
type=int,
default="0",
help="Number of x translations in x (for the data).",
)
parser.add_argument(
"--n_y",
type=int,
default="0",
help="Number of y translations in y (for the model).",
)
parser.add_argument(
"--data_n_y",
type=int,
default="0",
help="Number of y translations in y (for the data).",
)
parser.add_argument("--tr_prop", type=float, default="0.01", help="Train proportion.")
parser.add_argument("--te_prop", type=float, default="0.01", help="Test proportion.")
parser.add_argument("--val_prop", type=float, default="0.01", help="Valid proportion.")
parser.add_argument("--n_classes", type=int, default="300", help="Number of classes.")
parser.add_argument("--dataset", type=str, default="mnist", help="Dataset")
parser.add_argument(
    "--sftmax", type=int, default=1,
    help="If 1, use softmax weighting (deprecated: softmax is always used).",
)
parser.add_argument("--tau", type=float, default=0.1, help="Temperature of softmax.")
parser.add_argument("--mode", type=str, default="train", help="training or test mode")
parser.add_argument("--supervised", type=int, default=0, help="Switches between weakly and fully supervised.")
def main(params):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"running on {device}")
args = parser.parse_args(params)
SEED = int(args.seed)
random.seed(SEED)
torch.manual_seed(SEED)
np.random.seed(SEED)
torch.cuda.manual_seed_all(SEED)
if args.dataset == "simpleshapes":
data = datasets.SimpleShapes(
batch_size=args.bs,
n_x_translations=args.data_n_x,
n_y_translations=args.data_n_y,
n_rotations=args.data_n_rot,
n_classes=args.n_classes,
n_pixels=28,
)
elif args.dataset == "mnist":
data = datasets.ProjectiveMNIST(
batch_size=args.bs,
n_x_translations=args.data_n_x,
n_y_translations=args.data_n_y,
n_rotations=args.data_n_rot,
train_set_proportion=args.tr_prop,
test_set_proportion=args.te_prop,
valid_set_proportion=args.val_prop,
)
if args.mode == "train":
print("Training")
if args.mode == "test":
print("Testing")
# automatically set z_dim to image size
image_size = data.n_pixels ** 2
if not os.path.exists(args.output_directory):
os.mkdir(args.output_directory)
dict_args = vars(args)
save_name = "_".join(
[
"{0}_{1}".format(key, dict_args[key])
for key in dict_args
if key not in ["output_directory", "mode"]
]
)
if args.supervised:
transformation_types = []
indexes = []
if args.n_rot > 0:
transformation_types.append("ComplexShiftOperator")
indexes.append(0)
if args.n_x > 0:
transformation_types.append("ComplexShiftOperator")
indexes.append(1)
if args.n_y > 0:
transformation_types.append("ComplexShiftOperator")
indexes.append(2)
model_with_rotation = ComplexAutoEncoder(
data,
transformation_types=transformation_types,
indexes=indexes,
device=device,
z_dim=image_size,
seed=SEED,
output_directory=args.output_directory,
save_name=save_name,
n_rotations=args.n_rot,
n_x_translations=args.n_x,
n_y_translations=args.n_y,
)
n_transfos = len(indexes)
else:
model_with_rotation = WeaklyComplexAutoEncoder(
data,
transformation_type="ComplexShiftOperator",
device=device,
z_dim=image_size,
seed=SEED,
temperature=args.tau,
output_directory=args.output_directory,
save_name=save_name,
use_softmax=args.sftmax,
n_rotations=args.n_rot,
n_x_translations=args.n_x,
n_y_translations=args.n_y,
)
if args.mode == "train":
(
train_loss,
valid_loss,
train_mse,
valid_mse,
test_mse,
) = model_with_rotation.run(n_epochs=args.n_epochs, learning_rate=args.lr)
perf = np.array([train_mse, valid_mse, test_mse])
torch.save(perf, os.path.join(args.output_directory, "final_mse_" + save_name))
torch.save(
train_loss, os.path.join(args.output_directory, "train_loss_" + save_name)
)
torch.save(
valid_loss, os.path.join(args.output_directory, "valid_loss_" + save_name)
)
file_name = "best_checkpoint_{}.pth.tar".format(model_with_rotation.save_name)
path_to_model = os.path.join(args.output_directory, file_name)
best_mse, best_epoch = model_with_rotation.load_model(path_to_model)
##### Plots train reconstructions
samples_pairs = np.random.randint(
0, len(model_with_rotation.data.X_train), size=(10,)
).tolist()
model_with_rotation.plot_x2_reconstructions(
indices=samples_pairs,
train_set=True,
save_name=os.path.join(args.output_directory, "plots_train_reconstructions_" + save_name),
)
##### Plots train rotations of samples
train_indices = np.random.randint(
0, len(model_with_rotation.data.X_orig_train), size=(10,)
).tolist()
figsave_name=os.path.join(args.output_directory, "plots_train_rotations_" + save_name + '.png')
if args.supervised:
if n_transfos == 1:
if args.data_n_x > 0:
param_name = 'tx'
elif args.data_n_y > 0:
param_name = 'ty'
if args.data_n_rot > 0:
param_name = 'angle'
model_with_rotation.plot_multiple_transformations(indices=train_indices, train_set = True,
param_name=param_name, save_name=figsave_name
)
else:
model_with_rotation.plot_multiple_transformations_stacked(indices=train_indices, train_set = True,
n_plots = 10, save_name=figsave_name
)
else:
if args.data_n_x > 0:
param_name = 'tx'
elif args.data_n_y > 0:
param_name = 'ty'
if args.data_n_rot > 0:
param_name = 'angle'
model_with_rotation.plot_multiple_transformations(indices=train_indices, train_set = True,
param_name=param_name,save_name=figsave_name
)
##### Plots test reconstructions
samples_pairs = np.random.randint(
0, len(model_with_rotation.data.X_test), size=(10,)
).tolist()
model_with_rotation.plot_x2_reconstructions(
indices=samples_pairs,
train_set=False,
save_name=os.path.join(args.output_directory, "plots_test_reconstructions_" + save_name),
)
##### Plots test rotations of samples
test_indices = np.random.randint(
0, len(model_with_rotation.data.X_orig_test), size=(10,)
).tolist()
figsave_name=os.path.join(args.output_directory, "plots_test_rotations_" + save_name + '.png')
if args.supervised:
if n_transfos == 1:
if args.data_n_x > 0:
param_name = 'tx'
elif args.data_n_y > 0:
param_name = 'ty'
if args.data_n_rot > 0:
param_name = 'angle'
model_with_rotation.plot_multiple_transformations(indices=test_indices, train_set = False,
param_name=param_name, save_name=figsave_name
)
else:
model_with_rotation.plot_multiple_transformations_stacked(indices=test_indices, train_set = False,
n_plots = 10, save_name=figsave_name
)
else:
if args.data_n_x > 0:
param_name = 'tx'
elif args.data_n_y > 0:
param_name = 'ty'
if args.data_n_rot > 0:
param_name = 'angle'
model_with_rotation.plot_multiple_transformations(indices=test_indices, train_set = False,
param_name=param_name, save_name=figsave_name
)
elif args.mode == "test":
file_name = "best_checkpoint_{}.pth.tar".format(model_with_rotation.save_name)
path_to_model = os.path.join(args.output_directory, file_name)
model_with_rotation.load_model(path_to_model)
if args.supervised:
loss_func = model_with_rotation.reconstruction_mse_transformed_z1
else:
loss_func = model_with_rotation.reconstruction_mse_transformed_z1_weak
test_mse = model_with_rotation.compute_test_loss(
loss_func, model_with_rotation.data.test_loader_batch_100
)
torch.save(
torch.FloatTensor([test_mse]),
os.path.join(
args.output_directory, "test_mse_" + model_with_rotation.save_name
),
)
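# Illustrative sketch (not part of the original script): main() can be driven
# programmatically with the same argparse tokens used on the command line, e.g.
# a short weakly supervised run on the simple-shapes data. The flag values
# below are arbitrary small settings chosen only for illustration.
def _example_short_run():
    main([
        "--dataset", "simpleshapes",
        "--n_classes", "10",
        "--data_n_rot", "4", "--n_rot", "4",
        "--n_epochs", "1", "--bs", "8",
        "--mode", "train", "--supervised", "0",
    ])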
if __name__ == "__main__":
main(sys.argv[1:])
|
Addressing-the-Topological-Defects-of-Disentanglement-main
|
complex_shift_operator/__main__.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
from functools import partial
from convit import VisionTransformer
from timm.models.efficientnet import EfficientNet
from timm.models.vision_transformer import _cfg
from timm.models.registry import register_model
@register_model
def convit_tiny(pretrained=False, **kwargs):
num_heads = 4
kwargs['embed_dim'] *= num_heads
model = VisionTransformer(
num_heads=num_heads,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint)
return model
@register_model
def convit_small(pretrained=False, **kwargs):
num_heads = 9
kwargs['embed_dim'] *= num_heads
model = VisionTransformer(
num_heads=num_heads,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/convit/convit_small.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint)
return model
@register_model
def convit_base(pretrained=False, **kwargs):
num_heads = 16
kwargs['embed_dim'] *= num_heads
model = VisionTransformer(
num_heads=num_heads,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/convit/convit_base.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint)
return model
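# Illustrative sketch (not part of the original file): the registered ConViT
# variants multiply the per-head embed_dim by the number of heads before
# building the VisionTransformer. The per-head value of 48 and the assumption
# that the repo's VisionTransformer keeps timm-style defaults (224x224 input,
# 1000 classes) are illustrative only.
def _example_build_convit_tiny():
    model = convit_tiny(pretrained=False, embed_dim=48)   # total embed_dim = 48 * 4
    images = torch.randn(1, 3, 224, 224)                  # ImageNet-sized input
    logits = model(images)
    return logits.shape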
|
convit-main
|
models.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import time
import shutil
import itertools
import main as classification
import submitit
def parse_args():
classification_parser = classification.get_args_parser()
parser = argparse.ArgumentParser("Submitit for ConViT", parents=[classification_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=1000, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="dev,learnfair,scavenge", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
parser.add_argument('--comment', default="icml", type=str,
help='Comment to pass to scheduler, e.g. priority message')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/convit")
# p = p / str(int(time.time()))
p = p / str(1614800338)
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file(shared_folder):
    # Init file must not exist, but its parent dir must exist.
init_file = shared_folder / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as classification
self._setup_gpu_args()
classification.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file(self.args.shared_dir).as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def copy_py(dst_folder, root='.'):
if not os.path.exists(dst_folder):
print("Folder doesn't exist!")
return
for f in os.listdir(root):
if f.endswith('.py'):
shutil.copy2(f, dst_folder)
def main():
args = parse_args()
shared_folder = get_shared_folder()
copy_py(shared_folder)
os.chdir(shared_folder)
grid = {
'model': ['convit_base'],
}
def dict_product(d):
keys = d.keys()
for element in itertools.product(*d.values()):
yield dict(zip(keys, element))
for params in dict_product(grid):
name = '_'.join(['{}_{}'.format(k,v) for k,v in params.items()])
args.shared_dir = shared_folder
args.job_dir = shared_folder / name
if os.path.exists(args.job_dir / 'checkpoint.pth'):
args.resume = args.job_dir / 'checkpoint.pth'
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
args.use_volta32 = True
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb= 80 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
for k,v in params.items():
setattr(args,k,v)
executor.update_parameters(name=name)
args.dist_url = get_init_file(shared_folder).as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
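# Illustrative invocation (a sketch, not taken from the repository docs): the
# flags below are defined in parse_args() above, while the model grid itself is
# hard-coded inside main(); job folders are created under the shared checkpoint
# directory returned by get_shared_folder(), e.g.
#
#   python run_with_submitit.py --nodes 2 --ngpus 8 --partition learnfair \
#       --use_volta32 --timeout 1000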
|
convit-main
|
run_with_submitit.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import random
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, DatasetFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
return filename.lower().endswith(extensions)
def make_subsampled_dataset(
directory, class_to_idx, extensions=None,is_valid_file=None, sampling_ratio=1., nb_classes=None):
instances = []
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
for i, target_class in enumerate(sorted(class_to_idx.keys())):
if nb_classes is not None and i>=nb_classes:
break
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
        num_imgs = int(len(os.listdir(target_dir)) * sampling_ratio)
        imgs = 0
        for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
            for fname in sorted(fnames):
                if imgs == num_imgs:
                    break
                path = os.path.join(root, fname)
                if is_valid_file(path):
                    item = path, class_index
                    instances.append(item)
                    imgs += 1
return instances
class INatDataset(ImageFolder):
def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
category='name', loader=default_loader):
self.transform = transform
self.loader = loader
self.target_transform = target_transform
self.year = year
# assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
with open(path_json) as json_file:
data = json.load(json_file)
with open(os.path.join(root, 'categories.json')) as json_file:
data_catg = json.load(json_file)
path_json_for_targeter = os.path.join(root, f"train{year}.json")
with open(path_json_for_targeter) as json_file:
data_for_targeter = json.load(json_file)
targeter = {}
indexer = 0
for elem in data_for_targeter['annotations']:
king = []
king.append(data_catg[int(elem['category_id'])][category])
if king[0] not in targeter.keys():
targeter[king[0]] = indexer
indexer += 1
self.nb_classes = len(targeter)
self.samples = []
for elem in data['images']:
cut = elem['file_name'].split('/')
target_current = int(cut[2])
path_current = os.path.join(root, cut[0], cut[2], cut[3])
categors = data_catg[target_current]
target_current_true = targeter[categors[category]]
self.samples.append((path_current, target_current_true))
# __getitem__ and __len__ inherited from ImageFolder
class SubsampledDatasetFolder(DatasetFolder):
def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None, sampling_ratio=1., nb_classes=None):
super(DatasetFolder, self).__init__(root, transform=transform,
target_transform=target_transform)
classes, class_to_idx = self._find_classes(self.root)
samples = make_subsampled_dataset(self.root, class_to_idx, extensions, is_valid_file, sampling_ratio=sampling_ratio, nb_classes=nb_classes)
if len(samples) == 0:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
if extensions is not None:
msg += "Supported extensions are: {}".format(",".join(extensions))
raise RuntimeError(msg)
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
# __getitem__ and __len__ inherited from DatasetFolder
class ImageNetDataset(SubsampledDatasetFolder):
def __init__(self, root, loader=default_loader, is_valid_file=None, **kwargs):
super(ImageNetDataset, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
is_valid_file=is_valid_file, **kwargs)
self.imgs = self.samples
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
if args.data_set == 'CIFAR10':
args.data_path = "/datasets01/cifar-pytorch/11222017/"
dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform)
nb_classes = 10
    elif args.data_set == 'CIFAR100':
args.data_path = "/datasets01/cifar100/022818/data/"
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = ImageNetDataset(root, transform=transform,
sampling_ratio= (args.sampling_ratio if is_train else 1.), nb_classes=args.nb_classes)
nb_classes = args.nb_classes if args.nb_classes is not None else 1000
elif args.data_set == 'INAT':
dataset = INatDataset(args.data_path, train=is_train, year=2018,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
elif args.data_set == 'INAT19':
args.data_path = "/datasets01/inaturalist/090619/"
dataset = INatDataset(args.data_path, train=is_train, year=2019,
category=args.inat_category, transform=transform)
nb_classes = dataset.nb_classes
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
size = int((256 / 224) * args.input_size)
t.append(
transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
return transforms.Compose(t)
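# Illustrative sketch (not part of the original file): build_transform only
# needs a handful of attributes on `args`; for evaluation with 224x224 inputs a
# SimpleNamespace stand-in is enough. This is an assumption for demonstration,
# not how the training script constructs its arguments.
def _example_eval_transform(input_size=224):
    from types import SimpleNamespace
    args = SimpleNamespace(input_size=input_size)
    # Resize -> CenterCrop -> ToTensor -> Normalize, as assembled above
    return build_transform(is_train=False, args=args)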
|
convit-main
|
datasets.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Train and eval functions used in main.py
"""
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
import utils
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None):
# TODO fix this for finetuning
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for images, target in metric_logger.log_every(data_loader, 10, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
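# Illustrative sketch (not part of the original file): `evaluate` only expects
# a loader yielding (images, targets) batches and a classification model, so it
# can be exercised on random data. The tiny linear model below is purely for
# demonstration.
def _example_evaluate_random():
    from torch.utils.data import DataLoader, TensorDataset
    images = torch.randn(32, 3, 8, 8)
    targets = torch.randint(0, 10, (32,))
    loader = DataLoader(TensorDataset(images, targets), batch_size=8)
    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 8 * 8, 10))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return evaluate(loader, model.to(device), device)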
|
convit-main
|
engine.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
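# Illustrative sketch (not part of the original file): MetricLogger keeps one
# SmoothedValue per metric name, so scalar statistics can be tracked and
# printed with their windowed median / global average. Values are fake.
def _example_metric_logger():
    logger = MetricLogger(delimiter="  ")
    logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
    for step in range(100):
        logger.update(loss=1.0 / (step + 1), lr=1e-3)
    print(logger)   # e.g. "loss: 0.0112 (0.0519)  lr: 0.001000"
    return logger.meters['loss'].global_avg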
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def compute_throughput(model, batch_size=128, resolution=224):
torch.cuda.empty_cache()
warmup_iters = 3
num_iters = 30
model.eval()
model.to("cuda")
timing = []
inputs = torch.randn(batch_size, 3, resolution, resolution, device="cuda")
# warmup
for _ in range(warmup_iters):
model(inputs)
torch.cuda.synchronize()
for _ in range(num_iters):
start = time.time()
model(inputs)
torch.cuda.synchronize()
timing.append(time.time() - start)
timing = torch.as_tensor(timing, dtype=torch.float32)
return batch_size / timing.mean()
|
convit-main
|
utils.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import gc
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from samplers import RASampler
import models
import utils
def get_args_parser():
parser = argparse.ArgumentParser('ConViT training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
# Model parameters
parser.add_argument('--model', default='convit_small', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--pretrained', action='store_true')
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--embed_dim', default=48, type=int, help='embedding dimension per head')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=False)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". '
                             '(default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Dataset parameters
parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100', 'IMNET', 'INAT', 'INAT19'],
type=str, help='Image Net dataset path')
parser.add_argument('--sampling_ratio', default=1.,
type=float, help='fraction of samples to keep in the training set of imagenet')
parser.add_argument('--nb_classes', default=None,
type=int, help='number of classes in imagenet')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--save_every', default=None, type=int, help='save model every epochs')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# locality parameters
parser.add_argument('--local_up_to_layer', default=10, type=int,
help='number of GPSA layers')
parser.add_argument('--locality_strength', default=1., type=float,
help='Determines how focused each head is around its attention center')
return parser
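# Illustrative sketch (not part of the original script): the parser above can
# be materialized with its defaults and tweaked programmatically, which is
# handy for notebooks or tests. The overrides below are arbitrary small values.
def _example_default_args():
    parser = argparse.ArgumentParser('ConViT example', parents=[get_args_parser()])
    args = parser.parse_args([])        # no CLI tokens -> all defaults
    args.batch_size, args.epochs = 32, 1
    return args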
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, _ = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug:
sampler_train = RASampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
else:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, batch_size=int(1.5 * args.batch_size),
shuffle=False, num_workers=args.num_workers,
pin_memory=args.pin_mem, drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
local_up_to_layer=args.local_up_to_layer,
locality_strength=args.locality_strength,
embed_dim = args.embed_dim,
)
print(model)
model.to(device)
model_ema = None
if args.model_ema:
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
optimizer = create_optimizer(args, model)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
criterion = LabelSmoothingCrossEntropy()
if args.mixup > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
output_dir = Path(args.output_dir)
torch.save(args, output_dir / "args.pyT")
if args.resume:
if str(args.resume).startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.model_ema:
utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if args.eval:
throughput = utils.compute_throughput(model, resolution=args.input_size)
print(f"Throughput : {throughput:.2f}")
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
return
print("Start training")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
gc.collect()
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, model_ema, mixup_fn
)
lr_scheduler.step(epoch)
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
if args.save_every is not None:
if epoch % args.save_every == 0: checkpoint_paths.append(output_dir / 'checkpoint_{}.pth'.format(epoch))
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'model_ema': get_state_dict(model_ema) if model_ema else None,
'args': args,
}, checkpoint_path)
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
nonlocality = {}
gating_params = {}
distances = {}
batch = next(iter(data_loader_val))[0]
batch = batch.to(device)
batch = model_without_ddp.patch_embed(batch)
for l in range(len(model_without_ddp.blocks)):
attn = model_without_ddp.blocks[l].attn
nonlocality[l] = attn.get_attention_map(batch).detach().cpu().numpy().tolist()
if 'convit' in args.model and l<args.local_up_to_layer:
p = attn.pos_proj.weight
span = -1/p.data[:,-1]
dist_x = p.data[:,0]*span/2
dist_y = p.data[:,1]*span/2
dist = (dist_x**2+dist_y**2)**.5
distances[l] = dist.cpu().numpy().tolist()
gating_params[l] = attn.gating_param.data.cpu().numpy().tolist()
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
**{f'nonlocality_{k}': v for k, v in nonlocality.items()},
**{f'distances_{k}': v for k, v in distances.items()},
**{f'gating_params_{k}': v for k, v in gating_params.items()},
'epoch': epoch,
'n_parameters': n_parameters}
print(log_stats)
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('ConViT training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
convit-main
|
main.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.distributed as dist
import math
class RASampler(torch.utils.data.Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
    It ensures that each augmented version of a sample will be visible to a
    different process (GPU).
Heavily based on torch.utils.data.DistributedSampler
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices = [ele for ele in indices for i in range(3)]
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
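# Illustrative sketch (not part of the original file): RASampler can be
# inspected without initializing torch.distributed by passing num_replicas and
# rank explicitly. With 512 items and 2 replicas, each replica yields 256
# indices per epoch, with every index repeated (for augmentation) up to 3
# times across processes.
def _example_rasampler():
    dataset = list(range(512))          # any sized sequence works
    sampler = RASampler(dataset, num_replicas=2, rank=0, shuffle=True)
    sampler.set_epoch(0)
    indices = list(iter(sampler))
    assert len(indices) == len(sampler) == 256
    return indices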
|
convit-main
|
samplers.py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
'''These modules are adapted from those of timm, see
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
import matplotlib.pyplot as plt
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class GPSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
locality_strength=1., use_local_init=True):
super().__init__()
self.num_heads = num_heads
self.dim = dim
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.pos_proj = nn.Linear(3, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
self.locality_strength = locality_strength
self.gating_param = nn.Parameter(torch.ones(self.num_heads))
self.apply(self._init_weights)
if use_local_init:
self.local_init(locality_strength=locality_strength)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
B, N, C = x.shape
if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
self.get_rel_indices(N)
attn = self.get_attention(x)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def get_attention(self, x):
B, N, C = x.shape
qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k = qk[0], qk[1]
pos_score = self.rel_indices.expand(B, -1, -1,-1)
pos_score = self.pos_proj(pos_score).permute(0,3,1,2)
patch_score = (q @ k.transpose(-2, -1)) * self.scale
patch_score = patch_score.softmax(dim=-1)
pos_score = pos_score.softmax(dim=-1)
gating = self.gating_param.view(1,-1,1,1)
attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
attn /= attn.sum(dim=-1).unsqueeze(-1)
attn = self.attn_drop(attn)
return attn
def get_attention_map(self, x, return_map = False):
attn_map = self.get_attention(x).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:,:,-1]**.5
dist = torch.einsum('nm,hnm->h', (distances, attn_map))
dist /= distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def local_init(self, locality_strength=1.):
self.v.weight.data.copy_(torch.eye(self.dim))
locality_distance = 1 #max(1,1/locality_strength**.5)
kernel_size = int(self.num_heads**.5)
center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
for h1 in range(kernel_size):
for h2 in range(kernel_size):
position = h1+kernel_size*h2
self.pos_proj.weight.data[position,2] = -1
self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
self.pos_proj.weight.data *= locality_strength
def get_rel_indices(self, num_patches):
img_size = int(num_patches**.5)
rel_indices = torch.zeros(1, num_patches, num_patches, 3)
ind = torch.arange(img_size).view(1,-1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size,img_size)
indy = ind.repeat_interleave(img_size,dim=0).repeat_interleave(img_size,dim=1)
indd = indx**2 + indy**2
rel_indices[:,:,:,2] = indd.unsqueeze(0)
rel_indices[:,:,:,1] = indy.unsqueeze(0)
rel_indices[:,:,:,0] = indx.unsqueeze(0)
device = self.qk.weight.device
self.rel_indices = rel_indices.to(device)
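# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Runs the gated positional self-attention layer above on a dummy 4x4 patch grid.
# Sizes are illustrative assumptions; the point is that GPSA preserves the
# (batch, num_patches, dim) shape of its input.
def _gpsa_smoke_test():
    layer = GPSA(dim=48, num_heads=4)
    x = torch.randn(2, 16, 48)  # (batch, num_patches = 4*4, dim)
    out = layer(x)
    print(out.shape)  # expected: torch.Size([2, 16, 48])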
class MHSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def get_attention_map(self, x, return_map = False):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn_map = (q @ k.transpose(-2, -1)) * self.scale
attn_map = attn_map.softmax(dim=-1).mean(0)
img_size = int(N**.5)
ind = torch.arange(img_size).view(1,-1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size,img_size)
indy = ind.repeat_interleave(img_size,dim=0).repeat_interleave(img_size,dim=1)
indd = indx**2 + indy**2
distances = indd**.5
distances = distances.to('cuda')
dist = torch.einsum('nm,hnm->h', (distances, attn_map))
dist /= N
if return_map:
return dist, attn_map
else:
return dist
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
super().__init__()
self.norm1 = norm_layer(dim)
self.use_gpsa = use_gpsa
if self.use_gpsa:
self.attn = GPSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
else:
self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding, from timm
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.apply(self._init_weights)
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding, from timm
"""
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Linear(feature_dim, embed_dim)
self.apply(self._init_weights)
def forward(self, x):
x = self.backbone(x)[-1]
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=48, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, global_pool=None,
local_up_to_layer=10, locality_strength=1., use_pos_embed=True):
super().__init__()
self.num_classes = num_classes
self.local_up_to_layer = local_up_to_layer
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.locality_strength = locality_strength
self.use_pos_embed = use_pos_embed
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.num_patches = num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if self.use_pos_embed:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.pos_embed, std=.02)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=True,
locality_strength=locality_strength)
if i<local_up_to_layer else
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=False)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
self.head.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
if self.use_pos_embed:
x = x + self.pos_embed
x = self.pos_drop(x)
for u,blk in enumerate(self.blocks):
if u == self.local_up_to_layer :
x = torch.cat((cls_tokens, x), dim=1)
x = blk(x)
x = self.norm(x)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
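# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Builds a deliberately tiny ConViT-style VisionTransformer and pushes a random
# batch through it. These hyperparameters are assumptions chosen so the check is
# fast on CPU; they are not the published ConViT configurations.
def _convit_smoke_test():
    model = VisionTransformer(
        img_size=32, patch_size=16, embed_dim=48, depth=4,
        num_heads=4, local_up_to_layer=2, num_classes=10,
    )
    x = torch.randn(2, 3, 32, 32)  # batch of 2 RGB images
    logits = model(x)
    print(logits.shape)  # expected: torch.Size([2, 10])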
|
convit-main
|
convit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import os
import argparse
# run each job single-threaded, parallelize using pathos
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
# multi-socket friendly args
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "1"
import torch
# force torch to 1 thread too just in case
torch.set_num_interop_threads(1)
torch.set_num_threads(1)
import time
from copy import deepcopy
from pathlib import Path
from aepsych.benchmark import BenchmarkLogger, PathosBenchmark, combine_benchmarks
from problems import (
DiscrimHighDim,
Hartmann6Binary,
ContrastSensitivity6d, # This takes a few minutes to instantiate due to fitting the model
)
chunks = 5
reps_per_chunk = 20
log_frequency = 10
large_opt_size = 750
nproc = 124
global_seed = 1000
inits = [100, 250, 500]
if __name__ == "__main__":
out_fname_base = Path("../data/init_sensitivity")
out_fname_base.mkdir(
parents=True, exist_ok=True
    ) # make an output folder if it doesn't exist
problems = [
DiscrimHighDim(),
Hartmann6Binary(),
ContrastSensitivity6d(),
]
bench_config = {
"common": {
"outcome_type": "single_probit",
"strategy_names": "[init_strat, opt_strat]",
},
"init_strat": {"generator": "SobolGenerator"},
"opt_strat": {
"model": "GPClassificationModel",
"generator": "OptimizeAcqfGenerator",
"refit_every": 10,
},
"GPClassificationModel": {
"inducing_size": 100,
"mean_covar_factory": "default_mean_covar_factory",
"inducing_point_method": "auto",
},
"default_mean_covar_factory": {
"fixed_mean": False,
"lengthscale_prior": "gamma",
"outputscale_prior": "gamma",
"kernel": "RBFKernel",
},
"OptimizeAcqfGenerator": {
"acqf": [
"LocalMI",
"GlobalMI",
"EAVC",
],
"restarts": 2,
"samps": 100,
},
# Add the probit transform for non-probit-specific acqfs
"MCLevelSetEstimation": {"objective": "ProbitObjective"},
"BernoulliMCMutualInformation": {"objective": "ProbitObjective"},
"MCPosteriorVariance": {"objective": "ProbitObjective"},
}
for chunk in range(chunks):
for problem in problems:
out_fname = Path(f"{out_fname_base}/{problem.name}_chunk{chunk}_out.csv")
intermediate_fname = Path(
f"{out_fname_base}/{problem.name}_chunk{chunk}_checkpoint.csv"
)
print(f"starting {problem.name} benchmark... chunk {chunk} ")
logger = BenchmarkLogger(log_every=log_frequency)
benches = []
for init in inits:
local_config = deepcopy(bench_config)
local_config["common"]["lb"] = str(problem.lb.tolist())
local_config["common"]["ub"] = str(problem.ub.tolist())
local_config["common"]["target"] = problem.threshold
local_config["init_strat"]["n_trials"] = init
local_config["opt_strat"]["n_trials"] = large_opt_size - init
benches.append(
PathosBenchmark(
nproc=nproc,
problem=problem,
logger=logger,
configs=local_config,
global_seed=global_seed,
n_reps=reps_per_chunk,
)
)
bench = combine_benchmarks(*benches)
bench.start_benchmarks()
# checkpoint every minute in case something breaks
while not bench.is_done:
time.sleep(60)
collate_start = time.time()
print(
f"Checkpointing bench {problem} chunk {chunk}..., {len(bench.futures)}/{bench.num_benchmarks} alive"
)
bench.collate_benchmarks(wait=False)
temp_results = bench.logger.pandas()
if len(temp_results) > 0:
temp_results["rep"] = temp_results["rep"] + reps_per_chunk * chunk
temp_results["problem"] = problem.name
temp_results.to_csv(intermediate_fname)
print(
f"Collate done in {time.time()-collate_start} seconds, {len(bench.futures)}/{bench.num_benchmarks} left"
)
print(f"Problem {problem} chunk {chunk} fully done!")
final_results = bench.logger.pandas()
final_results["rep"] = final_results["rep"] + reps_per_chunk * chunk
final_results["problem"] = problem.name
final_results.to_csv(out_fname)
|
bernoulli_lse-main
|
init_sensitivity_study.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import os
import argparse
from copy import deepcopy
from pathlib import Path
global_seed = 1000
n_reps = 20
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Gentime Benchmarks")
parser.add_argument("--nproc", type=int, default=1)
parser.add_argument(
"--output_path", type=Path, default=Path("../data/gentime_bench")
)
args = parser.parse_args()
os.environ["OMP_NUM_THREADS"] = str(args.nproc)
os.environ["MKL_NUM_THREADS"] = str(args.nproc)
os.environ["NUMEXPR_NUM_THREADS"] = str(args.nproc)
# multi-socket friendly args
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = str(args.nproc)
import torch
torch.set_num_interop_threads(args.nproc)
torch.set_num_threads(args.nproc)
from aepsych.benchmark import BenchmarkLogger, Benchmark
from problems import (
DiscrimLowDim,
DiscrimHighDim,
Hartmann6Binary,
)
out_fname_base = args.output_path
out_fname_base.mkdir(
parents=True, exist_ok=True
    ) # make an output folder if it doesn't exist
problems = [
DiscrimLowDim(),
DiscrimHighDim(),
Hartmann6Binary(),
]
bench_config = {
"common": {
"outcome_type": "single_probit",
"strategy_names": "[init_strat, opt_strat]",
},
"init_strat": {"n_trials": [10, 250, 500, 750], "generator": "SobolGenerator"},
"opt_strat": {
"n_trials": 2,
"model": "GPClassificationModel",
"generator": "OptimizeAcqfGenerator",
"refit_every": 1,
},
"GPClassificationModel": {
"inducing_size": 100,
"mean_covar_factory": "default_mean_covar_factory",
"inducing_point_method": "auto",
},
"default_mean_covar_factory": {
"fixed_mean": False,
"lengthscale_prior": "gamma",
"outputscale_prior": "gamma",
"kernel": "RBFKernel",
},
"OptimizeAcqfGenerator": {
"acqf": [
"LocalMI",
"MCLevelSetEstimation", # Straddle
"EAVC",
],
"restarts": 2,
"samps": 100,
},
# Add the probit transform for non-probit-specific acqfs
"MCLevelSetEstimation": {"objective": "ProbitObjective"},
"BernoulliMCMutualInformation": {"objective": "ProbitObjective"},
"MCPosteriorVariance": {"objective": "ProbitObjective"},
}
problem = problems[0]
for problem in problems:
out_fname = Path(f"{out_fname_base}/{problem.name}_{args.nproc}threads_out.csv")
print(f"starting {problem.name} benchmark...")
local_config = deepcopy(bench_config)
local_config["common"]["lb"] = str(problem.lb.tolist())
local_config["common"]["ub"] = str(problem.ub.tolist())
local_config["common"]["target"] = problem.threshold
logger = BenchmarkLogger(log_every=1)
bench = Benchmark(
problem=problem,
logger=logger,
configs=local_config,
global_seed=global_seed,
n_reps=n_reps,
)
bench.run_benchmarks()
print(f"Problem {problem} fully done!")
final_results = bench.logger.pandas()
final_results["problem"] = problem.name
final_results["nproc"] = args.nproc
final_results.to_csv(out_fname)
|
bernoulli_lse-main
|
gentime_bench.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
import torch
from aepsych.benchmark.test_functions import (
modified_hartmann6,
discrim_highdim,
novel_discrimination_testfun,
)
from aepsych.models import GPClassificationModel
from aepsych.benchmark.problem import LSEProblem
class LSEProblemWithEdgeLogging(LSEProblem):
eps = 0.05
def evaluate(self, strat):
metrics = super().evaluate(strat)
# add number of edge samples to the log
# get the trials selected by the final strat only
n_opt_trials = strat.strat_list[-1].n_trials
lb, ub = strat.lb, strat.ub
r = ub - lb
lb2 = lb + self.eps * r
ub2 = ub - self.eps * r
near_edge = (
np.logical_or(
(strat.x[-n_opt_trials:, :] <= lb2), (strat.x[-n_opt_trials:, :] >= ub2)
)
.any(axis=-1)
.double()
)
metrics["prop_edge_sampling_mean"] = near_edge.mean().item()
metrics["prop_edge_sampling_err"] = (2 * near_edge.std() / np.sqrt(len(near_edge))).item()
return metrics
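# --- Hedged illustration (added; not part of the original file) ---
# The edge-sampling metric above flags a trial whenever any coordinate falls within
# eps * (ub - lb) of a bound. A numpy-only restatement of that test, with
# hypothetical array inputs, for reference:
def _near_edge_fraction(x, lb, ub, eps=0.05):
    """x: (n, d) array of trials; returns the fraction of rows near any bound."""
    r = ub - lb
    near = np.logical_or(x <= lb + eps * r, x >= ub - eps * r).any(axis=-1)
    return float(near.mean())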
class DiscrimLowDim(LSEProblemWithEdgeLogging):
name = "discrim_lowdim"
bounds = torch.tensor([[-1, 1], [-1, 1]], dtype=torch.double).T
threshold = 0.75
def f(self, x: torch.Tensor) -> torch.Tensor:
return torch.tensor(novel_discrimination_testfun(x), dtype=torch.double)
class DiscrimHighDim(LSEProblemWithEdgeLogging):
name = "discrim_highdim"
threshold = 0.75
bounds = torch.tensor(
[
[-1, 1],
[-1, 1],
[0.5, 1.5],
[0.05, 0.15],
[0.05, 0.2],
[0, 0.9],
[0, 3.14 / 2],
[0.5, 2],
],
dtype=torch.double,
).T
def f(self, x: torch.Tensor) -> torch.Tensor:
return torch.tensor(discrim_highdim(x), dtype=torch.double)
class Hartmann6Binary(LSEProblemWithEdgeLogging):
name = "hartmann6_binary"
threshold = 0.5
bounds = torch.stack(
(
torch.zeros(6, dtype=torch.double),
torch.ones(6, dtype=torch.double),
)
)
def f(self, X: torch.Tensor) -> torch.Tensor:
y = torch.tensor([modified_hartmann6(x) for x in X], dtype=torch.double)
f = 3 * y - 2.0
return f
class ContrastSensitivity6d(LSEProblemWithEdgeLogging):
"""
    Uses a surrogate model fit to real data from a contrast sensitivity study.
"""
name = "contrast_sensitivity_6d"
threshold = 0.75
bounds = torch.tensor(
[[-1.5, 0], [-1.5, 0], [0, 20], [0.5, 7], [1, 10], [0, 10]],
dtype=torch.double,
).T
def __init__(self):
# Load the data
self.data = np.loadtxt("data/csf_dataset.csv", delimiter=",", skiprows=1)
y = torch.LongTensor(self.data[:, 0])
x = torch.Tensor(self.data[:, 1:])
# Fit a model, with a large number of inducing points
self.m = GPClassificationModel(
lb=self.bounds[0],
ub=self.bounds[1],
inducing_size=100,
inducing_point_method="kmeans++",
)
self.m.fit(
x,
y,
)
def f(self, X: torch.Tensor) -> torch.Tensor:
# clamp f to 0 since we expect p(x) to be lower-bounded at 0.5
return torch.clamp(self.m.predict(torch.tensor(X))[0], min=0)
|
bernoulli_lse-main
|
problems.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import os
import argparse
# run each job single-threaded, parallelize using pathos
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
# multi-socket friendly args
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "1"
import torch
# force torch to 1 thread too just in case
torch.set_num_interop_threads(1)
torch.set_num_threads(1)
import time
from copy import deepcopy
from pathlib import Path
from aepsych.benchmark import BenchmarkLogger, PathosBenchmark
from problems import (
DiscrimLowDim,
DiscrimHighDim,
Hartmann6Binary,
ContrastSensitivity6d, # This takes a few minutes to instantiate due to fitting the model
)
problem_map = {
"discrim_lowdim": DiscrimLowDim,
"discrim_highdim": DiscrimHighDim,
"hartmann6_binary": Hartmann6Binary,
"contrast_sensitivity_6d": ContrastSensitivity6d,
}
def make_argparser():
parser = argparse.ArgumentParser(description="Lookahead LSE Benchmarks")
parser.add_argument("--nproc", type=int, default=124)
parser.add_argument("--reps_per_chunk", type=int, default=20)
parser.add_argument("--chunks", type=int, default=15)
parser.add_argument("--large_opt_size", type=int, default=740)
parser.add_argument("--small_opt_size", type=int, default=490)
parser.add_argument("--init_size", type=int, default=10)
parser.add_argument("--global_seed", type=int, default=1000)
parser.add_argument("--log_frequency", type=int, default=10)
parser.add_argument("--output_path", type=Path, default=Path("../data/cameraready"))
parser.add_argument(
"--problem",
type=str,
choices=[
"discrim_highdim",
"discrim_lowdim",
"hartmann6_binary",
"contrast_sensitivity_6d",
"all",
],
default="all",
)
return parser
if __name__ == "__main__":
parser = make_argparser()
args = parser.parse_args()
out_fname_base = args.output_path
out_fname_base.mkdir(
parents=True, exist_ok=True
    ) # make an output folder if it doesn't exist
if args.problem == "all":
problems = [
DiscrimLowDim(),
DiscrimHighDim(),
Hartmann6Binary(),
ContrastSensitivity6d(),
]
else:
problems = [problem_map[args.problem]()]
bench_config = {
"common": {
"outcome_type": "single_probit",
"strategy_names": "[init_strat, opt_strat]",
},
"init_strat": {"n_trials": args.init_size, "generator": "SobolGenerator"},
"opt_strat": {
"model": "GPClassificationModel",
"generator": "OptimizeAcqfGenerator",
"refit_every": args.log_frequency,
},
"GPClassificationModel": {
"inducing_size": 100,
"mean_covar_factory": "default_mean_covar_factory",
"inducing_point_method": "auto",
},
"default_mean_covar_factory": {
"fixed_mean": False,
"lengthscale_prior": "gamma",
"outputscale_prior": "gamma",
"kernel": "RBFKernel",
},
"OptimizeAcqfGenerator": {
"acqf": [
"LocalMI",
"MCLevelSetEstimation", # Straddle
"LocalSUR",
"GlobalMI",
"GlobalSUR",
"EAVC",
"ApproxGlobalSUR",
"MCPosteriorVariance", # BALV
"BernoulliMCMutualInformation", # BALD
],
"restarts": 2,
"samps": 100,
},
# Add the probit transform for non-probit-specific acqfs
"MCLevelSetEstimation": {"objective": "ProbitObjective"},
"BernoulliMCMutualInformation": {"objective": "ProbitObjective"},
"MCPosteriorVariance": {"objective": "ProbitObjective"},
}
for chunk in range(args.chunks):
for problem in problems:
out_fname = Path(f"{out_fname_base}/{problem.name}_chunk{chunk}_out.csv")
intermediate_fname = Path(
f"{out_fname_base}/{problem.name}_chunk{chunk}_checkpoint.csv"
)
print(f"starting {problem.name} benchmark... chunk {chunk} ")
local_config = deepcopy(bench_config)
local_config["common"]["lb"] = str(problem.lb.tolist())
local_config["common"]["ub"] = str(problem.ub.tolist())
local_config["common"]["target"] = problem.threshold
local_config["opt_strat"]["n_trials"] = (
args.small_opt_size
if problem.name == "discrim_lowdim"
else args.large_opt_size
)
logger = BenchmarkLogger(log_every=args.log_frequency)
acq_bench = PathosBenchmark(
nproc=args.nproc,
problem=problem,
logger=logger,
configs=local_config,
global_seed=args.global_seed,
n_reps=args.reps_per_chunk,
)
sobol_config = deepcopy(local_config)
sobol_config["opt_strat"]["generator"] = "SobolGenerator"
del sobol_config["OptimizeAcqfGenerator"]
sobol_bench = PathosBenchmark(
nproc=args.nproc,
problem=problem,
logger=logger,
configs=sobol_config,
global_seed=args.global_seed,
n_reps=args.reps_per_chunk,
)
bench = acq_bench + sobol_bench
bench.start_benchmarks()
# checkpoint every minute in case something breaks
while not bench.is_done:
time.sleep(60)
collate_start = time.time()
print(
f"Checkpointing bench {problem} chunk {chunk}..., {len(bench.futures)}/{bench.num_benchmarks} alive"
)
bench.collate_benchmarks(wait=False)
temp_results = bench.logger.pandas()
if len(temp_results) > 0:
temp_results["rep"] = (
temp_results["rep"] + args.reps_per_chunk * chunk
)
temp_results["problem"] = problem.name
temp_results.to_csv(intermediate_fname)
print(
f"Collate done in {time.time()-collate_start} seconds, {len(bench.futures)}/{bench.num_benchmarks} left"
)
print(f"Problem {problem} chunk {chunk} fully done!")
final_results = bench.logger.pandas()
final_results["rep"] = final_results["rep"] + args.reps_per_chunk * chunk
final_results["problem"] = problem.name
final_results.to_csv(out_fname)
|
bernoulli_lse-main
|
run_experiments.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import os
import argparse
# run each job single-threaded, parallelize using pathos
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
# multi-socket friendly args
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "1"
import torch
# force torch to 1 thread too just in case
torch.set_num_interop_threads(1)
torch.set_num_threads(1)
import time
from copy import deepcopy
from pathlib import Path
from aepsych.benchmark import BenchmarkLogger, PathosBenchmark, combine_benchmarks
from problems import (
DiscrimHighDim,
Hartmann6Binary,
ContrastSensitivity6d, # This takes a few minutes to instantiate due to fitting the model
)
chunks = 5
reps_per_chunk = 20
log_frequency = 10
large_opt_size = 750
nproc = 124
global_seed = 1000
if __name__ == "__main__":
out_fname_base = Path("../data/thresh_sensitivity")
out_fname_base.mkdir(
parents=True, exist_ok=True
    ) # make an output folder if it doesn't exist
problems = [
DiscrimHighDim(),
Hartmann6Binary(),
ContrastSensitivity6d(),
]
bench_config = {
"common": {
"outcome_type": "single_probit",
"strategy_names": "[init_strat, opt_strat]",
"target": [0.5, 0.65, 0.95]
},
"init_strat": {"n_trials": 10, "generator": "SobolGenerator"},
"opt_strat": {
"n_trials": 740,
"model": "GPClassificationModel",
"generator": "OptimizeAcqfGenerator",
"refit_every": 10,
},
"GPClassificationModel": {
"inducing_size": 100,
"mean_covar_factory": "default_mean_covar_factory",
"inducing_point_method": "auto",
},
"default_mean_covar_factory": {
"fixed_mean": False,
"lengthscale_prior": "gamma",
"outputscale_prior": "gamma",
"kernel": "RBFKernel",
},
"OptimizeAcqfGenerator": {
"acqf": [
"LocalMI",
"GlobalMI",
"EAVC",
],
"restarts": 2,
"samps": 100,
},
# Add the probit transform for non-probit-specific acqfs
"MCLevelSetEstimation": {"objective": "ProbitObjective"},
"BernoulliMCMutualInformation": {"objective": "ProbitObjective"},
"MCPosteriorVariance": {"objective": "ProbitObjective"},
}
for chunk in range(chunks):
for problem in problems:
out_fname = Path(f"{out_fname_base}/{problem.name}_chunk{chunk}_out.csv")
intermediate_fname = Path(
f"{out_fname_base}/{problem.name}_chunk{chunk}_checkpoint.csv"
)
print(f"starting {problem.name} benchmark... chunk {chunk} ")
local_config = deepcopy(bench_config)
local_config["common"]["lb"] = str(problem.lb.tolist())
local_config["common"]["ub"] = str(problem.ub.tolist())
logger = BenchmarkLogger(log_every=log_frequency)
bench = PathosBenchmark(
nproc=nproc,
problem=problem,
logger=logger,
configs=local_config,
global_seed=global_seed,
n_reps=reps_per_chunk,
)
bench.start_benchmarks()
# checkpoint every minute in case something breaks
while not bench.is_done:
time.sleep(60)
collate_start = time.time()
print(
f"Checkpointing bench {problem} chunk {chunk}..., {len(bench.futures)}/{bench.num_benchmarks} alive"
)
bench.collate_benchmarks(wait=False)
temp_results = bench.logger.pandas()
if len(temp_results) > 0:
temp_results["rep"] = temp_results["rep"] + reps_per_chunk * chunk
temp_results["problem"] = problem.name
temp_results.to_csv(intermediate_fname)
print(
f"Collate done in {time.time()-collate_start} seconds, {len(bench.futures)}/{bench.num_benchmarks} left"
)
print(f"Problem {problem} chunk {chunk} fully done!")
final_results = bench.logger.pandas()
final_results["rep"] = final_results["rep"] + reps_per_chunk * chunk
final_results["problem"] = problem.name
final_results.to_csv(out_fname)
|
bernoulli_lse-main
|
thresh_sensitivity_study.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from pathlib import Path
import pandas as pd
import numpy as np
from plot_config import *
run_data = list(Path("../data/gentime_bench/").glob("*out.csv"))
import re
def make_figure():
alld = []
for f in run_data:
dlocal = pd.read_csv(f)
dlocal["nthreads"] = re.findall(".*(\d+)threads_out.csv", str(f))[0]
alld.append(dlocal)
df = pd.concat(alld)
df["method"] = df.OptimizeAcqfGenerator_acqf.fillna("Quasi-random").astype(
"category"
)
df["method"] = df.method.cat.rename_categories(
{
"MCLevelSetEstimation": "Straddle",
"MCPosteriorVariance": "BALV",
"BernoulliMCMutualInformation": "BALD",
}
)
df = df[df.trial_id.isin(([251]))]
methods = [
"EAVC",
"LocalMI",
"Straddle",
]
fig = plt.figure(figsize=(6.75, 2.3))
# Plot each problem
prob = "hartmann6_binary"
ax = fig.add_subplot(131)
for method in methods:
df_m = df[(df['problem'] == prob) & (df['method'] == method)]
res = df_m.groupby('nthreads').agg({'gen_time': ['mean', 'std', 'count']})
res = res.droplevel(axis=1, level=0).reset_index()
ymean = res['mean']
yerr = 2 * res['std'] / np.sqrt(res['count'])
color, ls = method_styles[method]
ax.errorbar(res['nthreads'], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(res['nthreads'], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_ylim([0., 3.2])
ax.set_yticks([0, 1, 2, 3])
ax.grid(alpha=0.1)
ax.set_title("Binarized Hartmann6 (6-d)")
ax.set_xlabel("Number of threads")
ax.set_ylabel("Acquisition wall time (s)")
ax.legend(loc="lower left", bbox_to_anchor=(0.9, -0.63), ncol=4)
prob = "discrim_lowdim"
ax = fig.add_subplot(132)
for method in methods:
df_m = df[(df['problem'] == prob) & (df['method'] == method)]
res = df_m.groupby('nthreads').agg({'gen_time': ['mean', 'std', 'count']})
res = res.droplevel(axis=1, level=0).reset_index()
ymean = res['mean']
yerr = 2 * res['std'] / np.sqrt(res['count'])
color, ls = method_styles[method]
ax.errorbar(res['nthreads'], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(res['nthreads'], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_ylim([0., 3.2])
ax.set_yticks([0, 1, 2, 3])
ax.set_yticklabels([])
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (2-d)")
ax.set_xlabel("Number of threads")
prob = "discrim_highdim"
ax = fig.add_subplot(133)
for method in methods:
df_m = df[(df['problem'] == prob) & (df['method'] == method)]
res = df_m.groupby('nthreads').agg({'gen_time': ['mean', 'std', 'count']})
res = res.droplevel(axis=1, level=0).reset_index()
ymean = res['mean']
yerr = 2 * res['std'] / np.sqrt(res['count'])
color, ls = method_styles[method]
ax.errorbar(res['nthreads'], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(res['nthreads'], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_ylim([0., 3.2])
ax.set_yticks([0, 1, 2, 3])
ax.set_yticklabels([])
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (8-d)")
ax.set_xlabel("Number of threads")
fig.subplots_adjust(bottom=0.34, left=0.05, top=0.91, right=0.99, wspace=0.1)
plt.savefig("pdfs/gentime_plots.pdf", pad_inches=0)
if __name__ == "__main__":
make_figure()
|
bernoulli_lse-main
|
figures/plot_gentimes.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import rc
import matplotlib
rc('font', family='serif', style='normal', variant='normal', weight='normal', stretch='normal', size=8)
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['xtick.labelsize'] = 7
matplotlib.rcParams['ytick.labelsize'] = 7
matplotlib.rcParams['axes.titlesize'] = 9
cmap = plt.get_cmap("tab10")
method_styles = {
'Straddle': (cmap(0), ':'),
'EAVC': (cmap(1), '-'),
'LocalMI': (cmap(2), '--'),
'GlobalMI': (cmap(3), '-'),
'LocalSUR': (cmap(4), '--'),
'GlobalSUR': (cmap(5), '-'),
'ApproxGlobalSUR': (cmap(6), '-'),
'Quasi-random': (cmap(7), ':'),
'BALD': (cmap(8), ':'),
'BALV': (cmap(9), ':'),
}
model_to_method_name = {
'MCLevelSetEstimation': 'Straddle',
'EAVC': 'EAVC',
'LocalMI': 'LocalMI',
'GlobalMI': 'GlobalMI',
'LocalSUR': 'LocalSUR',
'GlobalSUR': 'GlobalSUR',
'ApproxGlobalSUR': 'ApproxGlobalSUR',
'random': 'Quasi-random',
}
mean_covar_config = {
"fixed_mean": False,
"lengthscale_prior": "gamma",
"outputscale_prior": "gamma",
"kernel": "RBFKernel",
}
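# --- Hedged usage sketch (added; not part of the original file) ---
# The figure scripts unpack each method_styles entry as (color, linestyle):
def _style_demo():
    color, ls = method_styles["EAVC"]
    plt.plot([0, 250, 500, 750], [0.5, 0.3, 0.25, 0.2], color=color, ls=ls, label="EAVC")
    plt.legend()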
|
bernoulli_lse-main
|
figures/plot_config.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
from contrast_discrimination.helpers import HalfGrating
from psychopy import visual, monitors
screen = monitors.Monitor("testMonitor", gamma=1)
win = visual.Window(
allowGUI=True,
units="deg",
monitor=screen,
bpc=(8, 8, 8),
size=[300, 300],
fullscr=False,
)
base_args = {
"pedestal": 0,
"contrast": 1,
"orientation": 0,
"temporal_frequency": 0, # no animation so we screenshot
"spatial_frequency": 10,
"size": 10,
"eccentricity": 0, # just plot in the middle so we can screenshot
"angle_dist": 0, # not used in synth test function
}
p100_args = {
"pedestal": [-0.5],
"orientation": [
60
], # not used in the synth test function, but we used it in the plot in the paper
"spatial_frequency": [3],
"size": [7],
}
p75_args = {
"pedestal": [-1.2],
"spatial_frequency": [2],
"size": [2.5],
}
p50_args = {
"pedestal": [-1.5],
"contrast": [-1.5],
}
def make_stim_image(args):
stim = HalfGrating(**base_args, win=win)
stim.update(args)
image = stim.get_texture(phase=0, noisy_half="left")
bg_color = np.array([stim.pedestal_psychopy_scale] * 3)
win.setColor(bg_color)
win.color = bg_color
win.flip()
stim._stim.image = image
stim._stim.draw()
win.flip()
frame = win.getMovieFrame()
return frame
if __name__ == "__main__":
f50 = make_stim_image(p50_args)
f50.save("pdfs/p50.png")
f75 = make_stim_image(p75_args)
f75.save("pdfs/p75.png")
f100 = make_stim_image(p100_args)
f100.save("pdfs/p100.png")
|
bernoulli_lse-main
|
figures/make_stim_plots.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
import pandas as pd
from copy import deepcopy
import sys
sys.path.append("..")
from plot_config import *
from plot_experiment_results import compile_results, run_data
def make_classerr_figure():
res, itrs = compile_results(run_data)
# Make the plot
fig = plt.figure(figsize=(6.75, 2.3))
metric = "class_errors"
# Plot each problem
prob = "hartmann6_binary"
methods = list(method_styles.keys())[:8]
ax = fig.add_subplot(131)
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 760])
ax.set_xticks([0, 250, 500, 750])
ax.set_ylim([0.15, 0.45])
# ax.set_yticks([0.2, 0.3, 0.4, 0.5])
ax.grid(alpha=0.1)
ax.set_title("Binarized Hartmann6 (6-d)")
ax.set_xlabel("Iteration")
ax.set_ylabel("Classification Error")
ax.legend(loc="lower left", bbox_to_anchor=(0.45, -0.63), ncol=4)
prob = "discrim_lowdim"
ax = fig.add_subplot(132)
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 510])
ax.set_ylim([0.015, 0.11])
# ax.set_yticks([0.02, 0.04, 0.06, 0.08, 0.1])
ax.set_xticks([0, 100, 200, 300, 400, 500])
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (2-d)")
ax.set_xlabel("Iteration")
prob = "discrim_highdim"
ax = fig.add_subplot(133)
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 760])
ax.set_xticks([0, 250, 500, 750])
ax.set_ylim([0.1, 0.42])
# ax.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5])
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (8-d)")
ax.set_xlabel("Iteration")
fig.subplots_adjust(bottom=0.34, left=0.07, top=0.91, right=0.99, wspace=0.2)
plt.savefig("pdfs/benchmark_classerr.pdf", pad_inches=0)
def make_bald_figure():
res, itrs = compile_results(run_data)
# Make the plot
fig = plt.figure(figsize=(6.75, 2.3))
# Plot each problem
metric = "brier"
prob = "hartmann6_binary"
ax = fig.add_subplot(131)
methods = list(method_styles.keys())
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 760])
ax.set_xticks([0, 250, 500, 750])
ax.set_ylim([0.15, 0.5])
ax.set_yticks([0.2, 0.3, 0.4, 0.5])
ax.grid(alpha=0.1)
ax.set_title("Binarized Hartmann6 (6-d)")
ax.set_xlabel("Iteration")
ax.set_ylabel("Brier Score")
ax.legend(loc="lower left", bbox_to_anchor=(0.22, -0.63), ncol=5)
prob = "discrim_lowdim"
ax = fig.add_subplot(132)
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 510])
ax.set_ylim([0.01, 0.10])
ax.set_yticks([0.02, 0.04, 0.06, 0.08, 0.1])
ax.set_xticks([0, 100, 200, 300, 400, 500])
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (2-d)")
ax.set_xlabel("Iteration")
prob = "discrim_highdim"
ax = fig.add_subplot(133)
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 760])
ax.set_xticks([0, 250, 500, 750])
ax.set_ylim([0.1, 1.0])
ax.set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (8-d)")
ax.set_xlabel("Iteration")
fig.subplots_adjust(bottom=0.34, left=0.065, top=0.91, right=0.99, wspace=0.2)
plt.savefig("pdfs/benchmarks_bald.pdf", pad_inches=0)
if __name__ == "__main__":
make_classerr_figure()
make_bald_figure()
|
bernoulli_lse-main
|
figures/plot_supplement_experiment_results.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
|
bernoulli_lse-main
|
figures/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from copy import deepcopy
import numpy as np
import torch
from botorch.utils.sampling import draw_sobol_samples
import sys
sys.path.append('..')
from plot_config import *
from problems import DiscrimLowDim
from aepsych.models.gp_classification import GPClassificationModel
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.config import Config
from aepsych.acquisition import (
MCLevelSetEstimation,
GlobalSUR,
GlobalMI,
EAVC,
LocalMI,
LocalSUR,
)
def make_figure():
# Generate training data for the model
prob = DiscrimLowDim()
X = draw_sobol_samples(
bounds=torch.tensor(prob.bounds, dtype=torch.double), n=20, q=1, seed=1403
).squeeze(1)
np.random.seed(1403)
y = torch.LongTensor([1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1])
###print(X)
###tensor([[ 0.2829, 0.2585],
###[-0.8400, -0.4620],
###[-0.3771, 0.8218],
###[ 0.9996, -0.8986],
###[ 0.5347, 0.6318],
###[-0.0918, -0.5853],
###[-0.6252, 0.1951],
###[ 0.2478, -0.0219],
###[ 0.0526, 0.9270],
###[-0.5706, -0.8485],
###[-0.1469, 0.4888],
###[ 0.7304, -0.2870],
###[ 0.8047, 0.0576],
###[-0.3227, -0.2292],
###[-0.8948, 0.6194],
###[ 0.4783, -0.6676],
###[ 0.3968, 0.5543],
###[-0.9803, -0.7246],
###[-0.3026, 0.1158],
###[ 0.8207, -0.1633]], dtype=torch.float64)
# Fit a model
lb, ub = prob.bounds
config = deepcopy(mean_covar_config)
config["lb"] = str(lb.tolist())
config["ub"] = str(ub.tolist())
mean, covar = default_mean_covar_factory(Config({"default_mean_covar_factory": config}))
# Fit a model
m = GPClassificationModel(lb=lb, ub=ub, mean_module=mean, covar_module=covar)
m.fit(train_x=X, train_y=y)
# Create a grid for plotting
ngrid = 25
xp = np.linspace(-1, 1, ngrid)
yp = np.linspace(-1, 1, ngrid)
xv, yv = np.meshgrid(xp, yp)
x_plt = torch.tensor(np.vstack((xv.flatten(), yv.flatten())).T)
# Make the plot
fig = plt.figure(figsize=(6.75, 1.5))
Xrnd = draw_sobol_samples(bounds=prob.bounds, n=512, q=1, seed=1000).squeeze(1)
# Evaluate each acquisition fn on x_plt and Xrnd
for i, acq in enumerate([
MCLevelSetEstimation,
LocalSUR,
LocalMI,
GlobalSUR,
GlobalMI,
EAVC,
]):
if i == 0:
acqf = acq(model=m, target=0.75, beta=3.84)
elif i in [3, 4, 5]:
acqf = acq(model=m, target=0.75, Xq=Xrnd)
else:
acqf = acq(model=m, target=0.75)
ax = fig.add_subplot(1, 6, i + 1)
vals_plt = acqf(x_plt.unsqueeze(1)).detach().numpy()
vals_opt = acqf(Xrnd.unsqueeze(1)).detach().numpy()
r = vals_plt.max() - vals_plt.min()
levels = np.linspace(vals_plt.min() - 0.01 * r, vals_plt.max() + 0.01 * r, 30)
ax.contourf(yv, xv, vals_plt.reshape(ngrid, ngrid), alpha=0.2, levels=levels)
indx_max = np.argmax(vals_opt)
ax.plot(Xrnd[indx_max, 1], Xrnd[indx_max, 0], 'r*', mew=0.5, ms=5, fillstyle='full')
ax.set_xlim([-1.08, 1.08])
ax.set_ylim([-1.08, 1.08])
ax.set_title(model_to_method_name[acq.__name__])
ax.set_xticks([-1, 0, 1])
ax.set_yticks([-1, 0, 1])
ax.set_xlabel('$x_1$')
if i > 0:
ax.set_yticklabels([])
else:
ax.set_ylabel('$x_2$')
fig.subplots_adjust(wspace=0.08, left=0.058, right=0.995, top=0.87, bottom=0.23)
plt.savefig('pdfs/acquisitions.pdf', pad_inches=0)
if __name__ == '__main__':
make_figure()
|
bernoulli_lse-main
|
figures/plot_acquisition.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from copy import deepcopy
import numpy as np
import torch
from botorch.utils.sampling import draw_sobol_samples
import sys
sys.path.append('..')
from plot_config import *
from problems import DiscrimLowDim
from aepsych.models.gp_classification import GPClassificationModel
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.config import Config
from aepsych.acquisition.lookahead_utils import lookahead_at_xstar
def make_figure():
# Generate training data for the model
prob = DiscrimLowDim()
X = draw_sobol_samples(
bounds=torch.tensor(prob.bounds, dtype=torch.double), n=20, q=1, seed=1403
).squeeze(1)
np.random.seed(1403)
y = torch.LongTensor([1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1])
###print(X)
###tensor([[ 0.2829, 0.2585],
###[-0.8400, -0.4620],
###[-0.3771, 0.8218],
###[ 0.9996, -0.8986],
###[ 0.5347, 0.6318],
###[-0.0918, -0.5853],
###[-0.6252, 0.1951],
###[ 0.2478, -0.0219],
###[ 0.0526, 0.9270],
###[-0.5706, -0.8485],
###[-0.1469, 0.4888],
###[ 0.7304, -0.2870],
###[ 0.8047, 0.0576],
###[-0.3227, -0.2292],
###[-0.8948, 0.6194],
###[ 0.4783, -0.6676],
###[ 0.3968, 0.5543],
###[-0.9803, -0.7246],
###[-0.3026, 0.1158],
###[ 0.8207, -0.1633]], dtype=torch.float64)
# Fit a model
lb, ub = prob.bounds
config = deepcopy(mean_covar_config)
config["lb"] = str(lb.tolist())
config["ub"] = str(ub.tolist())
mean, covar = default_mean_covar_factory(Config({"default_mean_covar_factory": config}))
m = GPClassificationModel(lb=lb, ub=ub, mean_module=mean, covar_module=covar)
m.fit(train_x=X, train_y=y)
# Create a grid for plotting
ngrid = 25
xp = np.linspace(-1, 1, ngrid)
yp = np.linspace(-1, 1, ngrid)
xv, yv = np.meshgrid(xp, yp)
x_plt = torch.tensor(np.vstack((xv.flatten(), yv.flatten())).T)
indx_star = 165
Xstar = x_plt[indx_star, :].unsqueeze(0)
Xq = x_plt
Px, P1, P0, py1 = lookahead_at_xstar(model=m, Xstar=Xstar, Xq=x_plt, gamma=prob.f_threshold)
fig = plt.figure(figsize=(6.75, 1.75))
# Plot the not-look-ahead level-set posterior
axs = []
ax = fig.add_subplot(131)
axs.append(ax)
ax.contourf(yv, xv, Px.detach().numpy().reshape(ngrid, ngrid), levels=np.linspace(0, 1, 20), alpha=0.2)
ax.set_title('Level-set posterior\n$\pi(\mathbf{x} | \mathcal{D}_n)$')
y_is_1 = y == 1
ax.plot(X[y_is_1, 1], X[y_is_1, 0], 'kx', ls='None', mew=0.5, ms=5, alpha=0.3, label='$y=1$')
ax.plot(X[~y_is_1, 1], X[~y_is_1, 0], 'ko', ls='None', mew=0.5, ms=5, fillstyle='none', alpha=0.3, label='$y=0$')
ax.plot(x_plt[indx_star, 1], x_plt[indx_star, 0], 'r+', mew=1, ms=5, label='$\mathbf{x}_*$')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_yticks([-1.0, -0.5, 0.0, 0.5, 1.0])
ax.legend(loc='lower left', bbox_to_anchor=(3.8, 0.5))
# Posterior under a 1 observation
ax = fig.add_subplot(132)
axs.append(ax)
ax.contourf(yv, xv, P1.detach().numpy().reshape(ngrid, ngrid), levels=np.linspace(0, 1, 20), alpha=0.2)
ax.set_title('Look-ahead posterior\n$\pi(\mathbf{x} | \mathcal{D}_{n+1}(\mathbf{x}_*, y_*=1))$')
ax.plot(X[y_is_1, 1], X[y_is_1, 0], 'kx', ls='None', mew=0.5, ms=5, alpha=0.3)
ax.plot(X[~y_is_1, 1], X[~y_is_1, 0], 'ko', ls='None', mew=0.5, ms=5, fillstyle='none', alpha=0.3)
ax.plot(x_plt[indx_star, 1], x_plt[indx_star, 0], 'rx', ls='None', mew=1, ms=5)
ax.set_xlabel('$x_1$')
ax.set_yticks([-1.0, -0.5, 0.0, 0.5, 1.0])
ax.set_yticklabels([])
# Posterior under a 0 observation
ax = fig.add_subplot(133)
axs.append(ax)
cf = ax.contourf(yv, xv, P0.detach().numpy().reshape(ngrid, ngrid), levels=np.linspace(0, 1, 20), alpha=0.2)
ax.set_title('Look-ahead posterior\n$\pi(\mathbf{x} | \mathcal{D}_{n+1}(\mathbf{x}_*, y_*=0))$')
ax.plot(X[y_is_1, 1], X[y_is_1, 0], 'kx', ls='None', mew=0.5, ms=5, alpha=0.3)
ax.plot(X[~y_is_1, 1], X[~y_is_1, 0], 'ko', ls='None', mew=0.5, ms=5, fillstyle='none', alpha=0.3)
ax.plot(x_plt[indx_star, 1], x_plt[indx_star, 0], 'ro', ls='None', mew=1, ms=5, fillstyle='none')
ax.set_xlabel('$x_1$')
ax.set_yticks([-1.0, -0.5, 0.0, 0.5, 1.0])
ax.set_yticklabels([])
fig.subplots_adjust(bottom=0.2, left=0.13, top=0.8, right=0.85)
fig.colorbar(cf, ax=axs, ticks=[0, 0.25, 0.5, 0.75, 1.0], pad=0.01)
plt.savefig('pdfs/posteriors.pdf', pad_inches=0)
if __name__ == '__main__':
make_figure()
|
bernoulli_lse-main
|
figures/plot_posteriors.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
from plot_config import *
# need cameraready for original thresh
rundata = list(Path("../data/cameraready/").glob("*out.csv"))
sensitivity_rundata = list(Path("../data/thresh_sensitivity").glob("*out.csv"))
def compile_results():
orig_dfiles = pd.concat([pd.read_csv(d) for d in rundata])
thresh_dfiles = pd.concat([pd.read_csv(d) for d in sensitivity_rundata])
# the hartmann thresh=0.5 runs were duplicated so drop them
idx = np.logical_not(np.logical_and(
thresh_dfiles.problem == "hartmann6_binary",
thresh_dfiles.opt_strat_target == 0.5,
))
df = pd.concat([orig_dfiles, thresh_dfiles[idx]])
df["method"] = df.OptimizeAcqfGenerator_acqf.fillna("Quasi-random").astype(
"category"
)
df["method"] = df.method.cat.rename_categories(
{
"MCLevelSetEstimation": "Straddle",
"MCPosteriorVariance": "BALV", # comment me out for largerun
"BernoulliMCMutualInformation": "BALD", # comment me out for largerun
}
)
df = df[df.final == True]
acqfs = [
"EAVC",
"LocalMI",
"GlobalMI",
]
problems = list(df["problem"].unique())
problems.remove("discrim_lowdim")
res = {prob: {} for prob in problems}
for levels, g in df.groupby(["method", "rep", "problem", "opt_strat_target"]):
method, _, prob, target = levels
if method in acqfs and prob in problems:
acqf = f"{method}_{target}"
if acqf not in res[prob]:
res[prob][acqf] = []
res[prob][acqf].append(g["brier"].item())
for prob in problems:
for acqf in res[prob]:
res[prob][acqf] = np.array(res[prob][acqf])
return res
def make_thresh_sensitivity_figure():
res = compile_results()
methods = [
"EAVC",
"LocalMI",
"GlobalMI",
#"Quasi-random" # Not run for this study
]
# Make the plot
fig = plt.figure(figsize=(6.75, 2.3))
prob = "hartmann6_binary"
threshes = [0.5, 0.65, 0.95]
ax = fig.add_subplot(131)
for i, method in enumerate(methods):
ymean = [res[prob][f"{method}_{thresh}"].mean() for thresh in threshes]
yerr = [
2
* res[prob][f"{method}_{thresh}"].std()
/ np.sqrt(len(res[prob][f"{method}_{thresh}"]))
for thresh in threshes
]
color, ls = method_styles[method]
ax.errorbar(
threshes,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(threshes, ymean, color=color, ls=ls, label=method, lw=1)
ax.set_xticks(threshes)
ax.grid(alpha=0.1)
ax.set_title("Binarized Hartmann6 (6-d)")
ax.set_xlabel("Target threshold")
ax.set_ylabel("Final Brier Score\nafter 750 iterations")
ax.legend(loc="lower left", bbox_to_anchor=(0.9, -0.63), ncol=3)
prob = "discrim_highdim"
ax = fig.add_subplot(132)
threshes = [0.65, 0.75, 0.95]
for i, method in enumerate(methods):
ymean = [res[prob][f"{method}_{thresh}"].mean() for thresh in threshes]
yerr = [
2
* res[prob][f"{method}_{thresh}"].std()
/ np.sqrt(len(res[prob][f"{method}_{thresh}"]))
for thresh in threshes
]
color, ls = method_styles[method]
ax.errorbar(
threshes,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(threshes, ymean, color=color, ls=ls, label=method, lw=1)
ax.set_xticks(threshes)
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (8-d)")
ax.set_xlabel("Target threshold")
prob = "contrast_sensitivity_6d"
ax = fig.add_subplot(133)
for i, method in enumerate(methods):
ymean = [res[prob][f"{method}_{thresh}"].mean() for thresh in threshes]
yerr = [
2
* res[prob][f"{method}_{thresh}"].std()
/ np.sqrt(len(res[prob][f"{method}_{thresh}"]))
for thresh in threshes
]
color, ls = method_styles[method]
ax.errorbar(
threshes,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(threshes, ymean, color=color, ls=ls, label=method, lw=1)
ax.set_xticks(threshes)
ax.grid(alpha=0.1)
ax.set_xlabel("Target threshold")
ax.set_title("Contrast Sensitivity (6-d)")
fig.subplots_adjust(bottom=0.34, left=0.09, top=0.91, right=0.99, wspace=0.3)
plt.savefig("pdfs/thresh_sensitivity.pdf", pad_inches=0)
if __name__ == "__main__":
make_thresh_sensitivity_figure()
|
bernoulli_lse-main
|
figures/plot_thresh_sensitivity_results.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import pickle
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('..')
from plot_config import *
from plot_experiment_results import compile_results, run_data
def make_figure():
res, itrs = compile_results(run_data)
edge_samp = res['prop_edge_sampling_mean']
# Make the plot
fig = plt.figure(figsize=(6.75, 3))
prob = 'hartmann6_binary'
methods = list(method_styles.keys())[:8]
ax = fig.add_subplot(131)
for i, model_name in enumerate(methods):
y = edge_samp[prob][model_name][:,-1].mean()
n = edge_samp[prob][model_name].shape[0]
yerr = 2 * edge_samp[prob][model_name][:,-1].std() / np.sqrt(n)
ax.bar([i], [y], yerr=yerr, color=method_styles[model_name][0])
print(model_name, y)
ax.set_xticks(range(8))
ax.set_xticklabels(methods, rotation=90)
ax.set_ylim([0, 1])
ax.set_yticks([0, 0.25, 0.5, 0.75, 1])
ax.set_yticklabels(['0\%', '25\%', '50\%', '75\%', '100\%'])
ax.grid(alpha=0.1)
ax.set_ylabel('Proportion of samples near edge')
ax.set_title('Binarized Hartmann6')
prob = 'discrim_lowdim'
ax = fig.add_subplot(132)
for i, model_name in enumerate(methods):
y = edge_samp[prob][model_name][:,-1].mean()
n = edge_samp[prob][model_name].shape[0]
yerr = 2 * edge_samp[prob][model_name][:,-1].std() / np.sqrt(n)
ax.bar([i], [y], yerr=yerr, color=method_styles[model_name][0])
ax.set_xticks(range(8))
ax.set_xticklabels(methods, rotation=90)
ax.set_ylim([0, 1])
ax.set_yticks([0, 0.25, 0.5, 0.75, 1])
ax.set_yticklabels([])
ax.grid(alpha=0.1)
ax.set_title('Psych. Discrimination (2-d)')
prob = 'discrim_highdim'
ax = fig.add_subplot(133)
for i, model_name in enumerate(methods):
y = edge_samp[prob][model_name][:,-1].mean()
n = edge_samp[prob][model_name].shape[0]
yerr = 2 * edge_samp[prob][model_name][:,-1].std() / np.sqrt(n)
ax.bar([i], [y], yerr=yerr, color=method_styles[model_name][0])
ax.set_xticks(range(8))
ax.set_xticklabels(methods, rotation=90)
ax.set_ylim([0, 1])
ax.set_yticks([0, 0.25, 0.5, 0.75, 1])
ax.set_yticklabels([])
ax.grid(alpha=0.1)
ax.set_title('Psych. Discrimination (8-d)')
fig.subplots_adjust(bottom=0.34, left=0.08, top=0.93, right=0.99, wspace=0.1)
plt.savefig('pdfs/edge_sampling.pdf', pad_inches=0)
if __name__ == '__main__':
make_figure()
|
bernoulli_lse-main
|
figures/plot_edge_sampling.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
import pandas as pd
import sys
from pathlib import Path
import matplotlib.pyplot as plt
sys.path.append('..')
from plot_config import *
import re
run_data = list(Path("../data/cameraready/").glob("*out.csv"))
def compile_results(run_data):
dfiles = [pd.read_csv(d) for d in run_data]
df = pd.concat(dfiles)
df["method"] = df.OptimizeAcqfGenerator_acqf.fillna("Quasi-random").astype(
"category"
)
df["method"] = df.method.cat.rename_categories(
{
"MCLevelSetEstimation": "Straddle",
"MCPosteriorVariance": "BALV",
"BernoulliMCMutualInformation": "BALD",
}
)
df = df[df.final==False]
acqfs = [
"Straddle",
"EAVC",
"LocalMI",
"GlobalMI",
"LocalSUR",
"GlobalSUR",
"ApproxGlobalSUR",
"Quasi-random",
"BALD",
"BALV",
]
metrics = [
"brier",
"class_errors",
"prop_edge_sampling_mean",
"fit_time",
"gen_time",
]
problems = df["problem"].unique()
res = {
metric: {prob: {acqf: [] for acqf in acqfs} for prob in problems}
for metric in metrics
}
itrs = {}
for _, g in df.groupby(["method", "rep", "problem"]):
prob = g["problem"].values[0]
if prob in itrs:
if not np.array_equal(g["trial_id"].values, itrs[prob]):
                raise Exception(f"inconsistent trial_id grid for problem {prob}")
else:
itrs[prob] = g["trial_id"].values
acqf = g["method"].values[0]
for metric in metrics:
res[metric][prob][acqf].append(g[metric])
for metric in metrics:
for prob in problems:
for acqf in acqfs:
res[metric][prob][acqf] = np.array(res[metric][prob][acqf])
return res, itrs
def make_benchmark_figure():
res, itrs = compile_results(run_data)
# Make the plot
fig = plt.figure(figsize=(6.75, 2.3))
# Plot each problem
metric = "brier"
prob = "hartmann6_binary"
methods = list(method_styles.keys())[:8]
# prob = 'Binarized\n Hartmann6'
ax = fig.add_subplot(131)
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 760])
ax.set_xticks([0, 250, 500, 750])
ax.set_ylim([0.15, 0.5])
ax.set_yticks([0.2, 0.3, 0.4, 0.5])
ax.grid(alpha=0.1)
ax.set_title("Binarized Hartmann6 (6-d)")
ax.set_xlabel("Iteration")
ax.set_ylabel("Brier Score")
ax.legend(loc="lower left", bbox_to_anchor=(0.45, -0.63), ncol=4)
prob = "discrim_lowdim"
ax = fig.add_subplot(132)
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 510])
ax.set_ylim([0.01, 0.10])
ax.set_yticks([0.02, 0.04, 0.06, 0.08, 0.1])
ax.set_xticks([0, 100, 200, 300, 400, 500])
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (2-d)")
ax.set_xlabel("Iteration")
prob = "discrim_highdim"
ax = fig.add_subplot(133)
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 760])
ax.set_xticks([0, 250, 500, 750])
ax.set_ylim([0.1, 0.5])
ax.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5])
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (8-d)")
ax.set_xlabel("Iteration")
fig.subplots_adjust(bottom=0.34, left=0.065, top=0.91, right=0.99, wspace=0.2)
plt.savefig("pdfs/benchmarks.pdf", pad_inches=0)
def make_realworld_figure():
res, itrs = compile_results(run_data)
# Make the plot
fig = plt.figure(figsize=(3.25, 3))
ax = fig.add_subplot(111)
prob = "contrast_sensitivity_6d"
metric = "brier"
methods = list(method_styles.keys())[:8]
for method in methods:
y = res[metric][prob][method]
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
color, ls = method_styles[method]
ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
ax.set_xlim([0, 760])
ax.set_xticks([0, 250, 500, 750])
ax.set_ylim([0.1, 0.6])
ax.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
ax.grid(alpha=0.1)
ax.set_title("Contrast Sensitivity (6-d)")
ax.set_xlabel("Iteration")
ax.set_ylabel("Brier Score")
ax.legend(ncol=2, loc="lower left", bbox_to_anchor=[0.05, -0.7])
fig.subplots_adjust(bottom=0.38, left=0.13, top=0.93, right=0.98)
plt.savefig("pdfs/realworld.pdf", pad_inches=0)
if __name__ == "__main__":
make_benchmark_figure()
make_realworld_figure()
|
bernoulli_lse-main
|
figures/plot_experiment_results.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
from pathlib import Path
from plot_config import *
# need cameraready for init=10
rundata = list(Path("../data/cameraready/").glob("*out.csv")) + list(Path("../data/init_sensitivity").glob("*out.csv"))
def compile_results():
dfiles = [pd.read_csv(d) for d in rundata]
df = pd.concat(dfiles)
df["method"] = df.OptimizeAcqfGenerator_acqf.fillna("Quasi-random").astype(
"category"
)
df["method"] = df.method.cat.rename_categories(
{
"MCLevelSetEstimation": "Straddle",
"MCPosteriorVariance": "BALV",
"BernoulliMCMutualInformation": "BALD",
}
)
df = df[df.final == True]
acqfs = [
"EAVC",
"LocalMI",
"GlobalMI",
"Quasi-random",
]
problems = list(df["problem"].unique())
problems.remove("discrim_lowdim")
res = {prob: {} for prob in problems}
for levels, g in df.groupby(["method", "rep", "problem", "init_strat_n_trials"]):
method, _, prob, n_init = levels
if method in acqfs and prob in problems:
acqf = f"{method}_{n_init}"
if acqf not in res[prob]:
res[prob][acqf] = []
res[prob][acqf].append(g["brier"].item())
for prob in problems:
for acqf in res[prob]:
res[prob][acqf] = np.array(res[prob][acqf])
return res
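# Shape of the returned data (descriptive note, not in the original script):
# res[problem][f"{method}_{n_init}"] is a 1-d array of final Brier scores, one
# per replicate, used below to plot sensitivity to the initial design size.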
def make_init_sensitivity_figure():
res = compile_results()
ninits = [10, 100, 250, 500]
# Make the plot
fig = plt.figure(figsize=(6.75, 2.3))
prob = "hartmann6_binary"
ax = fig.add_subplot(131)
methods = [
"EAVC",
"LocalMI",
"GlobalMI",
]
for i, method in enumerate(methods):
ymean = [res[prob][f"{method}_{ninit}"].mean() for ninit in ninits]
yerr = [
2
* res[prob][f"{method}_{ninit}"].std()
/ np.sqrt(len(res[prob][f"{method}_{ninit}"]))
for ninit in ninits
]
color, ls = method_styles[method]
ax.errorbar(
ninits,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(ninits, ymean, lw=1, label=method, color=color, ls=ls)
# Add Sobol
ymean = [res[prob]["Quasi-random_10"].mean() for ninit in ninits]
yerr = [
2
* res[prob]["Quasi-random_10"].std()
/ np.sqrt(len(res[prob]["Quasi-random_10"]))
for ninit in ninits
]
color, ls = method_styles["Quasi-random"]
ax.errorbar(
ninits,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(ninits, ymean, lw=1, label="Quasi-random", color=color, ls=ls)
ax.set_xticks(ninits)
ax.set_ylim([0.15, 0.3])
ax.set_yticks([0.15, 0.2, 0.25, 0.3])
ax.grid(alpha=0.1)
ax.set_title("Binarized Hartmann6 (6-d)")
ax.set_xlabel("Initial design size")
ax.set_ylabel("Final Brier Score\nafter 750 iterations")
ax.legend(loc="lower left", bbox_to_anchor=(0.45, -0.63), ncol=4)
prob = "discrim_highdim"
ax = fig.add_subplot(132)
for i, method in enumerate(methods):
ymean = [res[prob][f"{method}_{ninit}"].mean() for ninit in ninits]
yerr = [
2
* res[prob][f"{method}_{ninit}"].std()
/ np.sqrt(len(res[prob][f"{method}_{ninit}"]))
for ninit in ninits
]
color, ls = method_styles[method]
ax.errorbar(
ninits,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(ninits, ymean, lw=1, label=method, color=color, ls=ls)
# Add Sobol
ymean = [res[prob]["Quasi-random_10"].mean() for ninit in ninits]
yerr = [
2
* res[prob]["Quasi-random_10"].std()
/ np.sqrt(len(res[prob]["Quasi-random_10"]))
for ninit in ninits
]
color, ls = method_styles["Quasi-random"]
ax.errorbar(
ninits,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(ninits, ymean, lw=1, label="Quasi-random", color=color, ls=ls)
ax.set_xticks(ninits)
ax.grid(alpha=0.1)
ax.set_title("Psych. Discrimination (8-d)")
ax.set_xlabel("Initial design size")
ax.set_ylim([0.1, 0.4])
ax.set_yticks([0.1, 0.2, 0.3, 0.4])
prob = "contrast_sensitivity_6d"
ax = fig.add_subplot(133)
for i, method in enumerate(methods):
ymean = [res[prob][f"{method}_{ninit}"].mean() for ninit in ninits]
yerr = [
2
* res[prob][f"{method}_{ninit}"].std()
/ np.sqrt(len(res[prob][f"{method}_{ninit}"]))
for ninit in ninits
]
color, ls = method_styles[method]
ax.errorbar(
ninits,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(ninits, ymean, lw=1, label=method, color=color, ls=ls)
# Add Sobol
ymean = [res[prob]["Quasi-random_10"].mean() for ninit in ninits]
yerr = [
2
* res[prob]["Quasi-random_10"].std()
/ np.sqrt(len(res[prob]["Quasi-random_10"]))
for ninit in ninits
]
color, ls = method_styles["Quasi-random"]
ax.errorbar(
ninits,
ymean,
yerr=yerr,
lw=1,
alpha=0.3,
color=color,
ls=ls,
)
ax.plot(ninits, ymean, lw=1, label="Quasi-random", color=color, ls=ls)
ax.set_xticks(ninits)
ax.set_ylim([0.12, 0.25])
ax.set_yticks([0.12, 0.15, 0.18, 0.21, 0.24])
ax.grid(alpha=0.1)
ax.set_xlabel("Initial design size")
ax.set_title("Contrast Sensitivity (6-d)")
fig.subplots_adjust(bottom=0.34, left=0.093, top=0.91, right=0.99, wspace=0.3)
plt.savefig("pdfs/init_sensitivity.pdf", pad_inches=0)
if __name__ == "__main__":
make_init_sensitivity_figure()
|
bernoulli_lse-main
|
figures/plot_init_sensitivity_results.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from datetime import datetime
"""
Develop an experiment that measures any combination of the following features:
spatial_frequency
temporal_frequency
mean_luminance
eccentricity
field_angle
orientation
"""
constants = dict(
savefolder="./databases/",
timestamp=datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
config_path="./config_8d.ini",
seed=1,
)
base_params = {
"spatial_frequency": 2,
"orientation": 0,
"pedestal": 0.5,
"contrast": 0.75,
"temporal_frequency": 0,
"size": 10,
"angle_dist":0,
"eccentricity": 0,
}
psychopy_vars = dict(
setSizePix=[1680, 1050],
setWidth=47.475,
setDistance=57,
pre_duration_s=0.0,
stim_duration_s=5.0,
post_duration_s=1,
response_wait=2,
iti=0,
)
|
bernoulli_lse-main
|
human_data_collection/config.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
import torch
from aepsych.server import AEPsychServer
from psychopy import core, data, event, gui, monitors, visual
from contrast_discrimination import config
from contrast_discrimination.helpers import HalfGrating
class ServerHelper:
def __init__(self, config_path, db_path):
self._server = AEPsychServer(database_path=db_path)
with open(config_path, "r") as f:
configstr = f.read()
self._server.handle_setup_v01(
{"type": "setup", "message": {"config_str": configstr}}
)
def ask(self):
request = {"message": "", "type": "ask"}
return self._server.handle_ask_v01(request)["config"]
def tell(self, config, outcome):
request = {
"type": "tell",
"message": {"config": config, "outcome": outcome},
}
self._server.handle_tell(request)
def run_experiment():
seed = config.constants["seed"]
config_path = config.constants["config_path"]
torch.manual_seed(seed)
np.random.seed(seed)
expInfo = {}
expInfo["dateStr"] = data.getDateStr() # add the current time
# present a dialogue to change params
dlg = gui.DlgFromDict(expInfo, title="multi-D JND Exp", fixed=["dateStr"])
if not dlg.OK:
core.quit() # the user hit cancel so exit
# where to save data
fileName = "../data/csf_dataset"
screen = monitors.Monitor("testMonitor", gamma=1)
screen.setSizePix(config.psychopy_vars["setSizePix"])
screen.setWidth(config.psychopy_vars["setWidth"])
screen.setDistance(config.psychopy_vars["setDistance"])
win = visual.Window(
allowGUI=True,
units="deg",
monitor=screen,
bpc=(8, 8, 8),
size=config.psychopy_vars["setSizePix"],
fullscr=False,
)
screen_text_g = visual.TextStim(win, text=None, alignHoriz="center", color="green")
screen_text_r = visual.TextStim(win, text=None, alignHoriz="center", color="red")
screen_text = visual.TextStim(win, text=None, alignHoriz="center", color="gray")
# display instructions and wait
message2 = visual.TextStim(
win,
pos=[0, +3],
text="Hit the space bar key when ready and "
"to advance to the next trial after you see a red cross.",
)
message1 = visual.TextStim(
win,
pos=[0, -3],
text="You'll see a stimulus. One side will have a grating and the other will be noise."
" "
"Press left or right corresponding to the side with noise. If you don't know, please guess.",
)
message1.draw()
message2.draw()
win.flip() # to show our newly drawn 'stimuli'
# pause until there's a keypress
event.waitKeys()
# start the trial: draw grating
clock = core.Clock()
screen_text_r.setText(
"+"
    )  # this should update the fixation with the new background color, but it doesn't for some reason
screen_text_r.draw(win=win)
win.flip()
server_helper = ServerHelper(config_path=config_path, db_path=f"{fileName}.db")
# create stimulus
stim = HalfGrating(**config.base_params, win=win)
i = 0
while not server_helper._server.strat.finished:
trial_params = server_helper.ask()
stim.update(trial_params)
bg_color = np.array([stim.pedestal_psychopy_scale] * 3)
win.setColor(bg_color)
win.color = bg_color
win.flip()
screen_text_r.setText("+")
screen_text_r.draw(win=win)
win.flip()
fixation_keys = []
while not fixation_keys:
fixation_keys = event.getKeys(keyList=["space"])
if "space" in fixation_keys:
screen_text.setText("+")
screen_text.draw(win=win)
win.flip()
noisy_half = "left" if np.random.randint(2) == 0 else "right"
clock.reset()
keys = stim.draw(
noisy_half=noisy_half,
win=win,
pre_duration_s=config.psychopy_vars["pre_duration_s"],
stim_duration_s=config.psychopy_vars["stim_duration_s"],
)
response = noisy_half in keys
win.flip()
if response:
screen_text_g.setText("Correct")
screen_text_g.draw()
win.flip()
else:
screen_text_r.setText("Incorrect")
screen_text_r.draw()
win.flip()
server_helper.tell(trial_params, response)
event.clearEvents()
i = i + 1
win.close()
pd_df = server_helper._server.get_dataframe_from_replay()
pd_df.to_csv(fileName + ".csv", index=False)
core.quit()
if __name__ == "__main__":
run_experiment()
|
bernoulli_lse-main
|
human_data_collection/experiment.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
from psychopy.visual.image import ImageStim
from psychopy import core, event
import pyglet
pyglet.options["debug_gl"] = False
GL = pyglet.gl
def polar_to_cartesian(r, theta):
z = r * np.exp(1j * np.radians(theta))
return z.real, z.imag
def cartesian_to_polar(x, y):
z = x + 1j * y
return (np.abs(z), np.angle(z, deg=True))
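# Illustrative round trip for the two helpers above (hedged example; angles are
# in degrees): polar_to_cartesian(2.0, 30.0) is approximately (1.732, 1.0), and
# cartesian_to_polar(*polar_to_cartesian(2.0, 30.0)) recovers (2.0, 30.0).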
class AnimatedGrating:
param_transforms = {"contrast": lambda x: 10 ** x, "pedestal": lambda x: 10 ** x}
def __init__(
self,
spatial_frequency,
orientation,
pedestal,
contrast,
temporal_frequency,
eccentricity,
size,
angle_dist,
win,
cpd=60, # display cycles per degree
Lmin=0, # min luminance in nits
Lmax=255, # max luminance in nits
res=256, # texture resolution
noisy=False,
*args,
**kw,
):
self.spatial_frequency = spatial_frequency
self.temporal_frequency = temporal_frequency
self.orientation = orientation
self.pedestal = pedestal
self.contrast = contrast
self.settable_params = (
"spatial_frequency",
"temporal_frequency",
"orientation",
"pedestal",
"contrast",
"size",
"eccentricity",
"angle_dist",
)
self.cpd = cpd
self.Lmin = Lmin
self.Lmax = Lmax
self.res = res
self.noisy = noisy
self.initial_phase = np.random.uniform(low=0, high=0.2, size=(1))
img = np.zeros((self.res, self.res))
self.win = win
self._stim = ImageStim(image=img, mask="gauss", win=win, *args, **kw)
# these get set on _stim
self.size = size
self.eccentricity = eccentricity
self.angle_dist = angle_dist
def update(self, trial_config):
for k, v in trial_config.items():
if k in self.settable_params:
if k in self.param_transforms:
setattr(self, k, self.param_transforms[k](v[0]))
else:
setattr(self, k, v[0])
@property
def size(self):
return self._stim.size
@size.setter
def size(self, x):
self._stim.size = x
@property
def eccentricity(self):
return cartesian_to_polar(*self._stim.pos)[0]
@eccentricity.setter
def eccentricity(self, x):
current_coords = cartesian_to_polar(*self._stim.pos)
self._stim.pos = polar_to_cartesian(x, current_coords[1])
@property
def angle_dist(self):
return cartesian_to_polar(*self._stim.pos)[1]
@angle_dist.setter
def angle_dist(self, deg):
current_coords = cartesian_to_polar(*self._stim.pos)
self._stim.pos = polar_to_cartesian(current_coords[0], deg + 90)
@property
def pedestal_psychopy_scale(self):
return self.pedestal * 2 - 1
def draw(
self,
noisy=False,
win=None,
pre_duration_s=0.1,
stim_duration_s=5.0,
*args,
**kwargs,
):
win = win or self.win
clock = core.Clock()
clock.reset()
self._stim.image = self.get_texture(self.initial_phase, noisy=noisy)
while clock.getTime() < pre_duration_s:
win.flip()
start_time = clock.getTime()
while clock.getTime() < pre_duration_s + stim_duration_s:
if self.temporal_frequency > 0:
newphase = (clock.getTime() - start_time) * self.temporal_frequency
self._stim.image = self.get_texture(
newphase + self.initial_phase, noisy=noisy
)
self._stim.draw()
def get_texture(self, phase=0, noisy=False):
pedestal_lum = self.pedestal * (self.Lmax - self.Lmin) + self.Lmin
grating_max = (self.contrast * (2 * pedestal_lum + self.Lmin) + self.Lmin) / 2
x = np.arange(0, self.res) / self.cpd + phase
y = np.arange(0, self.res) / self.cpd + phase
x_grid, y_grid = np.meshgrid(x, y)
wave = x_grid * np.cos(np.radians(self.orientation)) + y_grid * np.sin(
np.radians(self.orientation)
)
scaled_imag_wave = 1j * 2 * np.pi * self.spatial_frequency * wave
img = grating_max * np.real(np.exp(scaled_imag_wave)) + pedestal_lum
# convert from luminance to values in [-1, 1] as psychopy wants
img = img / ((self.Lmax - self.Lmin) / 2) - 1
if noisy:
flatimg = img.flatten()
np.random.shuffle(flatimg)
img = flatimg.reshape(self.res, self.res)
return img
class HalfGrating(AnimatedGrating):
def noisify_half_texture(self, img, noisy_half):
img = img.T # transpose so our indexing tricks work
flatimg = img.flatten()
if noisy_half == "left":
noisy = flatimg[: (self.res ** 2) // 2]
np.random.shuffle(noisy)
img = np.r_[noisy, flatimg[(self.res ** 2) // 2 :]].reshape(
self.res, self.res
)
else:
noisy = flatimg[(self.res ** 2) // 2 :]
np.random.shuffle(noisy)
img = np.r_[flatimg[: (self.res ** 2) // 2], noisy].reshape(
self.res, self.res
)
return img.T # untranspose
def get_texture(self, phase, noisy_half):
img = super().get_texture(phase, noisy=False)
img = self.noisify_half_texture(img, noisy_half)
return img
def draw(
self,
noisy_half="left",
win=None,
pre_duration_s=0.1,
*args,
**kwargs,
):
win = win or self.win
clock = core.Clock()
clock.reset()
event.clearEvents()
self._stim.image = self.get_texture(self.initial_phase, noisy_half=noisy_half)
while clock.getTime() < pre_duration_s:
win.flip()
start_time = clock.getTime()
while True:
if self.temporal_frequency > 0:
newphase = (clock.getTime() - start_time) * self.temporal_frequency
self._stim.image = self.get_texture(
newphase + self.initial_phase, noisy_half=noisy_half
)
self._stim.draw()
keys = event.getKeys(keyList=["left", "right"])
win.flip()
if len(keys) > 0:
return keys
return keys
class ExperimentAborted(Exception):
pass
class QuitHelper:
"""Helper to quit the experiment by pressing a key twice within 500ms.
    It quits by raising 'ExperimentAborted'. This indirection is necessary
    because psychopy checks its global key events in a separate thread, from
    which an exception cannot be raised in the main thread.
"""
def __init__(self):
self.quit_requested = False
self.debounce_timestamp = None
def request_quit(self):
"""Must be called twice in 500ms to set a flag that causes ExperimentAborted
to be raised when quit_if_requested is called. This indirection is needed if request_quit
is called from a separate thread (as with psychopy global event keys)
"""
tprev = self.debounce_timestamp
tnow = core.getTime()
        if tprev is not None and tnow - tprev < 0.5:
self.quit_requested = True
self.debounce_timestamp = tnow
def quit_if_requested(self):
"""Raises ExperimentAborted if request_quit has been called twice in 500ms"""
if self.quit_requested:
raise ExperimentAborted
return True
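# Hedged usage sketch for QuitHelper (not part of the original module): it is
# meant to be registered with a psychopy global event key and polled once per
# frame in the main loop, roughly like
#
#   quitter = QuitHelper()
#   event.globalKeys.add(key="escape", func=quitter.request_quit)  # assumed psychopy API
#   ...
#   quitter.quit_if_requested()  # raises ExperimentAborted after a double press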
|
bernoulli_lse-main
|
human_data_collection/helpers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from data import NL2BashDataset
from collectors import CollectorWithInfo
import argparse
if __name__ == "__main__":
dataset = NL2BashDataset()
parser = argparse.ArgumentParser()
parser.add_argument("--num_seeds", type=int, default=25)
parser.add_argument("--num_samples", type=int, default=5)
args = CollectorWithInfo.parse_args(parser)
args.dataset = "nl2bash"
args.seed = list(range(args.num_seeds))
args.prompt_template = "<text>{src}</text>\n<code>{trg}</code>\n"
args.example_template = "<text>{src}</text>\n<code>"
collector = CollectorWithInfo.from_args(args, dataset)
for i in range(args.num_samples):
collector(i, i, 5)
|
coder_reviewer_reranking-main
|
collect_nl2bash.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from pathlib import Path
import os
from glob import glob
from argparse import ArgumentParser
import html
import json
from utils import *
from tqdm import tqdm, trange
from data import HumanEvalDataset, rindex, extract_docstring
from functools import partial
from pyminifier_canonicalize import clean_comment, remove_print
def postprocess_func_only(code, tokens):
    """Keep only the indented function body (dropping any top-level lines) and
    truncate `tokens` so they cover exactly the kept code."""
lines = []
for line in code.split("\n"):
if len(line.strip()) > 0 and not line.startswith(" "):
continue
else:
lines.append(line)
code = "\n".join(lines)
code = code.rstrip()
curr = ""
for i, tok in enumerate(tokens):
curr += tok
if len(curr) >= len(code):
break
return code, tokens[: i + 1]
def make_new_context(
codex_data,
problem,
canonicalize=False,
clean_print=False,
):
prompt = codex_data["prompt"]
code_sample = codex_data["trg_prediction"]
if canonicalize:
try:
code_sample = clean_comment(code_sample)
except:
            # clean_comment raised (static error); keep the sample unchanged
            code_sample = code_sample
if clean_print:
code_sample = remove_print(code_sample)
func_name = problem["entry_point"]
docstring, func_header, func_context, doc_start = extract_docstring(prompt)
if canonicalize:
func_header = func_header.replace(f"{func_name}(", "f(")
docstring = docstring.replace(f"{func_name}(", "f(")
code_sample = code_sample.replace(f"{func_name}(", "f(")
reverse_prompt = "\n\n# write the docstring for the above function\n"
without_ref = (
func_context
+ "\n"
+ func_header.strip()
+ "\n"
+ code_sample
+ reverse_prompt
+ func_header.strip()
+ "\n"
+ f" {doc_start}"
)
with_ref = without_ref + docstring.strip()[3:]
return with_ref.rstrip(), without_ref
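# Rough shape of the prompt assembled above (hedged sketch; <...> are
# placeholders, and the function is renamed to f(...) when canonicalize=True):
#
#   <func_context>
#   <func_header>
#   <code_sample>
#
#   # write the docstring for the above function
#   <func_header>
#    <doc_start>           <- without_ref ends here
#    <docstring body>      <- with_ref additionally appends the reference docstring
#
# Scoring the appended docstring tokens under the model gives p(docstring | code).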
def rindex(lst, value):
return len(lst) - lst[::-1].index(value) - 1
def find_start(tokens):
    """Return the index in the echoed token list where the reference docstring
    content begins (just after the last docstring-opening marker)."""
    tokens = tokens[:-2]  # remove last docstring marker
for marker in [' """', " '''", ' ""', "''"]:
if marker in tokens:
return rindex(tokens[:-1], marker) + 1
raise ValueError("not found")
def batch_query_reverse_logp(all_codex_data, args):
for outer_i, batch_start in enumerate(
range(0, len(all_codex_data), args.batch_size)
):
batch_data = all_codex_data[batch_start : batch_start + args.batch_size]
batch_prompts = []
batch_data_with_prompt = []
for codex_data, problem in batch_data:
            # TODO: postprocessing; should move elsewhere
codex_data["trg_prediction"], codex_data["tokens"] = postprocess_func_only(
codex_data["trg_prediction"], codex_data["tokens"]
)
codex_data["logprobs"] = codex_data["logprobs"][: len(codex_data["tokens"])]
with_ref_prompt, without_ref_prompt = make_new_context(
codex_data,
problem,
canonicalize=args.canonicalize,
clean_print=args.clean_print,
)
batch_prompts.append(with_ref_prompt)
batch_data_with_prompt.append(
(codex_data, problem, with_ref_prompt, without_ref_prompt)
)
with_ref_reponse, _ = safe_codex_call(
args,
batch_prompts,
temperature=1.0,
echo=True,
max_tokens=0,
api_i=(outer_i % 3),
)
for (
batch_i,
(codex_data, problem, with_ref_prompt, without_ref_prompt),
) in enumerate(batch_data_with_prompt):
num_api_tokens = find_start(
with_ref_reponse["choices"][batch_i]["logprobs"]["tokens"]
)
gt_prompt_logprob = with_ref_reponse["choices"][batch_i]["logprobs"][
"token_logprobs"
][num_api_tokens:]
gt_prompt_tokens = with_ref_reponse["choices"][batch_i]["logprobs"][
"tokens"
][num_api_tokens:]
codex_data["reverse_prompt_with_ref"] = with_ref_prompt
codex_data["reverse_prompt_without_ref"] = without_ref_prompt
codex_data["prompt_reverse_logprobs"] = gt_prompt_logprob
codex_data["prompt_reverse_tokens"] = gt_prompt_tokens
codex_data["prompt_reverse_full_tokens"] = with_ref_reponse["choices"][
batch_i
]["logprobs"]["tokens"]
codex_data["prompt_reverse_full_logprobs"] = with_ref_reponse["choices"][
batch_i
]["logprobs"]["token_logprobs"]
all_codex_data = [d[0] for d in all_codex_data]
return all_codex_data
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--model", type=str, default="codex001")
parser.add_argument(
"--dataset",
type=str,
default="humaneval",
choices=["humaneval", "codet_humaneval", "mbpp_sanitized"],
)
parser.add_argument("--tag", type=str, default="")
parser.add_argument("--split", type=str, default="test")
parser.add_argument("--num_samples", type=int, default=5)
parser.add_argument("--num_procs", type=int, default=40)
parser.add_argument(
"--data_path",
type=str,
default="./samples/codex002",
)
parser.add_argument("--temperature", type=float, default=0.3)
parser.add_argument("--max_tokens", type=int, default=512)
parser.add_argument("--batch_size", type=int, default=20)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--canonicalize", default=False, action="store_true")
parser.add_argument("--clean-print", default=False, action="store_true")
parser.add_argument("--overwrite-output-dir", default=False, action="store_true")
args = parser.parse_args()
args.data_path = Path(args.data_path)
out_dir = f"seed-*/**/*-{args.temperature}"
if args.top_p != 1.0:
out_dir += f"-p{args.top_p}"
if args.max_tokens != 512:
out_dir += f"-max{args.max_tokens}"
args.data_path = args.data_path / args.dataset / out_dir
paths = list(sorted(glob(str(args.data_path), recursive=True)))
if args.dataset == "codet_humaneval":
dataset = HumanEvalDataset(
"dataset/human_eval/dataset/CodeTHumanEval.jsonl", mode="prompt_only"
)
else:
dataset = HumanEvalDataset(
path="dataset/mbpp/mbpp_sanitized_for_code_generation.jsonl",
mode="prompt_only",
)
prompt_to_data = {p["prompt"]: p for task_id, p in dataset.raw_data.items()}
paths = sorted(paths)
for path in tqdm(paths, desc="total seeds", disable=False):
path = Path(path)
for sample_i in trange(args.num_samples):
if len(args.tag) == 0:
output_file_name = f"{args.split}-{sample_i}.jsonl"
else:
output_file_name = f"{args.split}-{sample_i}-{args.tag}.jsonl"
try:
all_codex_data = []
with open(path / f"{args.split}-{sample_i}.jsonl", "r") as f:
for i, line in enumerate(f):
codex_data = json.loads(line)
raw_data = prompt_to_data[codex_data["prompt"]]
all_codex_data.append((codex_data, raw_data))
except Exception as e:
print(e)
print(f"{path / output_file_name} not ready yet. skipping.")
continue
if (path / output_file_name).exists() and not args.overwrite_output_dir:
with open(path / output_file_name, "r") as f:
line_num = len(f.readlines())
if line_num == len(all_codex_data):
print(f"skipping {path / output_file_name}")
continue
from multiprocessing import Pool
if args.num_procs > 1:
all_codex_data_with_reverse = []
chunk_size = len(all_codex_data) // args.num_procs + 1
chunked_all_codex_data = [
all_codex_data[chunk_start : chunk_start + chunk_size]
for chunk_start in range(0, len(all_codex_data), chunk_size)
]
with Pool(processes=args.num_procs) as pool:
for codex_data_with_reverse in pool.imap(
partial(batch_query_reverse_logp, args=args),
chunked_all_codex_data,
):
all_codex_data_with_reverse.extend(codex_data_with_reverse)
else:
all_codex_data_with_reverse = batch_query_reverse_logp(
all_codex_data, args
)
with open(path / output_file_name, "w") as f:
for codex_data_with_reverse in all_codex_data_with_reverse:
codex_data_json = json.dumps(codex_data_with_reverse)
f.write(codex_data_json + "\n")
|
coder_reviewer_reranking-main
|
zeroshot_reviewer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import copy
import json
import openai
import os
import pickle
import random
import signal
import time
from glob import glob
from nltk.translate.bleu_score import sentence_bleu
from tqdm import tqdm, trange
import re
codex_name_mapping = {
"codex-cushman": "code-cushman-001",
"codex002": "code-davinci-002",
"codex001": "code-davinci-001",
}
def codex_greedy(configs, prompt, max_tokens=512):
response = openai.Completion.create(
engine=codex_name_mapping[configs.engine_name]
if configs.engine_name is not None
else "davinci-codex",
prompt=prompt,
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=configs.end_template,
)
return response["choices"][0]["text"], None, None
def codex_sample(configs, prompt, max_tokens=512):
response = openai.Completion.create(
engine=codex_name_mapping[configs.engine_name]
if configs.engine_name is not None
else "davinci-codex",
prompt=prompt,
temperature=configs.temperature,
max_tokens=max_tokens,
top_p=configs.top_p,
frequency_penalty=0,
presence_penalty=0,
logprobs=1,
stop=configs.end_template,
)
return (
response["choices"][0]["text"],
response["choices"][0]["logprobs"]["tokens"],
response["choices"][0]["logprobs"]["token_logprobs"],
)
def codex_batch_greedy(configs, batch_prompts, max_tokens=512):
raise NotImplementedError
def codex_batch_sample(configs, batch_prompts, max_tokens=512):
response = openai.Completion.create(
engine=codex_name_mapping[configs.engine_name]
if configs.engine_name is not None
else "davinci-codex",
prompt=batch_prompts,
temperature=configs.temperature,
max_tokens=max_tokens,
top_p=configs.top_p,
frequency_penalty=0,
presence_penalty=0,
logprobs=1,
stop=configs.end_template,
)
return [
(
response["choices"][batch_i]["text"],
response["choices"][batch_i]["logprobs"]["tokens"],
response["choices"][batch_i]["logprobs"]["token_logprobs"],
)
for batch_i in range(len(batch_prompts))
]
def process_batch_examples(args_with_idx):
batch_i, batch_args = args_with_idx
all_prompts = []
for args in batch_args:
src, trg, info, prompt_prefix, configs = args
if configs.dataset in ["mbpp_sanitized", "humaneval", "codet_humaneval"]:
prompt = src
else:
prompt = prompt_prefix + configs.example_template.format(src=src, info=info)
all_prompts.append(prompt)
max_tokens = configs.max_tokens
while True:
if configs.engine_name == "codex002":
openai.organization = os.getenv(f"OPENAI_ORG{(batch_i%3)+1}")
else:
openai.organization = os.getenv("OPENAI_ORG1")
try:
batch_results = (
codex_batch_greedy(configs, all_prompts, max_tokens)
if configs.mode == "greedy"
else codex_batch_sample(configs, all_prompts, max_tokens)
)
break
except openai.error.InvalidRequestError as e:
print(f"Context len: halving gen tokens, curr: {max_tokens}", end="\r")
max_tokens = max_tokens // 2
if max_tokens < 32:
raise ValueError("Prompt too long")
except openai.error.RateLimitError as e:
print(type(e), re.search("Current: .+ / min", str(e))[0], end="\r")
time.sleep(30)
except Exception as e:
print(type(e), e)
time.sleep(10)
all_results = []
for args, prompt, (trg_prediction, tokens, logprobs) in zip(
batch_args, all_prompts, batch_results
):
src, trg, info, prompt_prefix, configs = args
if "humaneval" in configs.dataset or configs.dataset == "mbpp_sanitized":
if "\nprint" in trg_prediction:
for i in range(0, len(tokens) - 1):
if tokens[i : i + 2] == ["\n", "print"]:
break
tokens = tokens[:i]
logprobs = logprobs[:i]
trg_prediction = "".join(tokens)
if i == len(tokens) - 1:
raise ValueError("not matched")
result = {
"prompt": prompt,
"src": src,
"trg_prediction": trg_prediction,
"reference": trg,
"tokens": tokens,
"logprobs": logprobs,
}
all_results.append(json.dumps(result))
return all_results
def process_one_example(args):
src, trg, info, prompt_prefix, configs = args
if configs.dataset in ["mbpp_sanitized", "humaneval", "codet_humaneval"]:
prompt = src
else:
prompt = prompt_prefix + configs.example_template.format(src=src, info=info)
max_tokens = configs.max_tokens
while True:
try:
trg_prediction, tokens, logprobs = (
codex_greedy(configs, prompt, max_tokens)
if configs.mode == "greedy"
else codex_sample(configs, prompt, max_tokens)
)
break
except openai.error.InvalidRequestError as e:
print(f"Context len: halving gen tokens, curr: {max_tokens}", end="\r")
max_tokens = max_tokens // 2
if max_tokens < 32:
raise ValueError("Prompt too long")
except openai.error.RateLimitError as e:
print(type(e), re.search("Current: .+ / min", str(e))[0], end="\r")
time.sleep(30)
except Exception as e:
print(type(e), e)
time.sleep(10)
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
bleu_score = sentence_bleu(
[[ch for ch in trg]], [ch for ch in trg_prediction]
)
except:
bleu_score = 0
if "humaneval" in configs.dataset or configs.dataset == "mbpp_sanitized":
if "\nprint" in trg_prediction:
for i in range(0, len(tokens) - 1):
if tokens[i : i + 2] == ["\n", "print"]:
break
tokens = tokens[:i]
logprobs = logprobs[:i]
trg_prediction = "".join(tokens)
if i == len(tokens) - 1:
raise ValueError("not matched")
return json.dumps(
{
"prompt": prompt,
"src": src,
"trg_prediction": trg_prediction,
"reference": trg,
"tokens": tokens,
"logprobs": logprobs,
"bleu": bleu_score,
}
)
def codex_with_info(configs, dataset, prefixes):
# model
openai.api_key = os.getenv("OPENAI_API_KEY")
prompt_prefix = "".join(
[
configs.prompt_template.format(src=x[0], trg=x[1], info=x[2])
for x in prefixes
]
)
# save folder
if configs.top_p == 1:
save_dir = f"{configs.output_path}/seed-{configs.seed}/{configs.n_prompts}-shot/{configs.mode}-{configs.temperature}"
else:
save_dir = f"{configs.output_path}/seed-{configs.seed}/{configs.n_prompts}-shot/{configs.mode}-{configs.temperature}-p{configs.top_p}"
if configs.max_tokens != 512:
save_dir += f"-max{configs.max_tokens}"
os.system(f"mkdir -p {save_dir}")
# save configs and prefixes
if configs.rank == 0:
with open(f"{save_dir}/prefixes.json", "w") as fout:
json.dump(prefixes, fout)
fout.close()
with open(f"{save_dir}/configs.pkl", "wb") as fout:
pickle.dump(configs, fout)
fout.close()
ofname = f"{save_dir}/{configs.split}-{configs.rank}.jsonl"
if os.path.exists(ofname):
return
from multiprocessing import Pool
all_args = []
for (src, trg, info) in dataset:
all_args.append((src, trg, info, prompt_prefix, configs))
all_jsons = []
if configs.n_procs > 1:
all_args = [
all_args[chunk_start : chunk_start + configs.batch_size]
for chunk_start in range(0, len(all_args), configs.batch_size)
]
with Pool(processes=configs.n_procs) as pool:
for result_json in tqdm(
pool.imap(process_batch_examples, enumerate(all_args)),
total=len(all_args),
):
all_jsons.extend(result_json)
else:
for batch_i, batch_start in enumerate(
trange(0, len(all_args), configs.batch_size)
):
batch_args = all_args[batch_start : batch_start + configs.batch_size]
all_jsons.extend(process_batch_examples((batch_i, batch_args)))
with open(ofname, "w") as fout:
for jsonf in all_jsons:
fout.write(jsonf + "\n")
""" example collector: <src, trg, info> """
class CollectorWithInfo(object):
def __init__(self, configs, dataset):
self.configs = configs
self.dataset = dataset
def __call__(self, rank, local_rank, world_size):
configs = copy.deepcopy(self.configs)
configs.rank = rank
configs.gpu = local_rank
configs.world_size = world_size
args = []
for seed in self.configs.seed:
for n_prompts in self.configs.n_prompts:
args.append((seed, n_prompts, configs.temperature))
for seed, n_prompts, temperature in tqdm(args):
configs.n_prompts = n_prompts
configs.seed = seed
configs.temperature = temperature
random.seed(configs.seed)
if configs.n_prompts == 0:
prefixes = []
else:
if configs.saved_prefixes_path_template is not None:
prefix_pool = list()
for path in glob(
configs.saved_prefixes_path_template, recursive=True
):
prefix_pool.extend(json.load(open(path)))
prefix_pool = sorted(set([tuple(x) for x in prefix_pool]))
prefixes = random.sample(prefix_pool, configs.n_prompts)
else:
prefixes = random.sample(
self.dataset.data["train"], configs.n_prompts
)
if configs.shuffle_prefix:
original_prefixes = copy.deepcopy(prefixes)
while original_prefixes == prefixes:
random.shuffle(prefixes)
codex_with_info(configs, self.dataset.data[configs.split], prefixes)
@staticmethod
def parse_args(main_parser=None):
if main_parser is None:
main_parser = argparse.ArgumentParser()
subparsers = main_parser.add_subparsers(title="commands", dest="mode")
# collect
parser = subparsers.add_parser("collect", help="collecting stage")
parser.add_argument("--output-path", type=str, required=True)
parser.add_argument(
"--split", type=str, default="dev", choices=["train", "dev", "test"]
)
parser.add_argument("--seed", type=int, nargs="+", default=[0])
parser.add_argument("--n-procs", type=int, default=1)
parser.add_argument(
"--n-prompts",
type=int,
nargs="+",
default=[3],
help="number of few-shot prompt examples",
)
parser.add_argument(
"--mode", type=str, default="greedy", choices=["greedy", "sample"]
)
parser.add_argument(
"--batch-size",
type=int,
default=5,
help="number of sampled examples under the sampling mode",
)
parser.add_argument(
"--max-tokens",
type=int,
default=512,
help="number of sampled examples under the sampling mode",
)
parser.add_argument(
"--temperature", type=float, default=0.3, help="sample temperature"
)
parser.add_argument(
"--top_p", type=float, default=1.0, help="sample temperature"
)
parser.add_argument(
"--prompt-template",
type=str,
default="<info>{info}</info>\n<text>{src}</text>\n<code>{trg}</code>\n",
)
parser.add_argument(
"--example-template",
type=str,
default="<info>{info}</info>\n<text>{src}</text>\n<code>",
)
parser.add_argument("--end-template", type=str, default="</code>")
parser.add_argument("--shuffle-prefix", action="store_true", default=False)
parser.add_argument("--saved-prefixes-path-template", type=str, default=None)
parser.add_argument(
"--engine-name",
type=str,
default="codex-cushman",
choices=["codex-cushman", "codex001", "codex002"],
)
# slurm arguments
parser.add_argument("--slurm-ntasks", type=int, default=None)
parser.add_argument("--slurm-ngpus", type=int, default=0)
parser.add_argument("--slurm-nnodes", type=int, default=1)
parser.add_argument("--slurm-partition", type=str, default="devlab")
args = main_parser.parse_args()
return args
@classmethod
def from_args(cls, args=None, dataset=None):
if args is None:
args = cls.parse_args()
assert dataset is not None
return cls(args, dataset)
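# Hedged usage note (mirrors collect_mbpp.py / collect_spider.py in this repo):
#
#   args = CollectorWithInfo.parse_args(parser)
#   collector = CollectorWithInfo.from_args(args, dataset)
#   collector(rank, local_rank, world_size)   # e.g. collector(i, i, 5)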
|
coder_reviewer_reranking-main
|
collectors.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from data import SpiderDataset
from collectors import CollectorWithInfo
import argparse
if __name__ == "__main__":
dataset = SpiderDataset()
parser = argparse.ArgumentParser()
parser.add_argument("--num_seeds", type=int, default=25)
parser.add_argument("--num_samples", type=int, default=5)
args = CollectorWithInfo.parse_args(parser)
args.dataset = "spider"
args.seed = list(range(args.num_seeds))
collector = CollectorWithInfo.from_args(args, dataset)
for i in range(args.num_samples):
collector(i, i, 5)
|
coder_reviewer_reranking-main
|
collect_spider.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import bashlex
import json
import os
import pickle
import regex
import signal
import subprocess
import tempfile
import threading
from datasets import load_metric
from glob import glob
from nltk.translate.bleu_score import sentence_bleu
from tqdm import tqdm
from dataset.human_eval.human_eval.evaluation import evaluate_functional_correctness
import numpy as np
from collections import Counter
from data import MBPPGoogleDataset, HumanEvalDataset, MBPPSanDataset
from utils_sql import *
from time import sleep
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
def run(self, timeout):
def target():
self.process = subprocess.Popen(self.cmd, shell=True, preexec_fn=os.setsid)
self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
os.killpg(self.process.pid, signal.SIGTERM)
thread.join()
return self.process.returncode
class PythonFunctionExecutor(object):
def __init__(self, function_content, function_call, timeout=10):
self.function_content = function_content
self.function_content = self.function_content.replace("</code>", "")
self.function_call = function_call
self.timeout = timeout
def __call__(self, i, use_json=False):
tempdir = tempfile.TemporaryDirectory()
with open(f"{tempdir.name}/code-{i}.py", "w") as fout:
print(self.function_content, file=fout)
print(f"result = {self.function_call}", file=fout)
print(f"import pickle", file=fout)
print(
f'pickle.dump(result, open("{tempdir.name}/execution_result-{i}.pkl", "wb"))',
file=fout,
)
command = Command(f"python {tempdir.name}/code-{i}.py >/dev/null 2>&1")
execution_status = command.run(timeout=self.timeout)
if execution_status == 0:
try:
execution_results = pickle.load(
open(f"{tempdir.name}/execution_result-{i}.pkl", "rb")
)
except:
execution_results = None
else:
execution_results = None
tempdir.cleanup()
return execution_status, execution_results
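# Hedged usage sketch (not part of the original module): the executor writes the
# function plus a single call to a temp file, runs it in a subprocess with a
# timeout, and unpickles the call's return value.
#
#   executor = PythonFunctionExecutor("def add(a, b):\n    return a + b", "add(1, 2)")
#   status, result = executor("demo")   # expect status == 0 and result == 3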
def mbpp_execute_one_assertion(args):
data_item, code_item, i = args
assertion = data_item[-1]
command = regex.match(f"assert (.+)==.+", assertion).group(1)
python_function = code_item["trg_prediction"]
executor = PythonFunctionExecutor(python_function, command)
execution_result = executor(i)
return execution_result
def mbpp_execute_multiple_assertion(args):
data_item, code_item, i = args
execution_result = list()
python_function = code_item["trg_prediction"]
for assertion_i, assertion in enumerate(data_item[-1]):
command = regex.match(f"assert (.+)==.+", assertion).group(1)
executor = PythonFunctionExecutor(python_function, command)
execution_result.append(executor(f"{i}-{assertion_i}"))
return execution_result
def mbpp_execute_multiple_assertion_pass(args):
data_item, code_item, i = args
execution_result = list()
python_function = code_item["trg_prediction"]
for assertion_i, assertion in enumerate(data_item[-1]):
command = regex.match(f"assert (.+==.+)", assertion).group(1)
executor = PythonFunctionExecutor(python_function, f"({command})")
execute_stats, execute_result = executor(f"{i}-{assertion_i}")
# if isinstance(execute_result, tuple) and len(execute_result) == 2:
# execute_result = execute_result[0]
# assert execute_result is None or isinstance(execute_result, bool)
execution_result.append((execute_stats, execute_result))
return execution_result
from multiprocessing import Pool
def execute_mbpp_google_folder(base_path, num_procs=10, verbose=False):
# single assertion
dataset = MBPPGoogleDataset(mode="assertion")
for path in tqdm(
glob(f"{base_path}/*jsonl"), leave=False, desc="exec one", disable=not verbose
): # execute first assertion call
if "with-reverse" in path:
continue
if os.path.exists(path.replace("jsonl", "exec.pkl")):
continue
split = os.path.basename(path).split("-")[0]
execution_results = list()
all_args = []
for i, line in enumerate(open(path).readlines()):
data_item = dataset.data[split][i]
code_item = json.loads(line)
all_args.append((data_item, code_item, i))
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for execution_result in pool.imap(mbpp_execute_one_assertion, all_args):
execution_results.append(execution_result)
else:
for execution_result in map(mbpp_execute_one_assertion, all_args):
execution_results.append(execution_result)
with open(path.replace("jsonl", "exec.pkl"), "wb") as fout:
pickle.dump(execution_results, fout)
# multiple assertions (cheating)
dataset = MBPPGoogleDataset(mode="assertion-full")
for path in tqdm(
glob(f"{base_path}/*jsonl"),
leave=False,
desc="exec multiple",
disable=not verbose,
): # execute all assertion calls
if "with-reverse" in path:
continue
if os.path.exists(path.replace("jsonl", "execfull.pkl")):
continue
split = os.path.basename(path).split("-")[0]
execution_results = list()
all_args = []
for i, line in enumerate(open(path).readlines()):
data_item = dataset.data[split][i]
code_item = json.loads(line)
import uuid
all_args.append((data_item, code_item, str(uuid.uuid4())))
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for execution_result in pool.imap(
mbpp_execute_multiple_assertion, all_args
):
execution_results.append(execution_result)
else:
for execution_result in map(mbpp_execute_multiple_assertion, all_args):
execution_results.append(execution_result)
with open(path.replace("jsonl", "execfull.pkl"), "wb") as fout:
pickle.dump(execution_results, fout)
# multiple assertions (pass or fail)
for path in tqdm(
glob(f"{base_path}/*jsonl"),
leave=False,
desc="exec-multiple-pass",
disable=not verbose,
):
if "with-reverse" in path:
continue
if os.path.exists(path.replace("jsonl", "execfullpass.pkl")):
continue
split = os.path.basename(path).split("-")[0]
execution_results = list()
all_args = []
for i, line in enumerate(open(path).readlines()):
data_item = dataset.data[split][i]
code_item = json.loads(line)
all_args.append((data_item, code_item, i))
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for execution_result in pool.imap(
mbpp_execute_multiple_assertion_pass, all_args
):
execution_results.append(execution_result)
else:
for execution_result in map(mbpp_execute_multiple_assertion_pass, all_args):
execution_results.append(execution_result)
# with open(path.replace('jsonl', 'execfullpass.pkl'), 'rb') as fout:
# gt_execution_results = pickle.load(fout)
# for i, (a, b) in enumerate(zip(execution_results, gt_execution_results)):
# if a != b:
# print(i, (a, b))
with open(path.replace("jsonl", "execfullpass.pkl"), "wb") as fout:
pickle.dump(execution_results, fout)
def execute_spider_folder(
base_path,
db_path="dataset/spider/database",
gold_path="dataset/spider",
table_path="dataset/spider/tables.json",
timeout=10,
):
kmaps = build_foreign_key_map_from_json(table_path)
for path in glob(f"{base_path}/*jsonl"):
if "with-reverse" in path:
continue
if os.path.exists(path.replace("jsonl", "exec.pkl")):
continue
execution_results = list()
split = os.path.basename(path).split("-")[0]
file_gold_path = f"{gold_path}/{split}_gold.sql"
with open(file_gold_path) as f:
glist = [l.strip().split("\t") for l in f if len(l.strip()) > 0]
with open(path) as f:
plist = [json.loads(l)["trg_prediction"] for l in f]
for p_str, (_, db_name) in tqdm(list(zip(plist, glist))):
db = os.path.join(db_path, db_name, db_name + ".sqlite")
schema = Schema(get_schema(db))
try:
p_sql = get_sql(schema, p_str)
except:
                # If p_sql is not valid, fall back to an empty SQL so it can still be scored against the gold SQL
p_sql = {
"except": None,
"from": {"conds": [], "table_units": []},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [False, []],
"union": None,
"where": [],
}
# rebuild sql for value evaluation
kmap = kmaps[db_name]
p_valid_col_units = build_valid_col_units(
p_sql["from"]["table_units"], schema
)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
execution_result = execute(db, p_str, p_sql, timeout)
execution_results.append(execution_result)
with open(path.replace("jsonl", "exec.pkl"), "wb") as fout:
pickle.dump(execution_results, fout)
def simulate_bash_exec(command):
return list(bashlex.split(command))
def execute_mbpp_google_folder_one(base_path, num_procs=5, verbose=False, tag=""):
# single assertion
path = str(base_path)
dataset = MBPPGoogleDataset(mode="assertion")
out_name = "exec.pkl"
if not (os.path.exists(path.replace("jsonl", out_name))):
split = os.path.basename(path).split("-")[0]
execution_results = list()
all_args = []
for i, line in enumerate(open(path).readlines()):
data_item = dataset.data[split][i]
code_item = json.loads(line)
all_args.append((data_item, code_item, i))
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for execution_result in tqdm(
pool.imap(mbpp_execute_one_assertion, all_args),
total=len(all_args),
leave=False,
disable=not verbose,
desc="exec on",
):
execution_results.append(execution_result)
else:
for execution_result in map(
mbpp_execute_one_assertion, tqdm(all_args, disable=not verbose)
):
execution_results.append(execution_result)
with open(path.replace("jsonl", out_name), "wb") as fout:
pickle.dump(execution_results, fout)
    # multiple assertions (cheating)
dataset = MBPPGoogleDataset(mode="assertion-full")
path = str(base_path)
out_name = "execfull.pkl"
if not (os.path.exists(path.replace("jsonl", out_name))):
split = os.path.basename(path).split("-")[0]
execution_results = list()
all_args = []
for i, line in enumerate(open(path).readlines()):
data_item = dataset.data[split][i]
code_item = json.loads(line)
all_args.append((data_item, code_item, i))
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for execution_result in tqdm(
pool.imap(mbpp_execute_multiple_assertion, all_args),
total=len(all_args),
leave=False,
disable=not verbose,
desc="exec all",
):
execution_results.append(execution_result)
else:
for execution_result in map(
mbpp_execute_multiple_assertion, tqdm(all_args, disable=not verbose)
):
execution_results.append(execution_result)
with open(path.replace("jsonl", out_name), "wb") as fout:
pickle.dump(execution_results, fout)
    # multiple assertions (pass or fail)
path = str(base_path)
out_name = "execfullpass.pkl"
if not (os.path.exists(path.replace("jsonl", out_name))):
split = os.path.basename(path).split("-")[0]
execution_results = list()
all_args = []
for i, line in enumerate(open(path).readlines()):
data_item = dataset.data[split][i]
code_item = json.loads(line)
all_args.append((data_item, code_item, i))
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for execution_result in tqdm(
pool.imap(mbpp_execute_multiple_assertion_pass, all_args),
total=len(all_args),
leave=False,
disable=not verbose,
desc="pass or fail",
):
execution_results.append(execution_result)
else:
for execution_result in map(
mbpp_execute_multiple_assertion_pass,
tqdm(all_args, disable=not verbose),
):
execution_results.append(execution_result)
with open(path.replace("jsonl", out_name), "wb") as fout:
pickle.dump(execution_results, fout)
def execute_spider_folder_one(
base_path,
db_path="dataset/spider/database",
gold_path="dataset/spider",
table_path="dataset/spider/tables.json",
timeout=10,
verbose=False,
tag="",
):
kmaps = build_foreign_key_map_from_json(table_path)
path = str(base_path)
out_name = "exec.pkl" if tag == "" else f"exec.pkl"
if not (os.path.exists(path.replace("jsonl", f"{out_name}"))):
execution_results = list()
split = os.path.basename(path).split("-")[0]
file_gold_path = f"{gold_path}/{split}_gold.sql"
with open(file_gold_path) as f:
glist = [l.strip().split("\t") for l in f if len(l.strip()) > 0]
with open(path) as f:
plist = [json.loads(l)["trg_prediction"] for l in f]
count = 0
for p_str, (_, db_name) in tqdm(
list(zip(plist, glist)), disable=not verbose, desc="SQL exec"
):
db = os.path.join(db_path, db_name, db_name + ".sqlite")
schema = Schema(get_schema(db))
try:
p_sql = get_sql(schema, p_str)
except:
                # If p_sql is not valid, fall back to an empty SQL so it can still be scored against the gold SQL
p_sql = {
"except": None,
"from": {"conds": [], "table_units": []},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [False, []],
"union": None,
"where": [],
}
# rebuild sql for value evaluation
kmap = kmaps[db_name]
p_valid_col_units = build_valid_col_units(
p_sql["from"]["table_units"], schema
)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
execution_result = execute(db, p_str, p_sql, timeout)
execution_results.append(execution_result)
count += 1
with open(path.replace("jsonl", out_name), "wb") as fout:
pickle.dump(execution_results, fout)
def humaneval_postprocess(
completion,
):
keep_lines = []
for l in completion.split("\n"):
if not l.startswith("print"):
keep_lines.append(l)
return "\n".join(keep_lines)
def humaneval_execute_one_assertion(problem):
assertion = problem["assertion"]
try:
command = regex.match(f"assert (.+)==.+", assertion).group(1)
except:
command = regex.match(f"assert (.+)", assertion).group(1)
python_function = problem["prompt"] + problem["completion"]
executor = PythonFunctionExecutor(python_function, command)
execution_result = executor(problem["task_id"].split("/")[1])
return execution_result
def humaneval_execute_multiple_assertion(problem):
execution_result = list()
python_function = problem["prompt"] + problem["completion"]
task_id = problem["task_id"].split("/")[1]
for assertion_i, assertion in enumerate(problem["assertion"]):
try:
try:
command = regex.match(f"assert (.+)==.+", assertion).group(1)
except:
command = regex.match(f"assert (.+)", assertion).group(1)
except:
print(problem["assertion"])
print(problem["task_id"])
breakpoint()
executor = PythonFunctionExecutor(python_function, command)
execution_result.append(executor(f"{task_id}-{assertion_i}"))
return execution_result
def humaneval_execute_generated_assertion(problem):
execution_result = list()
python_function = problem["prompt"] + problem["completion"]
task_id = problem["task_id"].split("/")[1]
total_matched = 0
for assertion_i, assertion in enumerate(problem["gen_assertion"]):
matched = False
for pattern in ["assert (.+)==.+", "assert (.+) is .+", "assert (.+)"]:
try:
command = regex.match(pattern, assertion).group(1)
matched = True
break
except:
pass
if matched:
executor = PythonFunctionExecutor(python_function, command)
execution_result.append(executor(f"{task_id}-{assertion_i}"))
total_matched += int(matched)
if total_matched > 20:
break
return execution_result
def execute_humaneval_folder_one(
base_path,
timeout=10,
verbose=False,
tag="",
num_procs=1,
dataset_choice="humaneval",
):
path = str(base_path)
if dataset_choice in ["humaneval", "codet_humaneval"]:
dataset_cls = HumanEvalDataset
if dataset_choice == "codet_humaneval":
dataset_problem_file = "dataset/human_eval/dataset/CodeTHumanEval.jsonl"
assertion_file = "dataset/human_eval/dataset/HumanEval.jsonl"
else:
dataset_problem_file = "dataset/human_eval/dataset/HumanEval.jsonl"
assertion_file = ""
elif dataset_choice == "mbpp_sanitized":
dataset_problem_file = "dataset/mbpp/mbpp_sanitized_for_code_generation.jsonl"
assertion_file = ""
dataset_cls = MBPPSanDataset
else:
raise ValueError("Invalid data choice")
dataset = dataset_cls(
path=dataset_problem_file, assertion_path=assertion_file, mode="assertion"
)
prompt_to_problem = {p["prompt"]: p for task_id, p in dataset.raw_data.items()}
out_name = "exec.pkl"
problem_with_completions = []
for line in open(path).readlines():
code_item = json.loads(line)
problem = prompt_to_problem[code_item["prompt"]]
problem["completion"] = humaneval_postprocess(code_item["trg_prediction"])
problem_with_completions.append(problem)
if not (os.path.exists(path.replace("jsonl", out_name))):
execution_results = []
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for execution_result in pool.imap(
humaneval_execute_one_assertion, problem_with_completions
):
execution_results.append(execution_result)
else:
for execution_result in map(
humaneval_execute_one_assertion, problem_with_completions
):
execution_results.append(execution_result)
with open(path.replace("jsonl", out_name), "wb") as fout:
pickle.dump(execution_results, fout)
dataset = dataset_cls(
path=dataset_problem_file, assertion_path=assertion_file, mode="assertion-all"
)
prompt_to_problem = {p["prompt"]: p for task_id, p in dataset.raw_data.items()}
problem_with_completions = []
for line in open(path).readlines():
code_item = json.loads(line)
problem = prompt_to_problem[code_item["prompt"]]
problem["completion"] = humaneval_postprocess(code_item["trg_prediction"])
problem_with_completions.append(problem)
out_name = "execfull.pkl"
if not (os.path.exists(path.replace("jsonl", out_name))):
execution_results = []
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for execution_result in pool.imap(
humaneval_execute_multiple_assertion, problem_with_completions
):
execution_results.append(execution_result)
else:
for execution_result in map(
humaneval_execute_multiple_assertion, problem_with_completions
):
execution_results.append(execution_result)
with open(path.replace("jsonl", out_name), "wb") as fout:
pickle.dump(execution_results, fout)
out_name = "execfullpass.pkl"
if not (os.path.exists(path.replace("jsonl", out_name))):
results, pass_at_k, extras = evaluate_functional_correctness(
samples=problem_with_completions,
sample_file=None,
k=[1],
problem_file=dataset_problem_file,
suppress=True,
timeout=timeout,
)
all_passed = []
for result in results.values():
result.sort()
passed = [r[1]["passed"] for r in result]
assert len(passed) == 1
all_passed.append(passed[0])
with open(path.replace("jsonl", out_name), "wb") as fout:
pickle.dump(all_passed, fout)
else:
all_passed = pickle.load(open(path.replace("jsonl", out_name), "rb"))
def execute_nl2bash_folder_one(
base_path,
):
bleu = load_metric("bleu")
path = str(base_path)
if all(
(
os.path.exists(path.replace(".jsonl", ".exec.pkl")),
os.path.exists(path.replace(".jsonl", ".exec.splitted.pkl")),
os.path.exists(path.replace(".jsonl", ".exec.simulate.pkl")),
os.path.exists(path.replace(".jsonl", ".exec.bleu.pkl")),
)
):
        # early return disabled: results are recomputed even if all output files exist
        # return
        pass
all_exec_results = []
all_exec_splitted_results = []
all_simulate_exec = []
all_char_bleu = []
for line in tqdm(open(path).readlines()):
code_item = json.loads(line)
code_item["trg_prediction"]
try:
with time_limit(10):
bashlex.parse(code_item["trg_prediction"])
all_exec_results.append(True)
except:
all_exec_results.append(False)
try:
with time_limit(10):
splitted_trg_pred = simulate_bash_exec(code_item["trg_prediction"])
except:
splitted_trg_pred = list()
simulate_exec = Counter(splitted_trg_pred)
all_exec_splitted_results.append(splitted_trg_pred)
all_simulate_exec.append(simulate_exec)
try:
with time_limit(10):
all_char_bleu.append(
bleu.compute(
predictions=[[ch for ch in code_item["reference"]]],
references=[[[ch for ch in code_item["trg_prediction"]]]],
)["bleu"]
)
except:
all_char_bleu.append(0)
with open(path.replace(".jsonl", ".exec.pkl"), "wb") as fout:
pickle.dump(all_exec_results, fout)
with open(path.replace(".jsonl", ".exec.splitted.pkl"), "wb") as fout:
pickle.dump(all_exec_splitted_results, fout)
with open(path.replace(".jsonl", ".exec.simulate.pkl"), "wb") as fout:
pickle.dump(all_simulate_exec, fout)
with open(path.replace(".jsonl", ".exec.bleu.pkl"), "wb") as fout:
pickle.dump(all_char_bleu, fout)
|
coder_reviewer_reranking-main
|
execution.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import data
from collectors import CollectorWithInfo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--info-mode",
type=str,
default="assertion",
choices=["function_name", "assertion"],
)
parser.add_argument(
"--dataset-type",
type=str,
default="MBPPGoogleDataset",
choices=["MBPPDataset", "MBPPGoogleDataset"],
)
parser.add_argument("--num_seeds", type=int, default=25)
parser.add_argument("--num_samples", type=int, default=5)
args = CollectorWithInfo.parse_args(parser)
args.seed = list(range(args.num_seeds))
args.dataset = "mbpp"
args.split = "test"
dataset = getattr(data, args.dataset_type)(mode=args.info_mode)
collector = CollectorWithInfo.from_args(args, dataset)
for i in range(args.num_samples):
collector(i, i, 5)
|
coder_reviewer_reranking-main
|
collect_mbpp.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import shutil
import torch
from pathlib import Path
import os
from glob import glob
from argparse import ArgumentParser
from tqdm import tqdm, trange
import torch.distributed as dist
from execution import (
execute_humaneval_folder_one,
execute_mbpp_google_folder_one,
execute_spider_folder_one,
execute_nl2bash_folder_one,
)
from pathlib import Path
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=2)
parser.add_argument("--dataset", type=str, default="mbpp")
parser.add_argument("--tag", type=str, default="")
parser.add_argument("--split", type=str, default="test")
parser.add_argument("--num_seeds", type=int, default=5)
parser.add_argument("--num_samples", type=int, default=5)
parser.add_argument("--num_prompts", type=int, default=1)
parser.add_argument(
"--in_data_path",
type=str,
default="/private/home/tianyizzz/projects/mbr-exec-data/mbr-exec-codex001/",
)
parser.add_argument("--temperature", type=float, default=0.3)
parser.add_argument("--max_tokens", type=int, default=512)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--rank", type=int, default=0)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--world_size", type=int, default=1)
args = parser.parse_args()
args.rank = int(os.environ.get("LOCAL_RANK", 0))
# if args.world_size > 1:
# dist.init_process_group("gloo", rank=args.rank, world_size=args.world_size)
paths = []
if args.temperature > 0:
for seed in range(args.num_seeds):
for i in range(args.num_samples):
if (seed * args.num_samples + i) % args.world_size == args.rank:
out_dir = f"sample-{args.temperature}"
if args.top_p != 1.0:
out_dir += f"-p{args.top_p}"
if args.max_tokens != 512:
out_dir += f"-max{args.max_tokens}"
if args.tag == "":
result_file = f"{args.split}-{i}.jsonl"
else:
result_file = f"{args.split}-{i}-{args.tag}.jsonl"
path = (
Path(args.in_data_path)
/ args.dataset
/ f"seed-{seed}"
/ f"{args.num_prompts}-shot"
/ out_dir
/ result_file
)
paths.append(path)
else:
for seed in range(args.num_seeds):
i = 0
if (seed * 5 + i) % args.world_size == args.rank:
out_dir = f"sample-{args.temperature}"
if args.max_tokens != 512:
out_dir += f"-max{args.max_tokens}"
if args.tag == "":
result_file = f"{args.split}-{i}.jsonl"
else:
result_file = f"{args.split}-{i}-{args.tag}.jsonl"
paths.append(
Path(args.in_data_path)
/ args.dataset
/ f"seed-{seed}"
/ f"{args.num_prompts}-shot"
/ out_dir
/ result_file
)
for path in tqdm(paths, disable=not args.rank == 0):
if args.dataset == "mbpp":
execute_mbpp_google_folder_one(path, verbose=args.rank == 0, tag=args.tag)
elif args.dataset == "spider":
execute_spider_folder_one(path, verbose=args.rank == 0, tag=args.tag)
elif "humaneval" in args.dataset or args.dataset == "mbpp_sanitized":
execute_humaneval_folder_one(
path, verbose=args.rank == 0, tag=args.tag, dataset_choice=args.dataset
)
elif args.dataset == "nl2bash":
execute_nl2bash_folder_one(path)
else:
raise ValueError("invalid dataset")
|
coder_reviewer_reranking-main
|
multi_exec.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from tqdm import tqdm
import os
import sqlite3
import pickle as pkl
# CONSTANT
db_dir = "./dataset/spider/database/"
# preloading spider data to reduce io
from dataset.spider_official.evaluation import (
build_foreign_key_map_from_json,
build_valid_col_units,
rebuild_sql_val,
rebuild_sql_col,
)
from dataset.spider_official.process_sql import (
get_schema,
Schema,
get_sql,
)
kmaps = build_foreign_key_map_from_json("./dataset/spider/tables.json")
with open("dataset/spider/dev_gold.sql") as f:
glist = [l.strip().split("\t") for l in f.readlines() if len(l.strip()) > 0]
all_g_res = []
for gold_sql in tqdm(glist, total=len(glist)):
g_str, db = gold_sql
db_name = db
db = os.path.join(db_dir, db, db + ".sqlite")
schema = Schema(get_schema(db))
g_sql = get_sql(schema, g_str)
kmap = kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql["from"]["table_units"], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
conn = sqlite3.connect(f"file:{db}?mode=ro", uri=True)
cursor = conn.cursor()
# there are potential utf-8 errors
try:
cursor.execute(g_str)
g_res = cursor.fetchall()
except:
g_res = []
def res_map(res, val_units):
rmap = {}
for idx, val_unit in enumerate(val_units):
key = (
tuple(val_unit[1])
if not val_unit[2]
else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
)
rmap[key] = [r[idx] for r in res]
return rmap
g_val_units = [unit[1] for unit in g_sql["select"][1]]
g_res = res_map(g_res, g_val_units)
all_g_res.append(g_res)
pkl.dump(
all_g_res,
open(
"./dataset/spider/cached_gold_results.pkl",
"wb",
),
)
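# Note (added for clarity): the cached pickle stores, for every gold dev query, a map
# from each selected val_unit to the column of values returned by executing the query;
# evaluate.evaluate_spider_with_cached_results later compares predicted execution
# results against this list.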
|
coder_reviewer_reranking-main
|
exec_spider_gold.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
################################
# Assumptions:
# 1. sql is correct
# 2. only table name has alias
# 3. only one intersect/union/except
#
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
################################
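# Illustrative example (added for clarity; not part of the original parser): for a
# schema containing a `singer(name, age)` table, get_sql(schema,
# "SELECT name FROM singer WHERE age > 20") should produce roughly:
# {
#     'select': (False, [(0, (0, (0, '__singer.name__', False), None))]),
#     'from': {'table_units': [('table_unit', '__singer__')], 'conds': []},
#     'where': [(False, 3, (0, (0, '__singer.age__', False), None), 20.0, None)],
#     'groupBy': [], 'having': [], 'orderBy': [], 'limit': None,
#     'intersect': None, 'union': None, 'except': None,
# }
# where 3 is WHERE_OPS.index('>') and 0 is AGG_OPS/UNIT_OPS.index('none').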
import json
import sqlite3
from nltk import word_tokenize
CLAUSE_KEYWORDS = (
"select",
"from",
"where",
"group",
"order",
"limit",
"intersect",
"union",
"except",
)
JOIN_KEYWORDS = ("join", "on", "as")
WHERE_OPS = (
"not",
"between",
"=",
">",
"<",
">=",
"<=",
"!=",
"in",
"like",
"is",
"exists",
)
UNIT_OPS = ("none", "-", "+", "*", "/")
AGG_OPS = ("none", "max", "min", "count", "sum", "avg")
TABLE_TYPE = {
"sql": "sql",
"table_unit": "table_unit",
}
COND_OPS = ("and", "or")
SQL_OPS = ("intersect", "union", "except")
ORDER_OPS = ("desc", "asc")
class Schema:
"""
Simple schema which maps table&column to a unique identifier
"""
def __init__(self, schema):
self._schema = schema
self._idMap = self._map(self._schema)
@property
def schema(self):
return self._schema
@property
def idMap(self):
return self._idMap
def _map(self, schema):
idMap = {"*": "__all__"}
id = 1
for key, vals in schema.items():
for val in vals:
idMap[key.lower() + "." + val.lower()] = (
"__" + key.lower() + "." + val.lower() + "__"
)
id += 1
for key in schema:
idMap[key.lower()] = "__" + key.lower() + "__"
id += 1
return idMap
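# Illustrative note (added for clarity): for a schema such as {"singer": ["name", "age"]},
# Schema(...).idMap maps
#   "*"           -> "__all__"
#   "singer"      -> "__singer__"
#   "singer.name" -> "__singer.name__"
#   "singer.age"  -> "__singer.age__"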
def get_schema(db):
"""
Get database's schema, which is a dict with table name as key
and list of column names as value
:param db: database path
:return: schema dict
"""
schema = {}
conn = sqlite3.connect(db)
cursor = conn.cursor()
# fetch table names
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = [str(table[0].lower()) for table in cursor.fetchall()]
# fetch table info
for table in tables:
cursor.execute("PRAGMA table_info({})".format(table))
schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
return schema
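# Usage sketch (illustrative; the database path and names are placeholders):
#   get_schema("path/to/my_db.sqlite")
#     -> {"singer": ["singer_id", "name", ...], "stadium": ["stadium_id", ...], ...}
# with all table and column names lowercased.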
def get_schema_from_json(fpath):
with open(fpath) as f:
data = json.load(f)
schema = {}
for entry in data:
table = str(entry["table"].lower())
cols = [str(col["column_name"].lower()) for col in entry["col_data"]]
schema[table] = cols
return schema
def tokenize(string):
string = str(string)
    string = string.replace(
        "'", '"'
    )  # normalize quotes so every string value is wrapped in double quotes
quote_idxs = [idx for idx, char in enumerate(string) if char == '"']
assert len(quote_idxs) % 2 == 0, "Unexpected quote"
# keep string value as token
vals = {}
for i in range(len(quote_idxs) - 1, -1, -2):
qidx1 = quote_idxs[i - 1]
qidx2 = quote_idxs[i]
val = string[qidx1 : qidx2 + 1]
key = "__val_{}_{}__".format(qidx1, qidx2)
string = string[:qidx1] + key + string[qidx2 + 1 :]
vals[key] = val
toks = [word.lower() for word in word_tokenize(string)]
# replace with string value token
for i in range(len(toks)):
if toks[i] in vals:
toks[i] = vals[toks[i]]
# find if there exists !=, >=, <=
eq_idxs = [idx for idx, tok in enumerate(toks) if tok == "="]
eq_idxs.reverse()
prefix = ("!", ">", "<")
for eq_idx in eq_idxs:
pre_tok = toks[eq_idx - 1]
if pre_tok in prefix:
toks = toks[: eq_idx - 1] + [pre_tok + "="] + toks[eq_idx + 1 :]
return toks
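# Illustrative example (added for clarity):
#   tokenize("SELECT name FROM singer WHERE name != 'Joe'")
# should yield ['select', 'name', 'from', 'singer', 'where', 'name', '!=', '"Joe"'] --
# keywords are lowercased, the quoted literal is kept as a single token, and "!=" is merged.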
def scan_alias(toks):
    """Scan for 'as' tokens and build a map from each alias to its table name"""
as_idxs = [idx for idx, tok in enumerate(toks) if tok == "as"]
alias = {}
for idx in as_idxs:
alias[toks[idx + 1]] = toks[idx - 1]
return alias
def get_tables_with_alias(schema, toks):
tables = scan_alias(toks)
for key in schema:
        assert key not in tables, "Alias {} has the same name as a table".format(key)
tables[key] = key
return tables
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
"""
:returns next idx, column id
"""
tok = toks[start_idx]
if tok == "*":
return start_idx + 1, schema.idMap[tok]
if "." in tok: # if token is a composite
alias, col = tok.split(".")
key = tables_with_alias[alias] + "." + col
return start_idx + 1, schema.idMap[key]
assert (
default_tables is not None and len(default_tables) > 0
), "Default tables should not be None or empty"
for alias in default_tables:
table = tables_with_alias[alias]
if tok in schema.schema[table]:
key = table + "." + tok
return start_idx + 1, schema.idMap[key]
assert False, "Error col: {}".format(tok)
def parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
"""
:returns next idx, (agg_op id, col_id)
"""
idx = start_idx
len_ = len(toks)
isBlock = False
isDistinct = False
if toks[idx] == "(":
isBlock = True
idx += 1
if toks[idx] in AGG_OPS:
agg_id = AGG_OPS.index(toks[idx])
idx += 1
assert idx < len_ and toks[idx] == "("
idx += 1
if toks[idx] == "distinct":
idx += 1
isDistinct = True
idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)
assert idx < len_ and toks[idx] == ")"
idx += 1
return idx, (agg_id, col_id, isDistinct)
if toks[idx] == "distinct":
idx += 1
isDistinct = True
agg_id = AGG_OPS.index("none")
idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert toks[idx] == ")"
idx += 1 # skip ')'
return idx, (agg_id, col_id, isDistinct)
def parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if toks[idx] == "(":
isBlock = True
idx += 1
col_unit1 = None
col_unit2 = None
unit_op = UNIT_OPS.index("none")
idx, col_unit1 = parse_col_unit(
toks, idx, tables_with_alias, schema, default_tables
)
if idx < len_ and toks[idx] in UNIT_OPS:
unit_op = UNIT_OPS.index(toks[idx])
idx += 1
idx, col_unit2 = parse_col_unit(
toks, idx, tables_with_alias, schema, default_tables
)
if isBlock:
assert toks[idx] == ")"
idx += 1 # skip ')'
return idx, (unit_op, col_unit1, col_unit2)
def parse_table_unit(toks, start_idx, tables_with_alias, schema):
"""
:returns next idx, table id, table name
"""
idx = start_idx
len_ = len(toks)
key = tables_with_alias[toks[idx]]
if idx + 1 < len_ and toks[idx + 1] == "as":
idx += 3
else:
idx += 1
return idx, schema.idMap[key], key
def parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if toks[idx] == "(":
isBlock = True
idx += 1
if toks[idx] == "select":
idx, val = parse_sql(toks, idx, tables_with_alias, schema)
elif '"' in toks[idx]: # token is a string value
val = toks[idx]
idx += 1
else:
try:
val = float(toks[idx])
idx += 1
except:
end_idx = idx
while (
end_idx < len_
and toks[end_idx] != ","
and toks[end_idx] != ")"
and toks[end_idx] != "and"
and toks[end_idx] not in CLAUSE_KEYWORDS
and toks[end_idx] not in JOIN_KEYWORDS
):
end_idx += 1
idx, val = parse_col_unit(
toks[start_idx:end_idx], 0, tables_with_alias, schema, default_tables
)
idx = end_idx
if isBlock:
assert toks[idx] == ")"
idx += 1
return idx, val
def parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
conds = []
while idx < len_:
idx, val_unit = parse_val_unit(
toks, idx, tables_with_alias, schema, default_tables
)
not_op = False
if toks[idx] == "not":
not_op = True
idx += 1
assert (
idx < len_ and toks[idx] in WHERE_OPS
), "Error condition: idx: {}, tok: {}".format(idx, toks[idx])
op_id = WHERE_OPS.index(toks[idx])
idx += 1
val1 = val2 = None
if op_id == WHERE_OPS.index(
"between"
): # between..and... special case: dual values
idx, val1 = parse_value(
toks, idx, tables_with_alias, schema, default_tables
)
assert toks[idx] == "and"
idx += 1
idx, val2 = parse_value(
toks, idx, tables_with_alias, schema, default_tables
)
else: # normal case: single value
idx, val1 = parse_value(
toks, idx, tables_with_alias, schema, default_tables
)
val2 = None
conds.append((not_op, op_id, val_unit, val1, val2))
if idx < len_ and (
toks[idx] in CLAUSE_KEYWORDS
or toks[idx] in (")", ";")
or toks[idx] in JOIN_KEYWORDS
):
break
if idx < len_ and toks[idx] in COND_OPS:
conds.append(toks[idx])
idx += 1 # skip and/or
return idx, conds
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
assert toks[idx] == "select", "'select' not found"
idx += 1
isDistinct = False
if idx < len_ and toks[idx] == "distinct":
idx += 1
isDistinct = True
val_units = []
while idx < len_ and toks[idx] not in CLAUSE_KEYWORDS:
agg_id = AGG_OPS.index("none")
if toks[idx] in AGG_OPS:
agg_id = AGG_OPS.index(toks[idx])
idx += 1
idx, val_unit = parse_val_unit(
toks, idx, tables_with_alias, schema, default_tables
)
val_units.append((agg_id, val_unit))
if idx < len_ and toks[idx] == ",":
idx += 1 # skip ','
return idx, (isDistinct, val_units)
def parse_from(toks, start_idx, tables_with_alias, schema):
"""
Assume in the from clause, all table units are combined with join
"""
assert "from" in toks[start_idx:], "'from' not found"
len_ = len(toks)
idx = toks.index("from", start_idx) + 1
default_tables = []
table_units = []
conds = []
while idx < len_:
isBlock = False
if toks[idx] == "(":
isBlock = True
idx += 1
if toks[idx] == "select":
idx, sql = parse_sql(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE["sql"], sql))
else:
if idx < len_ and toks[idx] == "join":
idx += 1 # skip join
idx, table_unit, table_name = parse_table_unit(
toks, idx, tables_with_alias, schema
)
table_units.append((TABLE_TYPE["table_unit"], table_unit))
default_tables.append(table_name)
if idx < len_ and toks[idx] == "on":
idx += 1 # skip on
idx, this_conds = parse_condition(
toks, idx, tables_with_alias, schema, default_tables
)
if len(conds) > 0:
conds.append("and")
conds.extend(this_conds)
if isBlock:
assert toks[idx] == ")"
idx += 1
if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
break
return idx, table_units, conds, default_tables
def parse_where(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if idx >= len_ or toks[idx] != "where":
return idx, []
idx += 1
idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return idx, conds
def parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
col_units = []
if idx >= len_ or toks[idx] != "group":
return idx, col_units
idx += 1
assert toks[idx] == "by"
idx += 1
while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
idx, col_unit = parse_col_unit(
toks, idx, tables_with_alias, schema, default_tables
)
col_units.append(col_unit)
if idx < len_ and toks[idx] == ",":
idx += 1 # skip ','
else:
break
return idx, col_units
def parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
val_units = []
order_type = "asc" # default type is 'asc'
if idx >= len_ or toks[idx] != "order":
return idx, val_units
idx += 1
assert toks[idx] == "by"
idx += 1
while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
idx, val_unit = parse_val_unit(
toks, idx, tables_with_alias, schema, default_tables
)
val_units.append(val_unit)
if idx < len_ and toks[idx] in ORDER_OPS:
order_type = toks[idx]
idx += 1
if idx < len_ and toks[idx] == ",":
idx += 1 # skip ','
else:
break
return idx, (order_type, val_units)
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if idx >= len_ or toks[idx] != "having":
return idx, []
idx += 1
idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return idx, conds
def parse_limit(toks, start_idx):
idx = start_idx
len_ = len(toks)
if idx < len_ and toks[idx] == "limit":
idx += 2
return idx, int(toks[idx - 1])
return idx, None
def parse_sql(toks, start_idx, tables_with_alias, schema):
isBlock = False # indicate whether this is a block of sql/sub-sql
len_ = len(toks)
idx = start_idx
sql = {}
if toks[idx] == "(":
isBlock = True
idx += 1
# parse from clause in order to get default tables
from_end_idx, table_units, conds, default_tables = parse_from(
toks, start_idx, tables_with_alias, schema
)
sql["from"] = {"table_units": table_units, "conds": conds}
# select clause
_, select_col_units = parse_select(
toks, idx, tables_with_alias, schema, default_tables
)
idx = from_end_idx
sql["select"] = select_col_units
# where clause
idx, where_conds = parse_where(toks, idx, tables_with_alias, schema, default_tables)
sql["where"] = where_conds
# group by clause
idx, group_col_units = parse_group_by(
toks, idx, tables_with_alias, schema, default_tables
)
sql["groupBy"] = group_col_units
# having clause
idx, having_conds = parse_having(
toks, idx, tables_with_alias, schema, default_tables
)
sql["having"] = having_conds
# order by clause
idx, order_col_units = parse_order_by(
toks, idx, tables_with_alias, schema, default_tables
)
sql["orderBy"] = order_col_units
# limit clause
idx, limit_val = parse_limit(toks, idx)
sql["limit"] = limit_val
idx = skip_semicolon(toks, idx)
if isBlock:
assert toks[idx] == ")"
idx += 1 # skip ')'
idx = skip_semicolon(toks, idx)
# intersect/union/except clause
for op in SQL_OPS: # initialize IUE
sql[op] = None
if idx < len_ and toks[idx] in SQL_OPS:
sql_op = toks[idx]
idx += 1
idx, IUE_sql = parse_sql(toks, idx, tables_with_alias, schema)
sql[sql_op] = IUE_sql
return idx, sql
def load_data(fpath):
with open(fpath) as f:
data = json.load(f)
return data
def get_sql(schema, query):
toks = tokenize(query)
tables_with_alias = get_tables_with_alias(schema.schema, toks)
_, sql = parse_sql(toks, 0, tables_with_alias, schema)
return sql
def skip_semicolon(toks, start_idx):
idx = start_idx
while idx < len(toks) and toks[idx] == ";":
idx += 1
return idx
|
coder_reviewer_reranking-main
|
process_sql.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
################################
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
################################
from __future__ import print_function
import os
import json
import sqlite3
import signal
from contextlib import contextmanager
import argparse
from process_sql import get_schema, Schema, get_sql
import sys
from time import sleep
# Flag to disable value evaluation
DISABLE_VALUE = True
# Flag to disable distinct in select evaluation
DISABLE_DISTINCT = True
CLAUSE_KEYWORDS = (
"select",
"from",
"where",
"group",
"order",
"limit",
"intersect",
"union",
"except",
)
JOIN_KEYWORDS = ("join", "on", "as")
WHERE_OPS = (
"not",
"between",
"=",
">",
"<",
">=",
"<=",
"!=",
"in",
"like",
"is",
"exists",
)
UNIT_OPS = ("none", "-", "+", "*", "/")
AGG_OPS = ("none", "max", "min", "count", "sum", "avg")
TABLE_TYPE = {
"sql": "sql",
"table_unit": "table_unit",
}
COND_OPS = ("and", "or")
SQL_OPS = ("intersect", "union", "except")
ORDER_OPS = ("desc", "asc")
HARDNESS = {
"component1": ("where", "group", "order", "limit", "join", "or", "like"),
"component2": ("except", "union", "intersect"),
}
class TimeoutException(Exception):
pass
@contextmanager
def time_limit(seconds: float):
def signal_handler(signum, frame):
raise TimeoutException("Timed out!")
signal.setitimer(signal.ITIMER_REAL, seconds)
signal.signal(signal.SIGALRM, signal_handler)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
def condition_has_or(conds):
return "or" in conds[1::2]
def condition_has_like(conds):
return WHERE_OPS.index("like") in [cond_unit[1] for cond_unit in conds[::2]]
def condition_has_sql(conds):
for cond_unit in conds[::2]:
val1, val2 = cond_unit[3], cond_unit[4]
if val1 is not None and type(val1) is dict:
return True
if val2 is not None and type(val2) is dict:
return True
return False
def val_has_op(val_unit):
return val_unit[0] != UNIT_OPS.index("none")
def has_agg(unit):
return unit[0] != AGG_OPS.index("none")
def accuracy(count, total):
if count == total:
return 1
return 0
def recall(count, total):
if count == total:
return 1
return 0
def F1(acc, rec):
if (acc + rec) == 0:
return 0
return (2.0 * acc * rec) / (acc + rec)
def get_scores(count, pred_total, label_total):
if pred_total != label_total:
return 0, 0, 0
elif count == pred_total:
return 1, 1, 1
return 0, 0, 0
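# Note (added for clarity): partial-matching scores are all-or-nothing per clause --
# get_scores returns (1, 1, 1) only when the predicted and gold clauses have the same
# component count and every component matches; otherwise it returns (0, 0, 0).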
def eval_sel(pred, label):
pred_sel = pred["select"][1]
label_sel = label["select"][1]
label_wo_agg = [unit[1] for unit in label_sel]
pred_total = len(pred_sel)
label_total = len(label_sel)
cnt = 0
cnt_wo_agg = 0
for unit in pred_sel:
if unit in label_sel:
cnt += 1
label_sel.remove(unit)
if unit[1] in label_wo_agg:
cnt_wo_agg += 1
label_wo_agg.remove(unit[1])
return label_total, pred_total, cnt, cnt_wo_agg
def eval_where(pred, label):
pred_conds = [unit for unit in pred["where"][::2]]
label_conds = [unit for unit in label["where"][::2]]
label_wo_agg = [unit[2] for unit in label_conds]
pred_total = len(pred_conds)
label_total = len(label_conds)
cnt = 0
cnt_wo_agg = 0
for unit in pred_conds:
if unit in label_conds:
cnt += 1
label_conds.remove(unit)
if unit[2] in label_wo_agg:
cnt_wo_agg += 1
label_wo_agg.remove(unit[2])
return label_total, pred_total, cnt, cnt_wo_agg
def eval_group(pred, label):
pred_cols = [unit[1] for unit in pred["groupBy"]]
label_cols = [unit[1] for unit in label["groupBy"]]
pred_total = len(pred_cols)
label_total = len(label_cols)
cnt = 0
pred_cols = [pred.split(".")[1] if "." in pred else pred for pred in pred_cols]
label_cols = [
label.split(".")[1] if "." in label else label for label in label_cols
]
for col in pred_cols:
if col in label_cols:
cnt += 1
label_cols.remove(col)
return label_total, pred_total, cnt
def eval_having(pred, label):
pred_total = label_total = cnt = 0
if len(pred["groupBy"]) > 0:
pred_total = 1
if len(label["groupBy"]) > 0:
label_total = 1
pred_cols = [unit[1] for unit in pred["groupBy"]]
label_cols = [unit[1] for unit in label["groupBy"]]
if (
pred_total == label_total == 1
and pred_cols == label_cols
and pred["having"] == label["having"]
):
cnt = 1
return label_total, pred_total, cnt
def eval_order(pred, label):
pred_total = label_total = cnt = 0
if len(pred["orderBy"]) > 0:
pred_total = 1
if len(label["orderBy"]) > 0:
label_total = 1
if (
len(label["orderBy"]) > 0
and pred["orderBy"] == label["orderBy"]
and (
(pred["limit"] is None and label["limit"] is None)
or (pred["limit"] is not None and label["limit"] is not None)
)
):
cnt = 1
return label_total, pred_total, cnt
def eval_and_or(pred, label):
pred_ao = pred["where"][1::2]
label_ao = label["where"][1::2]
pred_ao = set(pred_ao)
label_ao = set(label_ao)
if pred_ao == label_ao:
return 1, 1, 1
return len(pred_ao), len(label_ao), 0
def get_nestedSQL(sql):
nested = []
for cond_unit in sql["from"]["conds"][::2] + sql["where"][::2] + sql["having"][::2]:
if type(cond_unit[3]) is dict:
nested.append(cond_unit[3])
if type(cond_unit[4]) is dict:
nested.append(cond_unit[4])
if sql["intersect"] is not None:
nested.append(sql["intersect"])
if sql["except"] is not None:
nested.append(sql["except"])
if sql["union"] is not None:
nested.append(sql["union"])
return nested
def eval_nested(pred, label):
label_total = 0
pred_total = 0
cnt = 0
if pred is not None:
pred_total += 1
if label is not None:
label_total += 1
if pred is not None and label is not None:
cnt += Evaluator().eval_exact_match(pred, label)
return label_total, pred_total, cnt
def eval_IUEN(pred, label):
lt1, pt1, cnt1 = eval_nested(pred["intersect"], label["intersect"])
lt2, pt2, cnt2 = eval_nested(pred["except"], label["except"])
lt3, pt3, cnt3 = eval_nested(pred["union"], label["union"])
label_total = lt1 + lt2 + lt3
pred_total = pt1 + pt2 + pt3
cnt = cnt1 + cnt2 + cnt3
return label_total, pred_total, cnt
def get_keywords(sql):
res = set()
if len(sql["where"]) > 0:
res.add("where")
if len(sql["groupBy"]) > 0:
res.add("group")
if len(sql["having"]) > 0:
res.add("having")
if len(sql["orderBy"]) > 0:
res.add(sql["orderBy"][0])
res.add("order")
if sql["limit"] is not None:
res.add("limit")
if sql["except"] is not None:
res.add("except")
if sql["union"] is not None:
res.add("union")
if sql["intersect"] is not None:
res.add("intersect")
# or keyword
ao = sql["from"]["conds"][1::2] + sql["where"][1::2] + sql["having"][1::2]
if len([token for token in ao if token == "or"]) > 0:
res.add("or")
cond_units = sql["from"]["conds"][::2] + sql["where"][::2] + sql["having"][::2]
# not keyword
if len([cond_unit for cond_unit in cond_units if cond_unit[0]]) > 0:
res.add("not")
# in keyword
if (
len(
[
cond_unit
for cond_unit in cond_units
if cond_unit[1] == WHERE_OPS.index("in")
]
)
> 0
):
res.add("in")
# like keyword
if (
len(
[
cond_unit
for cond_unit in cond_units
if cond_unit[1] == WHERE_OPS.index("like")
]
)
> 0
):
res.add("like")
return res
def eval_keywords(pred, label):
pred_keywords = get_keywords(pred)
label_keywords = get_keywords(label)
pred_total = len(pred_keywords)
label_total = len(label_keywords)
cnt = 0
for k in pred_keywords:
if k in label_keywords:
cnt += 1
return label_total, pred_total, cnt
def count_agg(units):
return len([unit for unit in units if has_agg(unit)])
def count_component1(sql):
count = 0
if len(sql["where"]) > 0:
count += 1
if len(sql["groupBy"]) > 0:
count += 1
if len(sql["orderBy"]) > 0:
count += 1
if sql["limit"] is not None:
count += 1
if len(sql["from"]["table_units"]) > 0: # JOIN
count += len(sql["from"]["table_units"]) - 1
ao = sql["from"]["conds"][1::2] + sql["where"][1::2] + sql["having"][1::2]
count += len([token for token in ao if token == "or"])
cond_units = sql["from"]["conds"][::2] + sql["where"][::2] + sql["having"][::2]
count += len(
[
cond_unit
for cond_unit in cond_units
if cond_unit[1] == WHERE_OPS.index("like")
]
)
return count
def count_component2(sql):
nested = get_nestedSQL(sql)
return len(nested)
def count_others(sql):
count = 0
# number of aggregation
agg_count = count_agg(sql["select"][1])
agg_count += count_agg(sql["where"][::2])
agg_count += count_agg(sql["groupBy"])
if len(sql["orderBy"]) > 0:
agg_count += count_agg(
[unit[1] for unit in sql["orderBy"][1] if unit[1]]
+ [unit[2] for unit in sql["orderBy"][1] if unit[2]]
)
agg_count += count_agg(sql["having"])
if agg_count > 1:
count += 1
# number of select columns
if len(sql["select"][1]) > 1:
count += 1
# number of where conditions
if len(sql["where"]) > 1:
count += 1
# number of group by clauses
if len(sql["groupBy"]) > 1:
count += 1
return count
class Evaluator:
"""A simple evaluator"""
def __init__(self):
self.partial_scores = None
def eval_hardness(self, sql):
count_comp1_ = count_component1(sql)
count_comp2_ = count_component2(sql)
count_others_ = count_others(sql)
if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
return "easy"
elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or (
count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0
):
return "medium"
elif (
(count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0)
or (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0)
or (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1)
):
return "hard"
else:
return "extra"
def eval_exact_match(self, pred, label):
partial_scores = self.eval_partial_match(pred, label)
self.partial_scores = partial_scores
for _, score in partial_scores.items():
if score["f1"] != 1:
return 0
if len(label["from"]["table_units"]) > 0:
label_tables = sorted(label["from"]["table_units"])
pred_tables = sorted(pred["from"]["table_units"])
return label_tables == pred_tables
return 1
def eval_partial_match(self, pred, label):
res = {}
label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["select"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
res["select(no AGG)"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["where"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
res["where(no OP)"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_group(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["group(no Having)"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_having(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["group"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_order(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["order"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_and_or(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["and/or"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_IUEN(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["IUEN"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_keywords(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["keywords"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
return res
def isValidSQL(sql, db):
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(sql)
except:
return False
return True
def print_scores(scores, etype):
levels = ["easy", "medium", "hard", "extra", "all"]
partial_types = [
"select",
"select(no AGG)",
"where",
"where(no OP)",
"group(no Having)",
"group",
"order",
"and/or",
"IUEN",
"keywords",
]
print("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
counts = [scores[level]["count"] for level in levels]
print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
if etype in ["all", "exec"]:
print("===================== EXECUTION ACCURACY =====================")
this_scores = [scores[level]["exec"] for level in levels]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
"execution", *this_scores
)
)
if etype in ["all", "match"]:
print("\n====================== EXACT MATCHING ACCURACY =====================")
exact_scores = [scores[level]["exact"] for level in levels]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
"exact match", *exact_scores
)
)
print("\n---------------------PARTIAL MATCHING ACCURACY----------------------")
for type_ in partial_types:
this_scores = [scores[level]["partial"][type_]["acc"] for level in levels]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
type_, *this_scores
)
)
print("---------------------- PARTIAL MATCHING RECALL ----------------------")
for type_ in partial_types:
this_scores = [scores[level]["partial"][type_]["rec"] for level in levels]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
type_, *this_scores
)
)
print("---------------------- PARTIAL MATCHING F1 --------------------------")
for type_ in partial_types:
this_scores = [scores[level]["partial"][type_]["f1"] for level in levels]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
type_, *this_scores
)
)
def evaluate(gold, predict, db_dir, etype, kmaps):
with open(gold) as f:
glist = [l.strip().split("\t") for l in f.readlines() if len(l.strip()) > 0]
with open(predict) as f:
plist = [l.strip().split("\t") for l in f.readlines() if len(l.strip()) > 0]
# plist = [("select max(Share),min(Share) from performance where Type != 'terminal'", "orchestra")]
# glist = [("SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'", "orchestra")]
evaluator = Evaluator()
levels = ["easy", "medium", "hard", "extra", "all"]
partial_types = [
"select",
"select(no AGG)",
"where",
"where(no OP)",
"group(no Having)",
"group",
"order",
"and/or",
"IUEN",
"keywords",
]
entries = []
scores = {}
for level in levels:
scores[level] = {"count": 0, "partial": {}, "exact": 0.0}
scores[level]["exec"] = 0
for type_ in partial_types:
scores[level]["partial"][type_] = {
"acc": 0.0,
"rec": 0.0,
"f1": 0.0,
"acc_count": 0,
"rec_count": 0,
}
eval_err_num = 0
for p, g in zip(plist, glist):
p_str = p[0]
g_str, db = g
db_name = db
db = os.path.join(db_dir, db, db + ".sqlite")
schema = Schema(get_schema(db))
g_sql = get_sql(schema, g_str)
hardness = evaluator.eval_hardness(g_sql)
scores[hardness]["count"] += 1
scores["all"]["count"] += 1
try:
p_sql = get_sql(schema, p_str)
except:
# If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
p_sql = {
"except": None,
"from": {"conds": [], "table_units": []},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [False, []],
"union": None,
"where": [],
}
eval_err_num += 1
print("eval_err_num:{}".format(eval_err_num))
# rebuild sql for value evaluation
kmap = kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql["from"]["table_units"], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
p_valid_col_units = build_valid_col_units(p_sql["from"]["table_units"], schema)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
if etype in ["all", "exec"]:
exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
if exec_score:
scores[hardness]["exec"] += 1.0
scores["all"]["exec"] += 1.0
if etype in ["all", "match"]:
exact_score = evaluator.eval_exact_match(p_sql, g_sql)
partial_scores = evaluator.partial_scores
if exact_score == 0:
print("{} pred: {}".format(hardness, p_str))
print("{} gold: {}".format(hardness, g_str))
print("")
scores[hardness]["exact"] += exact_score
scores["all"]["exact"] += exact_score
for type_ in partial_types:
if partial_scores[type_]["pred_total"] > 0:
scores[hardness]["partial"][type_]["acc"] += partial_scores[type_][
"acc"
]
scores[hardness]["partial"][type_]["acc_count"] += 1
if partial_scores[type_]["label_total"] > 0:
scores[hardness]["partial"][type_]["rec"] += partial_scores[type_][
"rec"
]
scores[hardness]["partial"][type_]["rec_count"] += 1
scores[hardness]["partial"][type_]["f1"] += partial_scores[type_]["f1"]
if partial_scores[type_]["pred_total"] > 0:
scores["all"]["partial"][type_]["acc"] += partial_scores[type_][
"acc"
]
scores["all"]["partial"][type_]["acc_count"] += 1
if partial_scores[type_]["label_total"] > 0:
scores["all"]["partial"][type_]["rec"] += partial_scores[type_][
"rec"
]
scores["all"]["partial"][type_]["rec_count"] += 1
scores["all"]["partial"][type_]["f1"] += partial_scores[type_]["f1"]
entries.append(
{
"predictSQL": p_str,
"goldSQL": g_str,
"hardness": hardness,
"exact": exact_score,
"partial": partial_scores,
}
)
for level in levels:
if scores[level]["count"] == 0:
continue
if etype in ["all", "exec"]:
scores[level]["exec"] /= scores[level]["count"]
if etype in ["all", "match"]:
scores[level]["exact"] /= scores[level]["count"]
for type_ in partial_types:
if scores[level]["partial"][type_]["acc_count"] == 0:
scores[level]["partial"][type_]["acc"] = 0
else:
scores[level]["partial"][type_]["acc"] = (
scores[level]["partial"][type_]["acc"]
/ scores[level]["partial"][type_]["acc_count"]
* 1.0
)
if scores[level]["partial"][type_]["rec_count"] == 0:
scores[level]["partial"][type_]["rec"] = 0
else:
scores[level]["partial"][type_]["rec"] = (
scores[level]["partial"][type_]["rec"]
/ scores[level]["partial"][type_]["rec_count"]
* 1.0
)
if (
scores[level]["partial"][type_]["acc"] == 0
and scores[level]["partial"][type_]["rec"] == 0
):
scores[level]["partial"][type_]["f1"] = 1
else:
scores[level]["partial"][type_]["f1"] = (
2.0
* scores[level]["partial"][type_]["acc"]
* scores[level]["partial"][type_]["rec"]
/ (
scores[level]["partial"][type_]["rec"]
+ scores[level]["partial"][type_]["acc"]
)
)
print_scores(scores, etype)
def eval_exec_match(db, p_str, g_str, pred, gold):
"""
    Return 1 if the execution results of the prediction and the gold query match,
    column by column. Multiple col_unit pairs are currently not supported.
"""
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
except:
return False
cursor.execute(g_str)
q_res = cursor.fetchall()
def res_map(res, val_units):
rmap = {}
for idx, val_unit in enumerate(val_units):
key = (
tuple(val_unit[1])
if not val_unit[2]
else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
)
rmap[key] = [r[idx] for r in res]
return rmap
p_val_units = [unit[1] for unit in pred["select"][1]]
q_val_units = [unit[1] for unit in gold["select"][1]]
return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
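# Note (added for clarity): execution accuracy compares, for every selected val_unit,
# the full column of values fetched from the database; the prediction counts as correct
# only if its per-column result map equals the gold query's result map.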
from multiprocessing import Manager, Process
def execute(db, p_str, pred, timeout):
conn = sqlite3.connect(f"file:{db}?mode=ro", uri=True, timeout=30)
cursor = conn.cursor()
with Manager() as manager:
result = manager.list()
def unsafe_execute(result):
# sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
except sqlite3.OperationalError as e:
if "locked" in str(e):
print(e)
raise ValueError("Invalid")
result.append(p_res)
p = Process(target=unsafe_execute, args=(result,))
p.start()
p.join(timeout=timeout)
if p.exitcode != 0:
return False, None
else:
try:
p_res = result[0]
def res_map(res, val_units):
rmap = {}
for idx, val_unit in enumerate(val_units):
key = (
tuple(val_unit[1])
if not val_unit[2]
else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
)
rmap[key] = [r[idx] for r in res]
return rmap
p_val_units = [unit[1] for unit in pred["select"][1]]
return True, res_map(p_res, p_val_units)
except:
return False, None
# Rebuild SQL functions for value evaluation
def rebuild_cond_unit_val(cond_unit):
if cond_unit is None or not DISABLE_VALUE:
return cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
if type(val1) is not dict:
val1 = None
else:
val1 = rebuild_sql_val(val1)
if type(val2) is not dict:
val2 = None
else:
val2 = rebuild_sql_val(val2)
return not_op, op_id, val_unit, val1, val2
def rebuild_condition_val(condition):
if condition is None or not DISABLE_VALUE:
return condition
res = []
for idx, it in enumerate(condition):
if idx % 2 == 0:
res.append(rebuild_cond_unit_val(it))
else:
res.append(it)
return res
def rebuild_sql_val(sql):
if sql is None or not DISABLE_VALUE:
return sql
sql["from"]["conds"] = rebuild_condition_val(sql["from"]["conds"])
sql["having"] = rebuild_condition_val(sql["having"])
sql["where"] = rebuild_condition_val(sql["where"])
sql["intersect"] = rebuild_sql_val(sql["intersect"])
sql["except"] = rebuild_sql_val(sql["except"])
sql["union"] = rebuild_sql_val(sql["union"])
return sql
# Rebuild SQL functions for foreign key evaluation
def build_valid_col_units(table_units, schema):
col_ids = [
table_unit[1]
for table_unit in table_units
if table_unit[0] == TABLE_TYPE["table_unit"]
]
prefixs = [col_id[:-2] for col_id in col_ids]
valid_col_units = []
for value in schema.idMap.values():
if "." in value and value[: value.index(".")] in prefixs:
valid_col_units.append(value)
return valid_col_units
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
if col_unit is None:
return col_unit
agg_id, col_id, distinct = col_unit
if col_id in kmap and col_id in valid_col_units:
col_id = kmap[col_id]
if DISABLE_DISTINCT:
distinct = None
return agg_id, col_id, distinct
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
if val_unit is None:
return val_unit
unit_op, col_unit1, col_unit2 = val_unit
col_unit1 = rebuild_col_unit_col(valid_col_units, col_unit1, kmap)
col_unit2 = rebuild_col_unit_col(valid_col_units, col_unit2, kmap)
return unit_op, col_unit1, col_unit2
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
if table_unit is None:
return table_unit
table_type, col_unit_or_sql = table_unit
if isinstance(col_unit_or_sql, tuple):
col_unit_or_sql = rebuild_col_unit_col(valid_col_units, col_unit_or_sql, kmap)
return table_type, col_unit_or_sql
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
if cond_unit is None:
return cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
return not_op, op_id, val_unit, val1, val2
def rebuild_condition_col(valid_col_units, condition, kmap):
for idx in range(len(condition)):
if idx % 2 == 0:
condition[idx] = rebuild_cond_unit_col(
valid_col_units, condition[idx], kmap
)
return condition
def rebuild_select_col(valid_col_units, sel, kmap):
if sel is None:
return sel
distinct, _list = sel
new_list = []
for it in _list:
agg_id, val_unit = it
new_list.append((agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap)))
if DISABLE_DISTINCT:
distinct = None
return distinct, new_list
def rebuild_from_col(valid_col_units, from_, kmap):
if from_ is None:
return from_
from_["table_units"] = [
rebuild_table_unit_col(valid_col_units, table_unit, kmap)
for table_unit in from_["table_units"]
]
from_["conds"] = rebuild_condition_col(valid_col_units, from_["conds"], kmap)
return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
if group_by is None:
return group_by
return [
rebuild_col_unit_col(valid_col_units, col_unit, kmap) for col_unit in group_by
]
def rebuild_order_by_col(valid_col_units, order_by, kmap):
if order_by is None or len(order_by) == 0:
return order_by
direction, val_units = order_by
new_val_units = [
rebuild_val_unit_col(valid_col_units, val_unit, kmap) for val_unit in val_units
]
return direction, new_val_units
def rebuild_sql_col(valid_col_units, sql, kmap):
if sql is None:
return sql
sql["select"] = rebuild_select_col(valid_col_units, sql["select"], kmap)
sql["from"] = rebuild_from_col(valid_col_units, sql["from"], kmap)
sql["where"] = rebuild_condition_col(valid_col_units, sql["where"], kmap)
sql["groupBy"] = rebuild_group_by_col(valid_col_units, sql["groupBy"], kmap)
sql["orderBy"] = rebuild_order_by_col(valid_col_units, sql["orderBy"], kmap)
sql["having"] = rebuild_condition_col(valid_col_units, sql["having"], kmap)
sql["intersect"] = rebuild_sql_col(valid_col_units, sql["intersect"], kmap)
sql["except"] = rebuild_sql_col(valid_col_units, sql["except"], kmap)
sql["union"] = rebuild_sql_col(valid_col_units, sql["union"], kmap)
return sql
def build_foreign_key_map(entry):
cols_orig = entry["column_names_original"]
tables_orig = entry["table_names_original"]
# rebuild cols corresponding to idmap in Schema
cols = []
for col_orig in cols_orig:
if col_orig[0] >= 0:
t = tables_orig[col_orig[0]]
c = col_orig[1]
cols.append("__" + t.lower() + "." + c.lower() + "__")
else:
cols.append("__all__")
def keyset_in_list(k1, k2, k_list):
for k_set in k_list:
if k1 in k_set or k2 in k_set:
return k_set
new_k_set = set()
k_list.append(new_k_set)
return new_k_set
foreign_key_list = []
foreign_keys = entry["foreign_keys"]
for fkey in foreign_keys:
key1, key2 = fkey
key_set = keyset_in_list(key1, key2, foreign_key_list)
key_set.add(key1)
key_set.add(key2)
foreign_key_map = {}
for key_set in foreign_key_list:
sorted_list = sorted(list(key_set))
midx = sorted_list[0]
for idx in sorted_list:
foreign_key_map[cols[idx]] = cols[midx]
return foreign_key_map
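# Illustrative note (added for clarity): columns that are transitively linked by foreign
# keys are collapsed onto a single canonical column (the group member with the smallest
# column index), e.g. "__concert.stadium_id__" and "__stadium.stadium_id__" would map to
# the same key so that equivalent join columns compare as equal.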
def build_foreign_key_map_from_json(table):
with open(table) as f:
data = json.load(f)
tables = {}
for entry in data:
tables[entry["db_id"]] = build_foreign_key_map(entry)
return tables
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gold", dest="gold", type=str)
parser.add_argument("--pred", dest="pred", type=str)
parser.add_argument("--db", dest="db", type=str)
parser.add_argument("--table", dest="table", type=str)
parser.add_argument("--etype", dest="etype", type=str)
args = parser.parse_args()
gold = args.gold
pred = args.pred
db_dir = args.db
table = args.table
etype = args.etype
assert etype in ["all", "exec", "match"], "Unknown evaluation method"
kmaps = build_foreign_key_map_from_json(table)
evaluate(gold, pred, db_dir, etype, kmaps)
|
coder_reviewer_reranking-main
|
utils_sql.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from time import sleep
import os
import random
import openai
import re
import json
def safe_codex_call(
args, api_text, temperature=None, stop=None, echo=False, max_tokens=256, api_i=0
):
temperature = temperature if temperature else args.temperature
while True:
try:
if args.model == "codex002":
openai.organization = os.getenv(f"OPENAI_ORG{api_i+1}")
else:
openai.organization = os.getenv("OPENAI_ORG1")
codex_response = codex_greedy(
api_text,
temperature=temperature,
codex_config=args.model,
stop=stop,
echo=echo,
max_tokens=max_tokens,
)
break
except openai.error.InvalidRequestError as e:
codex_response = None
if isinstance(api_text, list):
api_text = [t.replace("\n", "") for t in api_text]
else:
api_text = api_text.replace("\n", "")
print("Invalid Request: Removing newlines")
except openai.error.RateLimitError as e:
print(type(e), f"API {api_i}:", e, end="\r")
sleep(30)
api_i = (api_i + 1) % 3
except Exception as e:
print(type(e), e)
sleep(10)
if codex_response is None:
codex_text = ""
else:
codex_text = "".join(codex_response["choices"][0]["logprobs"]["tokens"])
return codex_response, codex_text
def codex_greedy(
prompt, temperature=0.3, codex_config="codex", stop=None, echo=False, max_tokens=256
):
if stop is None:
stop = ["#SOLUTION END", "# SOLUTION END", "SOLUTION END"]
if codex_config == "codex001":
codex_code = "code-davinci-001"
elif codex_config == "codex002":
codex_code = "code-davinci-002"
elif codex_config == "codex-cushman":
codex_code = "code-cushman-001"
else:
raise ValueError
response = openai.Completion.create(
engine=codex_code,
prompt=prompt,
temperature=temperature,
stop=stop,
max_tokens=max_tokens,
top_p=0.95,
logprobs=1,
frequency_penalty=0,
presence_penalty=0,
echo=echo,
)
return response
def write_jsonl(data_list, file_path):
with open(file_path, "w") as f:
for d in data_list:
f.write(json.dumps(d) + "\n")
def parse_prompt(prompt, dataset="mbpp"):
prompt_data = []
fewshot_examples = [
p.strip() + "</code>" for p in prompt.split("</code>") if len(p) > 1
]
for example in fewshot_examples:
example_data = dict()
if dataset in ["mbpp", "spider"]:
all_fields = ["info", "text", "code"]
elif dataset == "nl2bash":
all_fields = ["text", "code"]
for field in all_fields:
field_start = example.index(f"<{field}>")
field_end = example.index(f"</{field}>")
example_data[field] = example[field_start : field_end + len(f"</{field}>")]
prompt_data.append(example_data)
return prompt_data
def make_new_context(prompt_parse, dataset="mbpp"):
without_ref = ""
with_ref = ""
if dataset == "mbpp":
full_prompt_fields = ["code", "info", "text"]
elif dataset == "spider":
full_prompt_fields = ["info", "code", "text"]
else:
full_prompt_fields = ["code", "text"]
if dataset == "mbpp" or dataset == "nl2bash":
partial_prompt_fields = ["code"]
elif dataset == "spider":
partial_prompt_fields = ["info", "code"]
for i, example in enumerate(prompt_parse):
for field in full_prompt_fields:
with_ref += example[field] + "\n"
if i < len(prompt_parse) - 1:
for field in full_prompt_fields:
without_ref += example[field] + "\n"
else:
for field in partial_prompt_fields:
without_ref += example[field] + "\n"
return with_ref.strip(), without_ref.strip()
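# Usage sketch (illustrative; the prompt text is a placeholder, not part of the original
# file): given a parsed mbpp prompt whose last example is
#   {"code": "<code>def f(x): return x*x</code>",
#    "info": "<info>assert f(2)==4</info>", "text": "<text>Write f.</text>"},
# make_new_context returns
#   with_ref    - every example serialized as code + info + text, and
#   without_ref - identical except that the final example keeps only its <code> block,
# which is presumably what allows scoring the held-out description given the code.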
from contextlib import contextmanager
import signal
class TimeoutException(Exception):
pass
@contextmanager
def time_limit(seconds: float):
def signal_handler(signum, frame):
raise TimeoutException("Timed out!")
signal.setitimer(signal.ITIMER_REAL, seconds)
signal.signal(signal.SIGALRM, signal_handler)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
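# Minimal usage sketch (added for illustration; _time_limit_demo is not part of the
# original file and is never called): time_limit arms a SIGALRM timer around a block
# and raises TimeoutException when the limit expires. This only works in the main
# thread of a Unix process.
def _time_limit_demo(seconds=0.1):
    try:
        with time_limit(seconds):
            while True:
                pass
    except TimeoutException:
        return "timed out"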
|
coder_reviewer_reranking-main
|
utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import keyword, sys
from pyminifier import analyze
from pyminifier.minification import remove_comments_and_docstrings, remove_blank_lines
import re
RESERVED_WORDS = keyword.kwlist + analyze.builtins
def clean_comment(code):
code = remove_comments_and_docstrings(code)
code = remove_blank_lines(code)
return code
def remove_print(code):
code = re.sub("print(.+)", "print('')", code)
code = re.sub("Error(.+)", "Error('')", code)
code = re.sub("Exception(.+)", "Exception('')", code)
code = re.sub("assert (.+), +['\"].+['\"]", "assert \\1", code)
return code
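# Illustrative examples (added for clarity; not executed anywhere in this module):
#   remove_print('print("debug", x)')          -> "print('')"
#   remove_print('assert f(1) == 2, "wrong"')  -> 'assert f(1) == 2'
# i.e. print/Error/Exception arguments are blanked out and assertion messages dropped,
# so literal message strings do not affect the canonicalized code.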
|
coder_reviewer_reranking-main
|
pyminifier_canonicalize.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import tempfile
from datasets import load_metric
from tqdm import tqdm
import pickle as pkl
from data import MBPPGoogleDataset
from execution import Command
import sys
from utils import time_limit
""" dataset keys: src, trg_prediction, reference """
def evaluate_charbleu(dataset):
bleu = load_metric("bleu")
predictions = [[ch for ch in item["trg_prediction"]] for item in dataset]
references = [[[ch for ch in item["reference"]]] for item in dataset]
return bleu.compute(predictions=predictions, references=references)
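# Note (added for clarity): "charbleu" is ordinary BLEU computed over character tokens --
# every prediction and reference string is split into its characters before calling the
# huggingface `bleu` metric, so e.g. "ls -l" becomes ['l', 's', ' ', '-', 'l'].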
""" dataset keys: src, trg_prediction, reference (only trg_prediction useful) """
def evaluate_spider_with_cached_results(selected):
all_pred_results = [item["execution_result"] for item in selected]
all_gold_results = pkl.load(
open(
"./dataset/spider/cached_gold_results.pkl",
"rb",
)
)
total_correct = 0
for p_res, g_res in tqdm(
zip(all_pred_results, all_gold_results),
total=len(all_gold_results),
):
total_correct += int(p_res[1] == g_res)
return total_correct / len(all_gold_results)
def evaluate_one_mbpp(args, tempdir, dataset, timeout):
i, item = args
if "execution_result_full_pass" in dataset[i]:
return int(
all(
isinstance(x[1], bool) and x[1] == True
for x in dataset[i]["execution_result_full_pass"]
)
)
else:
test_cases = item["test_list"]
test_setups = item["test_setup_code"]
code = dataset[i]["trg_prediction"]
# write code to file
with open(f"{tempdir.name}/code-{i}.py", "w") as fout:
print(code, file=fout)
print(test_setups, file=fout)
for case in test_cases:
print(case, file=fout)
fout.close()
command = Command(f"python {tempdir.name}/code-{i}.py >/dev/null 2>&1")
execution_result = command.run(timeout=timeout) == 0
return execution_result
from functools import partial
from multiprocessing import Pool
""" dataset keys: src, trg_prediction, reference (only trg_prediction useful) """
def evaluate_google_mbpp(
dataset,
reference_path,
split="test",
timeout=10,
return_details=False,
num_procs=1,
verbose=False,
):
references = MBPPGoogleDataset(reference_path)
assert len(dataset) == len(references.raw_data[split])
tempdir = tempfile.TemporaryDirectory()
passed_information = list()
partial_evalutate_one = partial(
evaluate_one_mbpp, tempdir=tempdir, dataset=dataset, timeout=timeout
)
if num_procs > 1:
with Pool(processes=num_procs) as pool:
for result_json in tqdm(
pool.imap(
partial_evalutate_one, list(enumerate(references.raw_data[split]))
),
total=len(references.raw_data[split]),
leave=False,
disable=not verbose,
):
passed_information.append(result_json)
else:
for args in tqdm(
list(enumerate(references.raw_data[split])), disable=not verbose
):
passed_information.append(partial_evalutate_one(args))
tempdir.cleanup()
if return_details:
return passed_information
else:
return sum(passed_information) / len(passed_information)
def evaluate_humaneval(dataset):
all_passed = [d["execution_result_full_pass"] for d in dataset]
return sum(all_passed) / len(all_passed)
|
coder_reviewer_reranking-main
|
evaluate.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import bashlex
import collections
import json
import pickle
import numpy as np
import os
import random
from glob import glob
from nltk.translate.bleu_score import sentence_bleu
from evaluate import (
evaluate_charbleu,
evaluate_google_mbpp,
evaluate_spider_with_cached_results,
evaluate_humaneval,
)
from tqdm import tqdm, trange
from pyminifier_canonicalize import clean_comment
import torch
from argparse import ArgumentParser
from pathlib import Path
from functools import partial
from multiprocessing import Pool
from datasets import load_metric
class MultiSampleSelector(object):
def __init__(
self,
paths,
split="dev",
tag="",
model="",
dataset="",
verbose=False,
no_rejection=False,
):
self.paths = (
list(sorted(glob(paths, recursive=True)))
if isinstance(paths, str)
else list(sorted(paths))
)
self.split = split
self.data = collections.defaultdict(list)
self.args = collections.defaultdict(list)
self.tag = tag
self.model = model
self.dataset = dataset
self.verbose = verbose
for i, path in tqdm(
enumerate(self.paths),
total=len(self.paths),
desc="loading jsons",
disable=not self.verbose,
):
            # NOTE: configs are read from the first path; all runs are assumed to share them
            self.args[i] = pickle.load(open(f"{self.paths[0]}/configs.pkl", "rb"))
idx = 0
if self.tag != "":
file_path = f"{path}/{split}-{idx}-{tag}.jsonl"
else:
file_path = f"{path}/{split}-{idx}.jsonl"
while os.path.exists(file_path):
self.data[i, idx].extend([json.loads(x) for x in open(file_path)])
idx += 1
if self.tag != "":
file_path = f"{path}/{split}-{idx}-{tag}.jsonl"
else:
file_path = f"{path}/{split}-{idx}.jsonl"
print(f"{len(self.data)} cached samples")
for path_id, sample_id in tqdm(
self.data, desc="loading logprobs", disable=not self.verbose
):
if (
self.paths[path_id].find("nl2bash") != -1
): # NL2bash data, exec simulation
                # locate the exec side-car files under the directory this sample was
                # loaded from (rather than the leftover loop variable `path`)
                if self.tag != "":
                    file_name = f"{self.paths[path_id]}/{split}-{sample_id}-{tag}"
                else:
                    file_name = f"{self.paths[path_id]}/{split}-{sample_id}"
exec_results = pickle.load(open(f"{file_name}.exec.pkl", "rb"))
simulate_exec_results = pickle.load(
open(f"{file_name}.exec.simulate.pkl", "rb")
)
splitted_exec_results = pickle.load(
open(f"{file_name}.exec.splitted.pkl", "rb")
)
char_bleus = pickle.load(open(f"{file_name}.exec.bleu.pkl", "rb"))
for item_i, item in enumerate(self.data[path_id, sample_id]):
if no_rejection:
item["not_degenerate"] = True
else:
# implementing degenerate solution rejection
if self.dataset in ["codet_humaneval", "mbpp_sanitized"]:
item["not_degenerate"] = filter_empty(
item, remove_function_header=False
) and filter_repeat(item["trg_prediction"])
elif self.dataset in ["mbpp"]:
item["not_degenerate"] = filter_empty(
item, remove_function_header=True
)
elif self.dataset in ["spider", "nl2bash"]:
item["not_degenerate"] = len(
item["trg_prediction"]
) != 0 and filter_repeat(item["trg_prediction"])
else:
raise ValueError("Invalid Dataset.")
avg_logprob, sum_logprob = self.extract_logprob_stats(item, path_id)
item["avg_logprob"] = avg_logprob
item["sum_logprob"] = sum_logprob
reverse_logprob = self.extract_reverse_logprob(item, path_id)
(
item["sum_reverse_logprob"],
item["avg_reverse_logprob"],
) = reverse_logprob
if (
self.paths[path_id].find("nl2bash") != -1
): # NL2bash data, exec simulation
item["executable"] = exec_results[item_i]
item["trg_prediction_splitted"] = splitted_exec_results[item_i]
item["execution_result_simulated"] = simulate_exec_results[item_i]
item["charbleu"] = char_bleus[item_i]
def extract_reverse_logprob(self, item, path_id):
if "prompt_reverse_logprobs" not in item:
return 0, 0
logprobs = item["prompt_reverse_logprobs"]
return np.sum(logprobs), np.mean(logprobs)
def extract_logprob_stats(self, item, path_id):
current_seq = ""
if "codex" in self.model:
extracted_position = None
for i, _ in enumerate(item["tokens"]):
current_seq += item["tokens"][i]
end_template = self.args[path_id].end_template
if isinstance(end_template, list):
end_template = ""
if (
current_seq.find(item["trg_prediction"]) != -1
and current_seq.find(end_template) != -1
):
extracted_position = i + 1
break
logprobs = (
item["logprobs"][:extracted_position]
if extracted_position is not None
else item["logprobs"]
)
logprobs = list(
filter(lambda x: x < 0, logprobs)
) # handle potential codex bug on positive log probability
else:
logprobs = item["logprobs"]
return np.mean(logprobs), np.sum(logprobs)
def select(
self, ids=None, key_extractor=lambda x: x["avg_logprob"], return_keys=False
):
if ids is None:
ids = self.data.keys()
ids = list(sorted(ids))
n_examples = len(self.data[ids[0]])
selected_examples = list()
sample_keys = collections.defaultdict(list)
for i in range(n_examples):
max_key = None
selected_item = None
for idx in ids:
item = self.data[idx][i]
key = key_extractor(item)
sample_keys[idx].append(key)
if max_key is None or key > max_key:
max_key = key
selected_item = item
assert selected_item is not None
selected_examples.append(selected_item)
if return_keys:
return selected_examples, sample_keys
else:
return selected_examples
class ExecutionBasedMultiSampleSelector(MultiSampleSelector):
def __init__(
self,
paths,
split="dev",
execution_type=None,
tag="",
model="",
verbose=False,
dataset="",
no_rejection=False,
):
super().__init__(
paths,
split=split,
tag=tag,
model=model,
verbose=verbose,
dataset=dataset,
no_rejection=no_rejection,
)
self.execution_type = execution_type
load_execution(self.data, self.paths, split, self.tag)
class IntraMultiSampleSelector(MultiSampleSelector):
def __init__(
self,
paths,
split="dev",
tag="",
model="",
verbose=False,
dataset="",
no_rejection=False,
):
super().__init__(
paths,
split=split,
tag=tag,
model=model,
verbose=verbose,
dataset=dataset,
no_rejection=no_rejection,
)
def select(
self,
ids=None,
key_extractor=None,
second_key_extractor=None,
return_keys=False,
quantile_threshold=None,
):
if ids is None:
ids = self.data.keys()
elif isinstance(ids, int):
ids = [
(i, j) for i in set(x[0] for x in self.data.keys()) for j in range(ids)
]
ids = list(sorted(ids))
id_set = set(ids)
sample_keys = collections.defaultdict(list)
# print(f'Selecting Samples from IDs: {ids}')
n_examples = len(self.data[ids[0]])
selected_examples = list()
for i in range(n_examples):
max_key = None
selected_item = None
if quantile_threshold is not None:
filtered_ids = []
all_second_key = []
for idx in ids:
selected_item = None
item = self.data[idx][i]
all_second_key.append(
second_key_extractor(item)
if second_key_extractor is not None
else 0
)
threshold = np.quantile(all_second_key, quantile_threshold)
for idx_i, idx in enumerate(ids):
if all_second_key[idx_i] >= threshold:
filtered_ids.append(idx)
else:
filtered_ids = ids
for idx in filtered_ids:
item = self.data[idx][i]
first_keys = list()
for grndtruth_idx in filtered_ids:
grndtruth_item = self.data[grndtruth_idx][i]
key = key_extractor(item, grndtruth_item)
first_keys.append(key)
first_key = sum(first_keys)
second_key = (
second_key_extractor(item)
if second_key_extractor is not None
else 0
)
current_key = (first_key, second_key)
item["mbr_key"] = current_key
sample_keys[idx].append(current_key)
if max_key is None or current_key > max_key:
max_key = current_key
selected_item = item
assert selected_item is not None
selected_examples.append(selected_item)
if return_keys:
return selected_examples, sample_keys
else:
return selected_examples
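# Note on IntraMultiSampleSelector.select above: for each test example, every
# candidate is scored by the sum of pairwise key_extractor values against all
# (optionally quantile-filtered) candidates, an MBR-style consensus score, with
# ties broken by the optional second_key_extractor (e.g. sum_logprob); the
# winning (consensus, tie-break) tuple is stored on the item as "mbr_key".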
class ExecutionBasedIntraMultiSampleSelector(IntraMultiSampleSelector):
def __init__(
self,
paths,
split="dev",
execution_type=None,
tag="",
model="",
verbose=False,
dataset="",
no_rejection=False,
):
super().__init__(
paths,
split=split,
tag=tag,
model=model,
verbose=verbose,
dataset=dataset,
no_rejection=no_rejection,
)
self.execution_type = execution_type
load_execution(self.data, self.paths, split, self.tag)
def filter_empty(x, remove_function_header=False):
code = x["trg_prediction"]
if remove_function_header:
code = "\n".join(
[l for l in code.split("\n") if not l.strip().startswith("def")]
)
try:
code = clean_comment(code)
except:
code = ""
return code.strip() not in ["", "pass", "return"]
def filter_repeat(x, threshold=0.25):
import zlib
bytes_x = bytes(x, encoding="utf-8")
comp_x = zlib.compress(bytes_x)
return len(comp_x) / len(bytes_x) > threshold
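# Illustration (added for clarity): filter_repeat flags degenerate, highly
# repetitive completions via their zlib compression ratio. Repetitive text
# compresses well, so its compressed/raw size ratio falls below the 0.25
# default, e.g. filter_repeat("pass\n" * 200) is False, while a short ordinary
# function body compresses poorly and is kept (returns True).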
def load_execution(data_dict, paths, split, tag):
if "spider" in paths[0]:
exec_list = [
("exec.pkl", "execution_result"),
]
else:
exec_list = [
("exec.pkl", "execution_result"),
("execfull.pkl", "execution_result_full"),
("execfullpass.pkl", "execution_result_full_pass"),
("gen.execfull.pkl", "gen_execution_result_full"),
]
for suffix, result_name in exec_list:
for i, idx in data_dict:
if tag == "":
out_name = f"{split}-{idx}.{suffix}"
else:
out_name = f"{split}-{idx}-{tag}.{suffix}"
path = paths[i]
if suffix != "gen.execfull.pkl" or os.path.exists(f"{path}/{out_name}"):
execution_results = pickle.load(open(f"{path}/{out_name}", "rb"))
assert len(execution_results) == len(data_dict[i, idx])
for j, execution_result in enumerate(execution_results):
data_dict[i, idx][j][result_name] = execution_result
"""equivalence checking functions"""
# base equivalence checking function
def single_exec_result_matching(exec_x, exec_y, good_execution_result):
try:
if (
exec_x[0] == good_execution_result
and exec_y[0] == good_execution_result
and exec_x[1] == exec_y[1]
):
return 1
else:
return 0
except:
return 0
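# Format note (inferred from the usage above): an execution result is a
# (status, output) pair. Two samples "match" only when both statuses equal
# good_execution_result (i.e. both executed successfully) and their outputs are
# identical; this pairwise agreement is what the MBR-exec selectors sum over.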
def multi_exec_result_matching(exec_x, exec_y, good_execution_result):
try:
same_output_count = 0
if exec_x[0] == good_execution_result and exec_y[0] == good_execution_result:
            for ex, ey in zip(exec_x[1], exec_y[1]):
                if ex == ey:
                    same_output_count += 1
return same_output_count
else:
return 0
except:
return 0
# first assertion call matching
def execution_selection_function(x, y, good_execution_result=0):
exec_x, exec_y = x["execution_result"], y["execution_result"]
return single_exec_result_matching(exec_x, exec_y, good_execution_result)
def multi_execution_selection_function(x, y, good_execution_result=0):
exec_x, exec_y = x["gen_execution_result_full"], y["gen_execution_result_full"]
return sum(
[
single_exec_result_matching(single_x, single_y, good_execution_result)
for single_x, single_y in zip(exec_x, exec_y)
]
)
# just executability checking
def executability_selection_function(x, good_execution_result=0):
exec_res = x["execution_result"]
return exec_res[0] == good_execution_result
def multi_executability_selection_function(x, good_execution_result=0):
exec_res = x["gen_execution_result_full"]
return sum([e[0] == good_execution_result for e in exec_res])
def bleu_selection_function(x, y):
return sentence_bleu(
[[ch for ch in x["trg_prediction"]]], [ch for ch in y["trg_prediction"]]
)
def token_bleu_selection_function(x, y):
return sentence_bleu([x["trg_prediction"].split()], y["trg_prediction"].split())
def bash_execution_tokenbleu_selection_function(x, y):
if not x["executable"] or not y["executable"]:
return 0
x = x["trg_prediction_splitted"]
y = y["trg_prediction_splitted"]
return sentence_bleu([x], y)
def get_mbpp_selector(
criterion,
mbpp_good_execution_result,
use_multi_assertions=False,
remove_function_header=False,
):
if "$" in criterion:
criterion = criterion.split("$")[0]
secondary_key_function = None
if not use_multi_assertions:
exec_func = execution_selection_function
else:
exec_func = multi_execution_selection_function
mbr_function = lambda x, y: all(
[
exec_func(x, y, mbpp_good_execution_result),
x["not_degenerate"],
y["not_degenerate"],
]
)
if not use_multi_assertions:
executability_func = executability_selection_function
else:
executability_func = multi_executability_selection_function
if criterion == "oracle":
def get_oracle(x):
if isinstance(x["execution_result_full_pass"], bool):
return int(x["execution_result_full_pass"])
elif isinstance(x["execution_result_full_pass"], list):
return int(
all(
isinstance(exec_result[1], bool) and exec_result[1] == True
for exec_result in x["execution_result_full_pass"]
)
)
sample_selection_function = get_oracle
elif criterion == "mbr_exec":
sample_selection_function = mbr_function
secondary_key_function = lambda x: x["sum_logprob"]
elif criterion in [
"sum_logprob",
"avg_logprob",
"avg_reverse_logprob",
"sum_reverse_logprob",
]:
criterion = criterion.split("-")[-1]
sample_selection_function = lambda x: (
x["not_degenerate"],
x[criterion],
)
elif criterion == "random":
sample_selection_function = lambda x: (
x["not_degenerate"],
random.random(),
)
elif criterion.startswith("avgreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
x["not_degenerate"],
x["avg_reverse_logprob"] * alpha + x["avg_logprob"] * (1 - alpha),
)
elif criterion.startswith("sumreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
x["not_degenerate"],
x["sum_reverse_logprob"] * alpha + x["sum_logprob"] * (1 - alpha),
)
elif criterion in [
"executability-sum_logprob",
"executability-avg_logprob",
"executability-avg_reverse_logprob",
"executability-sum_reverse_logprob",
]:
criterion = criterion.split("-")[-1]
sample_selection_function = lambda x: (
x["not_degenerate"],
executability_func(x, mbpp_good_execution_result),
x[criterion],
)
elif criterion == "executability-random":
sample_selection_function = lambda x: (
x["not_degenerate"],
executability_func(x, mbpp_good_execution_result),
random.random(),
)
elif criterion.startswith("executability-avgreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
executability_func(x, mbpp_good_execution_result),
x["not_degenerate"],
x["avg_reverse_logprob"] * alpha + x["avg_logprob"] * (1 - alpha),
)
elif criterion.startswith("executability-sumreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
executability_func(x, mbpp_good_execution_result),
x["not_degenerate"],
x["sum_reverse_logprob"] * alpha + x["sum_logprob"] * (1 - alpha),
)
else:
raise ValueError(f"Unknown criterion: {criterion}")
return sample_selection_function, secondary_key_function
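# Criterion string conventions handled above (and mirrored by the CLI below):
#   - "<crit>$<q>" appends a quantile threshold, e.g. "mbr_exec$0.5" keeps only
#     candidates whose secondary key is at or above the 0.5 quantile;
#   - "...-ensemble#<alpha>" sets the mixing weight, e.g.
#     "sumreverselogprob-ensemble#0.5" scores
#     alpha * sum_reverse_logprob + (1 - alpha) * sum_logprob with alpha = 0.5;
#   - an "executability-" prefix additionally folds an executability check into
#     the ranking key.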
"""
select and evaluate a group in batch
required keys:
data_split: 'train', 'dev' or 'test'
temperature: 0.1 .. 1.0
criterion: 'mbr_exec' ... see full options in the function
data_path: root data path for the task
n_samples: number of candidates
rand_seed: random seed for one experiment
"""
def bootstrap_mbpp(args):
mbpp_good_execution_result = 0
data_path = f"{args.data_path}/seed-*/**/*-{args.temperature}/"
multisample_selector = ExecutionBasedMultiSampleSelector(
data_path,
args.data_split,
"mbpp",
tag=args.tag,
model=args.model,
dataset="mbpp",
verbose=args.verbose,
no_rejection=args.no_rejection,
)
intrasample_selector = ExecutionBasedIntraMultiSampleSelector(
data_path,
args.data_split,
"mbpp",
tag=args.tag,
model=args.model,
dataset="mbpp",
verbose=args.verbose,
no_rejection=args.no_rejection,
)
id_keys = list(multisample_selector.data.keys())
acc_dict = collections.defaultdict(list)
std_dict = collections.defaultdict(list)
for crit in args.criteria:
sample_selection_function, secondary_key_function = get_mbpp_selector(
crit, mbpp_good_execution_result, remove_function_header=True
)
if "mbr" in crit:
selector = intrasample_selector
else:
selector = multisample_selector
step_results = collections.defaultdict(list)
for sub_n_samples in trange(
args.num_samples_start, args.num_samples_end, args.num_samples_gap
):
random.seed(args.seed)
np.random.seed(args.seed)
for bootstrap_i in range(args.num_bootstraps):
# id_is = np.random.choice(list(range(len(id_keys))), size=sub_n_samples, replace=True)
# ids = [id_keys[i] for i in id_is]
ids = random.sample(id_keys, sub_n_samples)
if secondary_key_function is not None:
if "$" in crit:
quantile_threshold = float(crit.split("$")[1])
else:
quantile_threshold = None
selected = selector.select(
ids,
sample_selection_function,
secondary_key_function,
quantile_threshold=quantile_threshold,
)
else:
selected = selector.select(ids, sample_selection_function)
result = evaluate_google_mbpp(
selected, "dataset/mbpp/mbpp.jsonl", "test", verbose=args.verbose
)
step_results[sub_n_samples].append(result)
for k, v in step_results.items():
acc_dict[crit].append(np.mean(v))
std_dict[crit].append(np.std(v))
return (acc_dict, std_dict)
def get_nl2bash_selector(criterion):
import random
secondary_key_function = None
if criterion == "oracle":
sample_selection_function = lambda x: x["charbleu"]
elif criterion == "mbr_bleu":
raise NotImplementedError
# sample_selection_function = lambda x, y: bleu_selection_function(x, y, tag=tag, model=model)
elif criterion == "mbr_tokenbleu":
sample_selection_function = lambda x, y: all(
[
token_bleu_selection_function(x, y),
x["not_degenerate"],
y["not_degenerate"],
]
)
elif criterion == "mbr_exec_tokenbleu":
sample_selection_function = lambda x, y: all(
[
bash_execution_tokenbleu_selection_function(x, y),
x["not_degenerate"],
y["not_degenerate"],
]
)
secondary_key_function = lambda x: x["sum_logprob"]
elif criterion in [
"sum_logprob",
"avg_logprob",
"avg_reverse_logprob",
"sum_reverse_logprob",
]:
criterion = criterion.split("-")[-1]
sample_selection_function = lambda x: (
x["not_degenerate"],
x[criterion],
)
elif criterion == "random":
sample_selection_function = lambda x: (
x["not_degenerate"],
random.random(),
)
elif criterion.startswith("avgreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
x["not_degenerate"],
x["avg_reverse_logprob"] * alpha + x["avg_logprob"] * (1 - alpha),
)
elif criterion.startswith("sumreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
x["not_degenerate"],
x["sum_reverse_logprob"] * alpha + x["sum_logprob"] * (1 - alpha),
)
elif criterion in [
"executability-sum_logprob",
"executability-avg_logprob",
"executability-avg_reverse_logprob",
"executability-sum_reverse_logprob",
]:
criterion = criterion.split("-")[-1]
sample_selection_function = lambda x: (
x["not_degenerate"],
x["executable"],
x[criterion],
)
elif criterion == "executability-random":
sample_selection_function = lambda x: (
x["not_degenerate"],
x["executable"],
random.random(),
)
elif criterion.startswith("executability-avgreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
x["executable"],
x["not_degenerate"],
x["avg_reverse_logprob"] * alpha + x["avg_logprob"] * (1 - alpha),
)
elif criterion.startswith("executability-sumreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
x["executable"],
x["not_degenerate"],
x["sum_reverse_logprob"] * alpha + x["sum_logprob"] * (1 - alpha),
)
else:
raise ValueError(f"Unknown criterion: {criterion}")
return sample_selection_function, secondary_key_function
def bootstrap_nl2bash(args):
data_path = f"{args.data_path}/seed-*/**/*-{args.temperature}/"
secondary_key_function = None
intra_selector = IntraMultiSampleSelector(
data_path,
args.data_split,
tag=args.tag,
model=args.model,
verbose=args.verbose,
dataset="nl2bash",
no_rejection=args.no_rejection,
)
multi_selector = MultiSampleSelector(
data_path,
args.data_split,
tag=args.tag,
model=args.model,
verbose=args.verbose,
dataset="nl2bash",
no_rejection=args.no_rejection,
)
id_keys = list(intra_selector.data.keys())
acc_dict = collections.defaultdict(list)
std_dict = collections.defaultdict(list)
for crit in args.criteria:
sample_selection_function, secondary_key_function = get_nl2bash_selector(crit)
if "mbr" in crit:
selector = intra_selector
else:
selector = multi_selector
step_results = collections.defaultdict(list)
for sub_n_samples in trange(
args.num_samples_start, args.num_samples_end, args.num_samples_gap
):
random.seed(args.seed)
np.random.seed(args.seed)
for bootstrap_i in range(args.num_bootstraps):
ids = random.sample(id_keys, sub_n_samples)
if secondary_key_function is not None:
selected = selector.select(
ids, sample_selection_function, secondary_key_function
)
else:
selected = selector.select(ids, sample_selection_function)
result = evaluate_charbleu(selected)["bleu"]
step_results[sub_n_samples].append(result)
for k, v in step_results.items():
acc_dict[crit].append(np.mean(v))
std_dict[crit].append(np.std(v))
return acc_dict, std_dict
def bootstrap_human_eval(args):
humaneval_good_execution_result = 0
data_path = f"{args.data_path}/seed-*/0-shot/*-{args.temperature}"
if args.top_p != 1.0:
data_path += f"-p{args.top_p}"
if args.max_tokens != 512:
data_path += f"-max{args.max_tokens}"
multisample_selector = ExecutionBasedMultiSampleSelector(
data_path,
args.data_split,
"humaneval",
tag=args.tag,
model=args.model,
verbose=args.verbose,
dataset=args.dataset,
no_rejection=args.no_rejection,
)
intrasample_selector = ExecutionBasedIntraMultiSampleSelector(
data_path,
args.data_split,
"humaneval",
tag=args.tag,
model=args.model,
verbose=args.verbose,
dataset=args.dataset,
no_rejection=args.no_rejection,
)
id_keys = list(multisample_selector.data.keys())
acc_dict = collections.defaultdict(list)
std_dict = collections.defaultdict(list)
for crit in args.criteria:
sample_selection_function, secondary_key_function = get_mbpp_selector(
crit,
humaneval_good_execution_result,
use_multi_assertions=args.use_generated_assertions,
)
if "mbr" in crit:
selector = intrasample_selector
else:
selector = multisample_selector
step_results = collections.defaultdict(list)
for sub_n_samples in trange(
args.num_samples_start, args.num_samples_end, args.num_samples_gap
):
random.seed(args.seed)
np.random.seed(args.seed)
for bootstrap_i in range(args.num_bootstraps):
ids = random.sample(id_keys, sub_n_samples)
# id_is = np.random.choice(list(range(len(id_keys))), size=sub_n_samples, replace=True)
# ids = [id_keys[i] for i in id_is]
if secondary_key_function is not None:
if "$" in crit:
quantile_threshold = float(crit.split("$")[1])
else:
quantile_threshold = None
selected = selector.select(
ids,
sample_selection_function,
secondary_key_function,
quantile_threshold=quantile_threshold,
)
else:
selected = selector.select(ids, sample_selection_function)
result = evaluate_humaneval(selected)
step_results[sub_n_samples].append(result)
for k, v in step_results.items():
acc_dict[crit].append(np.mean(v))
std_dict[crit].append(np.std(v))
return (acc_dict, std_dict)
def get_spider_selector(criterion, spider_good_execution_result=True):
import random
secondary_key_function = None
if criterion == "mbr_exec":
sample_selection_function = lambda x, y: execution_selection_function(
x, y, spider_good_execution_result
)
secondary_key_function = lambda x: x["sum_logprob"]
elif criterion == "mbr_exec_reverse":
sample_selection_function = lambda x, y: execution_selection_function(
x, y, spider_good_execution_result
)
secondary_key_function = lambda x: x["avg_reverse_logprob"]
elif criterion == "mbr_exec_avglogp":
sample_selection_function = lambda x, y: execution_selection_function(
x, y, spider_good_execution_result
)
secondary_key_function = lambda x: x["avg_logprob"]
elif criterion == "random":
sample_selection_function = lambda x: random.random()
elif criterion in [
"sum_logprob",
"avg_logprob",
"avg_reverse_logprob",
"sum_reverse_logprob",
]:
sample_selection_function = lambda x: x[criterion]
elif criterion.startswith("avgreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: x["avg_reverse_logprob"] * alpha + x[
"avg_logprob"
] * (1 - alpha)
elif criterion.startswith("sumreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: x["sum_reverse_logprob"] * alpha + x[
"sum_logprob"
] * (1 - alpha)
elif criterion == "mbr_bleu":
sample_selection_function = lambda x, y: bleu_selection_function(x, y)
elif criterion == "mbr_tokenbleu":
sample_selection_function = lambda x, y: token_bleu_selection_function(x, y)
elif criterion in [
"executability-sum_logprob",
"executability-avg_logprob",
"executability-avg_reverse_logprob",
"executability-sum_reverse_logprob",
]:
criterion = criterion.split("-")[1]
sample_selection_function = lambda x: (
executability_selection_function(x, spider_good_execution_result),
x[criterion],
)
elif criterion == "executability-random":
sample_selection_function = lambda x: (
executability_selection_function(x, spider_good_execution_result),
random.random(),
)
elif criterion == "executability-mbr_bleu":
sample_selection_function = (
lambda x, y: bleu_selection_function(x, y)
* x["execution_result"][0]
* y["execution_result"][0]
)
elif criterion == "executability-mbr_tokenbleu":
sample_selection_function = (
lambda x, y: token_bleu_selection_function(x, y)
* x["execution_result"][0]
* y["execution_result"][0]
)
elif criterion.startswith("executability-avgreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
executability_selection_function(x, spider_good_execution_result),
x["avg_reverse_logprob"] * alpha + x["avg_logprob"] * (1 - alpha),
)
elif criterion.startswith("executability-sumreverselogprob-ensemble#"):
alpha = float(criterion.split("#")[1])
sample_selection_function = lambda x: (
executability_selection_function(x, spider_good_execution_result),
x["sum_reverse_logprob"] * alpha + x["sum_logprob"] * (1 - alpha),
)
else:
raise ValueError(f"Unknown criterion: {criterion}")
return sample_selection_function, secondary_key_function
def bootstrap_spider(args):
spider_good_execution_result = True
data_path = f"{args.data_path}/seed-*/**/*-{args.temperature}/"
intrasample_selector = ExecutionBasedIntraMultiSampleSelector(
data_path,
"dev",
"spider",
tag=args.tag,
model=args.model,
verbose=args.verbose,
dataset="spider",
no_rejection=args.no_rejection,
)
multisample_selector = ExecutionBasedMultiSampleSelector(
data_path,
"dev",
"spider",
tag=args.tag,
model=args.model,
verbose=args.verbose,
dataset="spider",
no_rejection=args.no_rejection,
) # pre-execution for faster evaluation
id_keys = list(multisample_selector.data.keys())
acc_dict = collections.defaultdict(list)
std_dict = collections.defaultdict(list)
# preloading spider data to reduce io
from dataset.spider_official.evaluation import (
evaluate,
build_foreign_key_map_from_json,
)
kmaps = build_foreign_key_map_from_json(
"/private/home/tianyizzz/projects/mbr-exec/dataset/spider/tables.json"
)
with open("dataset/spider/dev_gold.sql") as f:
glist = [l.strip().split("\t") for l in f.readlines() if len(l.strip()) > 0]
all_args = []
flat_accs = []
for crit in args.criteria:
if "mbr" in crit:
selector = intrasample_selector
else:
selector = multisample_selector
for sub_n_samples in range(
args.num_samples_start, args.num_samples_end, args.num_samples_gap
):
random.seed(args.seed)
np.random.seed(args.seed)
for bootstrap_i in range(args.num_bootstraps):
id_is = np.random.choice(
list(range(len(id_keys))), size=sub_n_samples, replace=True
)
ids = [id_keys[i] for i in id_is]
all_args.append(
(
ids,
crit,
selector,
sub_n_samples * args.num_bootstraps + bootstrap_i,
kmaps,
glist,
)
)
if args.num_procs > 1:
print(f"running with {args.num_procs} processes.")
from multiprocessing.pool import ThreadPool as Pool
with Pool(processes=args.num_procs) as pool:
for acc in tqdm(
pool.imap(evaluate_spider_one, all_args, chunksize=1),
total=len(all_args),
desc=f"{crit}",
):
flat_accs.append(acc)
else:
for data in tqdm(all_args, total=len(all_args)):
flat_accs.append(evaluate_spider_one(data))
acc_idx = 0
for crit in args.criteria:
step_results = collections.defaultdict(list)
for sub_n_samples in trange(
args.num_samples_start, args.num_samples_end, args.num_samples_gap
):
for bootstrap_i in range(args.num_bootstraps):
step_results[sub_n_samples].append(flat_accs[acc_idx])
acc_idx += 1
for k, v in step_results.items():
acc_dict[crit].append(np.mean(v))
std_dict[crit].append(np.std(v))
return (acc_dict, std_dict)
def evaluate_spider_one(args):
ids, crit, selector, bootstrap_i, kmaps, glist = args
sample_selection_function, secondary_key_function = get_spider_selector(crit)
if secondary_key_function is not None:
selected = selector.select(
ids, sample_selection_function, secondary_key_function
)
else:
selected = selector.select(ids, sample_selection_function)
acc = evaluate_spider_with_cached_results(selected)
return acc
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--tag", type=str, default="")
parser.add_argument("--out_dir", type=str, default="./result_db")
parser.add_argument(
"--model",
type=str,
choices=[
"codegen-2B",
"codegen-2B-half",
"codegen-6B",
"codegen-6B-half",
"codegen-16B-half",
"incoder-1B",
"incoder-1B-half",
"incoder-6B",
"incoder-6B-half",
"codex001",
"codex002",
"codex-cushman",
],
)
parser.add_argument(
"--dataset",
type=str,
default="mbpp",
choices=[
"mbpp",
"spider",
"nl2bash",
"humaneval",
"codet_humaneval",
"mbpp_sanitized",
],
)
parser.add_argument("--num_samples_start", type=int, default=24)
parser.add_argument("--num_samples_end", type=int, default=25)
parser.add_argument("--num_samples_gap", type=int, default=1)
parser.add_argument("--num_procs", type=int, default=1)
parser.add_argument("--num_bootstraps", type=int, default=1)
parser.add_argument("--temperature", type=float, default=0.3)
parser.add_argument("--max_tokens", type=int, default=512)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--verbose", default=False, action="store_true")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--grid_search_alpha", action="store_true", default=False)
parser.add_argument("--ablate", action="store_true", default=False)
parser.add_argument("--no-rejection", action="store_true", default=False)
parser.add_argument(
"--use_generated_assertions", action="store_true", default=False
)
parser.add_argument(
"--data_path",
type=str,
default="samples",
)
args = parser.parse_args()
if args.temperature > 0:
base_crits = ["sum_logprob", "avg_logprob", "avg_reverse_logprob", "random"]
if args.grid_search_alpha:
for i in range(1, 10):
base_crits.append(
f"sumreverselogprob-ensemble#0.{i}",
)
for i in range(1, 10):
base_crits.append(
f"avgreverselogprob-ensemble#0.{i}",
)
else:
base_crits.append(
f"sumreverselogprob-ensemble#0.5",
)
base_crits.append(
f"avgreverselogprob-ensemble#0.5",
)
all_crits = base_crits + ["executability-" + b for b in base_crits]
else:
all_crits = ["sum_logprob"]
args.criteria = all_crits
args.data_path = f"{args.data_path}/{args.model}/{args.dataset}"
if args.dataset == "spider":
args.criteria = all_crits + ["mbr_exec"]
acc_dict, std_dict = bootstrap_spider(args)
elif args.dataset == "mbpp":
args.criteria = args.criteria + ["mbr_exec", "oracle"]
args.data_split = "test"
acc_dict, std_dict = bootstrap_mbpp(args)
elif "humaneval" in args.dataset or args.dataset == "mbpp_sanitized":
args.criteria = args.criteria + ["mbr_exec", "oracle"]
args.data_split = "test"
acc_dict, std_dict = bootstrap_human_eval(args)
elif args.dataset == "nl2bash":
args.criteria = args.criteria + ["mbr_exec_tokenbleu", "oracle"]
args.data_split = "dev"
acc_dict, std_dict = bootstrap_nl2bash(args)
else:
raise ValueError
if args.tag != "":
out_path = Path(
f"{args.out_dir}/{args.dataset}-{args.model}-temp{args.temperature}-{args.tag}"
)
else:
out_path = Path(
f"{args.out_dir}/{args.dataset}-{args.model}-temp{args.temperature}"
)
out_path.mkdir(parents=True, exist_ok=True)
torch.save(acc_dict, out_path / "acc.pt")
torch.save(std_dict, out_path / "std.pt")
print(f"saving to {out_path}")
for crit in args.criteria:
print(crit, f"{acc_dict[crit][-1]:.4f} {std_dict[crit][-1]:.2f}")
|
coder_reviewer_reranking-main
|
sample_selectors.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
from pathlib import Path
import os
from glob import glob
from argparse import ArgumentParser
import html
import json
from utils import *
from tqdm import tqdm, trange
from functools import partial
from utils import write_jsonl, parse_prompt, make_new_context
from pyminifier_canonicalize import remove_print, clean_comment
parser = ArgumentParser()
parser.add_argument("--model", type=str, default="codex")
parser.add_argument(
"--dataset", type=str, default="mbpp", choices=["mbpp", "spider", "nl2bash"]
)
parser.add_argument("--tag", type=str, default="")
parser.add_argument("--split", type=str, default="test")
parser.add_argument("--batch_size", type=int, default=20)
parser.add_argument("--max_tokens", type=int, default=512)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--num_samples", type=int, default=5)
parser.add_argument("--num_procs", type=int, default=40)
parser.add_argument("--canonicalize", action="store_true", default=False)
parser.add_argument(
"--data_path",
type=str,
default="/private/home/tianyizzz/projects/mbr-exec-data/mbr-exec-release/",
)
parser.add_argument("--temperature", type=float, default=0.3)
args = parser.parse_args()
args.data_path = Path(args.data_path)
out_dir = f"seed-*/**/*-{args.temperature}"
if args.top_p != 1.0:
out_dir += f"-p{args.top_p}"
if args.max_tokens != 512:
out_dir += f"-max{args.max_tokens}"
args.data_path = args.data_path / args.dataset / out_dir
paths = list(sorted(glob(str(args.data_path), recursive=True)))
def find_start(tokens, dataset="mbpp"):
    # Return the index where the last "<info>" (mbpp) or "<text>" tag starts in
    # the token list; the reverse-scoring pass keeps logprobs from there onward.
if dataset == "mbpp":
match_token = ["<", "info", ">"]
else:
match_token = ["<", "text", ">"]
for i in range(len(tokens) - 3, 0, -1):
if tokens[i : i + 3] == match_token:
break
return i
def batch_query_reverse_logp(all_codex_data, args, verbose=False):
for outer_i, batch_start in enumerate(
trange(0, len(all_codex_data), args.batch_size, disable=not verbose)
):
batch_data = all_codex_data[batch_start : batch_start + args.batch_size]
batch_prompts = []
batch_prompts_without_ref = []
for codex_data in batch_data:
prompt = codex_data["prompt"]
prompt_parse = parse_prompt(prompt, dataset=args.dataset)
code_sample = codex_data["trg_prediction"]
            if args.dataset == "mbpp" and args.canonicalize:
                try:
                    code_sample = clean_comment(code_sample)
                except:
                    code_sample = code_sample
                code_sample = remove_print(code_sample)
            prompt_parse[-1]["code"] = f"<code>{code_sample}</code>"
with_ref_prompt, without_ref_prompt = make_new_context(
prompt_parse, dataset=args.dataset
)
batch_prompts.append(with_ref_prompt)
batch_prompts_without_ref.append(without_ref_prompt)
with_ref_reponse, _ = safe_codex_call(
args,
batch_prompts,
temperature=1.0,
echo=True,
max_tokens=0,
api_i=outer_i % 3,
)
for batch_i, (codex_data, with_ref_prompt, without_ref_prompt) in enumerate(
zip(batch_data, batch_prompts, batch_prompts_without_ref)
):
num_api_tokens = find_start(
with_ref_reponse["choices"][batch_i]["logprobs"]["tokens"],
dataset=args.dataset,
)
gt_prompt_logprob = with_ref_reponse["choices"][batch_i]["logprobs"][
"token_logprobs"
][num_api_tokens:]
gt_prompt_tokens = with_ref_reponse["choices"][batch_i]["logprobs"][
"tokens"
][num_api_tokens:]
codex_data["reverse_prompt_with_ref"] = with_ref_prompt
codex_data["reverse_prompt_without_ref"] = without_ref_prompt
codex_data["prompt_reverse_tokens"] = gt_prompt_tokens
codex_data["prompt_reverse_logprobs"] = gt_prompt_logprob
codex_data["prompt_reverse_full_tokens"] = with_ref_reponse["choices"][
batch_i
]["logprobs"]["tokens"]
codex_data["prompt_reverse_full_logprobs"] = with_ref_reponse["choices"][
batch_i
]["logprobs"]["token_logprobs"]
return all_codex_data
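# Hedged note (interpretation of the routine above): the extra API call echoes
# the rewritten prompt with max_tokens=0, so only logprobs of existing tokens
# are returned; find_start then keeps the span after the last <info>/<text>
# tag, i.e. roughly the log-likelihood of the original instruction given the
# sampled code. These values are written out as prompt_reverse_logprobs and
# consumed as the reverse ("reviewer") score in sample_selectors.py.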
paths = sorted(paths)
print(paths)
for path in tqdm(paths, desc="total seeds", disable=False):
path = Path(path)
for sample_i in trange(args.num_samples, leave=False):
if len(args.tag) == 0:
output_file_name = f"{args.split}-{sample_i}-with-reverse.jsonl"
else:
output_file_name = f"{args.split}-{sample_i}-with-reverse-{args.tag}.jsonl"
try:
all_codex_data = []
with open(path / f"{args.split}-{sample_i}.jsonl", "r") as f:
for i, line in enumerate(f):
                    codex_data = json.loads(line)
all_codex_data.append(codex_data)
except Exception as e:
print(e)
print(f"{path / output_file_name} not ready yet. skipping.")
continue
if (path / output_file_name).exists():
with open(path / output_file_name, "r") as f:
line_num = len(f.readlines())
if line_num == len(all_codex_data):
continue
from multiprocessing import Pool
if args.num_procs > 1:
all_codex_data_with_reverse = []
chunk_size = len(all_codex_data) // args.num_procs + 1
chunked_all_codex_data = [
all_codex_data[chunk_start : chunk_start + chunk_size]
for chunk_start in range(0, len(all_codex_data), chunk_size)
]
with Pool(processes=args.num_procs) as pool:
for codex_data_with_reverse in tqdm(
pool.imap(
partial(batch_query_reverse_logp, args=args, verbose=True),
chunked_all_codex_data,
),
total=len(chunked_all_codex_data),
):
all_codex_data_with_reverse.extend(codex_data_with_reverse)
else:
all_codex_data_with_reverse = batch_query_reverse_logp(
all_codex_data, args, verbose=True
)
with open(path / output_file_name, "w") as f:
for codex_data_with_reverse in all_codex_data_with_reverse:
codex_data_json = json.dumps(codex_data_with_reverse)
f.write(codex_data_json + "\n")
|
coder_reviewer_reranking-main
|
fewshot_reviewer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import data
from collectors import CollectorWithInfo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--info-mode",
type=str,
default="assertion",
choices=["function_name", "assertion"],
)
parser.add_argument(
"--dataset",
type=str,
choices=["mbpp_sanitized", "codet_humaneval", "humaneval"],
)
parser.add_argument("--num_seeds", type=int, default=25)
parser.add_argument("--num_samples", type=int, default=5)
args = CollectorWithInfo.parse_args(parser)
args.output_path = args.output_path + "/" + str(args.dataset)
if args.dataset == "codet_humaneval":
data_file_path = "dataset/human_eval/dataset/CodeTHumanEval.jsonl"
elif args.dataset == "mbpp_sanitized":
data_file_path = "dataset/mbpp/mbpp_sanitized_for_code_generation.jsonl"
args.end_template = ["\nclass", "\ndef", "\n#", "\nif"]
dataset = getattr(data, "HumanEvalDataset")(path=data_file_path, mode="prompt_only")
collector = CollectorWithInfo.from_args(args, dataset)
if args.temperature > 0:
args.seed = list(range(args.num_seeds))
for i in range(args.num_samples):
collector(i, i, 5)
else:
args.seed = list(range(args.num_seeds))
collector(0, 0, 1)
|
coder_reviewer_reranking-main
|
collect_zeroshot.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
import collections
import json
import os
import regex
class NL2BashDataset(object):
def __init__(self, path="dataset/nl2bash/data/bash"):
self.data = collections.defaultdict()
for split in ["train", "dev", "test"]:
nls = [x.strip() for x in open(os.path.join(path, f"{split}.nl.filtered"))]
cms = [x.strip() for x in open(os.path.join(path, f"{split}.cm.filtered"))]
infos = ["" for x in open(os.path.join(path, f"{split}.cm.filtered"))]
self.data[split] = list(zip(nls, cms, infos))
class SpiderDataset(object):
def __init__(self, path="dataset/spider"):
self.data = collections.defaultdict()
self.dbs = json.load(open(f"{path}/tables.json"))
self.id2db = {item["db_id"]: item for item in self.dbs}
for split in ["train", "dev"]:
split_fname = "train_spider" if split == "train" else split
data = json.load(open(f"{path}/{split_fname}.json"))
nls = [x["question"] for x in data]
cms = [x["query"] for x in data]
db_info = [self.extract_db_info(x["db_id"]) for x in data]
self.data[split] = list(zip(nls, cms, db_info))
def extract_db_info(self, db_id):
db = self.id2db[db_id]
id2table = {
i: table_name for i, table_name in enumerate(db["table_names_original"])
}
info = f"{db_id} "
used_table_id = set()
for table_id, column_name in db["column_names_original"]:
if table_id == -1:
info += f"| {column_name} "
elif table_id not in used_table_id:
info += f"| {id2table[table_id]} : {column_name} "
used_table_id.add(table_id)
else:
info += f", {column_name} "
return info.strip()
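# Illustrative output (hypothetical schema): extract_db_info serializes a
# database as "<db_id> | <table> : <col> , <col> | <table> : <col> ...", with
# database-level columns (table_id == -1) emitted as "| <col>", e.g.
#   "concert_db | stadium : name , capacity | singer : name , age"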
class MBPPGoogleDataset(object):
def __init__(self, path="dataset/mbpp/mbpp.jsonl", mode="function_name"):
raw_data = sorted(
[json.loads(x) for x in open(path)], key=lambda x: x["task_id"]
)
for i, data_item in enumerate(raw_data):
assert data_item["task_id"] == i + 1
self.raw_data = collections.defaultdict()
self.mode = mode
# 374 for training, 100 heldout, 500 test
self.raw_data["train"] = raw_data[:10] + raw_data[510:]
self.raw_data["test"] = raw_data[10:510]
# data for codex collector, in input-output-info format
self.data = collections.defaultdict()
for split in self.raw_data:
self.data[split] = self.extract_data(self.raw_data[split], mode)
@staticmethod
def extract_data(raw_data, mode):
if mode == "function_name":
get_function_name = lambda test_example: regex.match(
"assert [\(]*([^\(]+)\(", test_example
).group(1)
info = [get_function_name(x["test_list"][0]) for x in raw_data]
elif mode == "assertion":
info = [x["test_list"][0] for x in raw_data]
elif mode == "assertion-full":
info = [x["test_list"] for x in raw_data]
else:
raise Exception(f"Mode {mode} not supported.")
nls = [x["text"] for x in raw_data]
codes = [x["code"] for x in raw_data]
return list(zip(nls, codes, info))
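# Example of the "function_name" mode above (hypothetical assertion): the regex
# pulls the called name out of the first test case, e.g.
#   "assert add_two(1, 2) == 3"       -> "add_two"
#   "assert (add_two(1, 2)) == 3"     -> "add_two"   (leading parens stripped)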
from dataset.human_eval.human_eval.data import read_problems
class HumanEvalDataset(object):
def __init__(
self,
path="dataset/human_eval/dataset/HumanEval.jsonl",
assertion_path="",
mode="assertion",
):
self.path = path
self.data = dict()
self.raw_data = read_problems(path)
self.mode = mode
if assertion_path != "":
self.assertion_data = read_problems(assertion_path)
else:
self.assertion_data = self.raw_data
self.data["test"] = self.extract_data()
def extract_data(self):
nls = []
codes = []
info = []
for pid, prob in self.raw_data.items():
assert_prob = self.assertion_data[pid]
nls.append(prob["prompt"])
docstring, func_header, func_context, doc_start = extract_docstring(
assert_prob["prompt"]
)
self.raw_data[pid]["func_header"] = func_header.strip() + "\n"
self.raw_data[pid]["func_context"] = func_context
codes.append(prob["canonical_solution"])
if self.mode != "prompt_only":
assertions = extract_test(pid, prob["entry_point"], docstring)
if self.mode == "assertion":
self.raw_data[pid]["assertion"] = assertions[0]
info.append(assertions[0])
else:
self.raw_data[pid]["assertion"] = assertions
info.append(assertions)
else:
info.append([])
return list(zip(nls, codes, info))
class MBPPSanDataset(HumanEvalDataset):
def extract_data(self):
nls = []
codes = []
info = []
for pid, prob in self.raw_data.items():
nls.append(prob["prompt"])
docstring, func_header, func_context, doc_start = extract_docstring(
prob["prompt"]
)
self.raw_data[pid]["func_header"] = func_header.strip() + "\n"
self.raw_data[pid]["func_context"] = func_context
codes.append(prob["canonical_solution"])
if self.mode != "prompt_only":
assertions = [
l.strip() for l in prob["test"].split("\n")[1:] if l.strip() != ""
]
if self.mode == "assertion":
self.raw_data[pid]["assertion"] = assertions[0]
info.append(assertions[0])
elif self.mode == "assertion-all":
self.raw_data[pid]["assertion"] = assertions
info.append(assertions)
else:
raise ValueError("invalid mode")
else:
info.append([])
return list(zip(nls, codes, info))
def rindex(lst, value):
return len(lst) - lst[::-1].index(value) - 1
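# Example: rindex returns the index of the last occurrence of value, e.g.
# rindex([1, 2, 1, 3], 1) == 2; extract_docstring below applies it to a string
# with a reversed needle to locate the final "def " in a prompt.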
def _check_test_case_validation(test_case):
if len(test_case.strip()) < 1:
return False
if "assert" not in test_case:
return False
try:
multi_line_test_case = test_case.replace("\n", "\n ")
assert_in_a_block = f"try:\n {multi_line_test_case}\nexcept:\n pass\n"
compile(assert_in_a_block, "", "exec")
return True
except Exception:
return False
def extract_generated_tests(content, entry_point):
def _truncate(content):
for identifier in ["\nclass", "\ndef", "\n#", "\nif", "\nprint"]:
if identifier in content:
content = content.split(identifier)[0]
return content.strip()
split_by_assert = [
f"assert {part}".strip()
for part in f"assert {content}".split("assert ")
if (entry_point.strip() in part) and len(part.strip()) > 0
]
truncated_test_cases = [_truncate(i) for i in split_by_assert]
checked_assertions = [
i for i in truncated_test_cases if _check_test_case_validation(i)
]
return checked_assertions
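# Illustrative behaviour (hypothetical entry_point "foo"): splitting on
# "assert " keeps only chunks mentioning the entry point, truncates trailing
# code, and drops candidates that do not compile, e.g. a generation
#   "foo(1) == 1\nassert foo(2) == 4\nprint('done')"
# yields ["assert foo(1) == 1", "assert foo(2) == 4"].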
def extract_docstring(prompt):
    # Find the last "def " in the prompt by searching for its reversed form
    # " fed" in the reversed string (see rindex above), then back up to roughly
    # the start of that function signature.
    func_start = max(rindex(prompt, " fed") - 4, 0)
clean_prompt = prompt[func_start:]
if '"""' in prompt:
doc_start = '"""'
else:
doc_start = "'''"
docstring = clean_prompt[clean_prompt.strip().index(doc_start) :]
func_header = clean_prompt[: clean_prompt.strip().index(doc_start)]
func_context = prompt[:func_start]
return docstring, func_header, func_context, doc_start
def extract_test(pid, func_name, docstring):
if pid in manual_extract:
return manual_extract[pid]
else:
return _extract_tests(func_name, docstring)
def _extract_tests(func_name, docstring):
all_tests = []
doc_lines = docstring.strip().split("\n")
test_start = False
if ">>>" in docstring:
for l in doc_lines:
if not test_start:
if ">>>" in l and func_name in l:
test_start = True
if test_start:
if ">>>" in l and func_name in l:
l = l.strip()[3:].strip()
all_tests.append(l)
elif l.strip() != "" and '"""' not in l:
all_tests[-1] = "assert " + all_tests[-1] + f" == {l.strip()}"
test_start = False
elif any(
["==>" in docstring, "=>" in docstring, "->" in docstring, "➞" in docstring]
):
        for special_char in ["==>", "=>", "->", "➞"]:
if special_char in docstring:
break
for l in doc_lines:
if not test_start:
if special_char in l and func_name in l:
test_start = True
if test_start and (special_char in l and func_name in l):
l = l.strip().replace(special_char, "==")
l = "assert " + l
all_tests.append(l)
elif any(["==" in docstring, "returns" in docstring]):
for special_char in ["==", "returns"]:
if special_char in docstring:
break
for l in doc_lines:
if not test_start:
if special_char in l and func_name + "(" in l:
test_start = True
if test_start and (special_char in l and func_name in l):
l = "assert " + l.strip().replace(special_char, "==")
all_tests.append(l)
return all_tests
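# Example of the doctest branch above (hypothetical docstring): the lines
#   >>> add(2, 3)
#   5
# become "assert add(2, 3) == 5"; the "=>"/"->" and "=="/"returns" branches
# instead rewrite inline examples such as "add(2, 3) -> 5" into the same
# "assert add(2, 3) == 5" form.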
manual_extract = {
"HumanEval/12": [
"assert longest(['a', 'b', 'c']) == 'a'",
"assert longest(['a', 'bb', 'ccc']) == 'ccc'",
],
"HumanEval/38": ["assert True == True"], # empty assertion to handle no doc test
"HumanEval/41": ["assert True == True"], # empty assertion to handle no doc test
"HumanEval/50": ["assert True == True"], # empty assertion to handle no doc test
"HumanEval/67": [
        'assert fruit_distribution("5 apples and 6 oranges", 19) == 8',
        'assert fruit_distribution("0 apples and 1 oranges",3) == 2',
        'assert fruit_distribution("2 apples and 3 oranges", 100) == 95',
        'assert fruit_distribution("100 apples and 1 oranges",120) == 19',
],
"HumanEval/68": [
"assert pluck([4,2,3]) == [2, 1]",
"assert pluck([1,2,3]) == [2, 1]",
"assert pluck([]) == []",
"assert pluck([5, 0, 3, 0, 4, 2]) == [0, 1]",
],
"HumanEval/78": [
"assert hex_key('AB') == 1",
"assert hex_key('1077E') == 2",
"assert hex_key('ABED1A33') == 4",
"assert hex_key('123456789ABCDEF0') == 6",
"assert hex_key('2020') == 2",
],
"HumanEval/79": [
"assert decimal_to_binary(15) == 'db1111db'",
"assert decimal_to_binary(32) == 'db100000db'",
],
"HumanEval/81": [
"assert grade_equation([4.0, 3, 1.7, 2, 3.5]) ==> ['A+', 'B', 'C-', 'C', 'A-']"
],
"HumanEval/83": ["assert True == True"], # empty assertion to handle no doc test
"HumanEval/84": ["assert True == True"], # empty assertion to handle no doc test
"HumanEval/86": [
"assert anti_shuffle('Hi') == 'Hi'",
"assert anti_shuffle('hello') == 'ehllo'",
"assert anti_shuffle('Hello World!!!') == 'Hello !!!Wdlor'",
],
"HumanEval/88": [
"assert sort_array([]) == []",
"assert sort_array([5]) == [5]",
"assert sort_array([2, 4, 3, 0, 1, 5]) == [0, 1, 2, 3, 4, 5]",
"assert sort_array([2, 4, 3, 0, 1, 5, 6]) == [6, 5, 4, 3, 2, 1, 0]",
],
"HumanEval/94": [
"assert skjkasdkd([0,3,2,1,3,5,7,4,5,5,5,2,181,32,4,32,3,2,32,324,4,3]) == 10",
"assert skjkasdkd([1,0,1,8,2,4597,2,1,3,40,1,2,1,2,4,2,5,1]) == 25",
"assert skjkasdkd([1,3,1,32,5107,34,83278,109,163,23,2323,32,30,1,9,3]) == 13",
"assert skjkasdkd([0,724,32,71,99,32,6,0,5,91,83,0,5,6]) == 11",
"assert skjkasdkd([0,81,12,3,1,21]) == 3",
"assert skjkasdkd([0,8,1,2,1,7]) == 7",
],
"HumanEval/95": [
        'assert check_dict_case({"a":"apple", "b":"banana"}) == True',
        'assert check_dict_case({"a":"apple", "A":"banana", "B":"banana"}) == False',
        'assert check_dict_case({"a":"apple", 8:"banana", "a":"apple"}) == False',
        'assert check_dict_case({"Name":"John", "Age":"36", "City":"Houston"}) == False',
        'assert check_dict_case({"STATE":"NC", "ZIP":"12345" }) == True',
],
"HumanEval/97": [
"assert multiply(148, 412) == 16",
"assert multiply(19, 28) == 72",
"assert multiply(2020, 1851) == 0",
"assert multiply(14,-15) == 20",
],
"HumanEval/102": [
"assert choose_num(12, 15) == 14",
"assert choose_num(13, 12) == -1",
],
"HumanEval/105": ["assert True == True"],
"HumanEval/107": [
"assert even_odd_palindrome(3) == (1, 3)",
"assert even_odd_palindrome(12) == (4, 6)",
],
"HumanEval/108": [
"assert count_nums([]) == 0",
"assert count_nums([-1, 11, -11]) == 1",
"assert count_nums([1, 1, 2]) == 3",
],
"HumanEval/115": [
"assert max_fill([[0,0,1,0], [0,1,0,0], [1,1,1,1]]) == 1",
"assert max_fill([[0,0,1,1], [0,0,0,0], [1,1,1,1], [0,1,1,1]]) == 2",
"assert max_fill([[0,0,0], [0,0,0]]) == 0",
],
"HumanEval/116": [
"assert sort_array([1, 5, 2, 3, 4]) == [1, 2, 3, 4, 5]",
"assert sort_array([-2, -3, -4, -5, -6]) == [-6, -5, -4, -3, -2]",
"assert sort_array([1, 0, 2, 3, 4]) == [0, 1, 2, 3, 4]",
],
"HumanEval/112": [
"assert reverse_delete('abcde', 'ae') == ('bcd',False)",
"assert reverse_delete('abcdef', 'b') == ('acdef',False)",
"assert reverse_delete('abcdedcba', 'ab') == ('cdedc',True)",
],
"HumanEval/120": [
"assert maximum([-3, -4, 5], 3) == [-4, -3, 5]",
"assert maximum([4, -4, 4], 2) == [4, 4]",
"assert maximum([-3, 2, 1, 2, -1, -2, 1], 1) == [2]",
],
"HumanEval/122": [
"assert add_elements([111,21,3,4000,5,6,7,8,9]) == 24",
],
"HumanEval/128": [
"assert prod_signs([1, 2, 2, -4]) == -9",
"assert prod_signs([0, 1]) == 0",
"assert prod_signs([]) == None",
],
"HumanEval/129": [
"assert minPath([[1,2,3], [4,5,6], [7,8,9]], 3) == [1, 2, 1]",
"assert minPath([[5,9,3], [4,1,6], [7,8,2]], 1) == [1]",
],
"HumanEval/130": ["assert tri(3) == [1, 3, 2, 8]"],
"HumanEval/133": [
"assert sum_squares([1,2,3]) == 14",
"assert sum_squares([1,4,9]) == 98",
"assert sum_squares([1,3,5,7]) == 84",
"assert sum_squares([1.4,4.2,0]) == 29",
"assert sum_squares([-2.4,1,1]) == 6",
],
"HumanEval/135": [
"assert can_arrange([1,2,4,3,5]) == 3",
"assert can_arrange([1,2,3]) == -1",
],
"HumanEval/141": [
"assert file_name_check('example.txt') == 'Yes'",
"assert file_name_check('1example.dll') == 'No'",
],
"HumanEval/142": [
"assert sum_squares([1,2,3]) == 6",
"assert sum_squares([]) == 0",
"assert sum_squares([-1,-5,2,-1,-5]) == -126",
],
"HumanEval/143": [
"assert words_in_sentence('This is a test') == 'is'",
"assert words_in_sentence('lets go for swimming') == 'go for'",
],
"HumanEval/144": [
'assert simplify("1/5", "5/1") == True',
'assert simplify("1/6", "2/1") == False',
'assert simplify("7/10", "10/2") == False',
],
"HumanEval/145": [
"assert order_by_points([1, 11, -1, -11, -12]) == [-1, -11, 1, -12, 11]",
"assert order_by_points([]) == []",
],
"HumanEval/156": [
"assert int_to_mini_roman(19) == 'xix'",
"assert int_to_mini_roman(152) == 'clii'",
"assert int_to_mini_roman(426) == 'cdxxvi'",
],
"HumanEval/147": [
"assert get_max_triples(5) == 1",
],
"HumanEval/149": [
'assert list_sort(["aa", "a", "aaa"]) == ["aa"]',
'assert list_sort(["ab", "a", "aaa", "cd"]) == ["ab", "cd"]',
],
"HumanEval/159": [
"assert eat(5, 6, 10) == [11, 4]",
"assert eat(4, 8, 9) == [12, 1]",
"assert eat(1, 10, 10) == [11, 0]",
"assert eat(2, 11, 5) == [7, 0]",
],
"HumanEval/160": [
"assert do_algebra([2, 3, 4, 5], ['+', '*', '-']) == 9",
],
"HumanEval/161": [
'assert solve("1234") == "4321"',
'assert solve("ab") == "AB"',
'assert solve("#a@C") == "#A@c"',
],
"HumanEval/162": [
"assert string_to_md5('Hello world') == '3e25960a79dbc69b674cd4ec67a72c62'"
],
}
|
coder_reviewer_reranking-main
|
data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torchvision import datasets, transforms
from torch.utils.data.sampler import RandomSampler
import torchvision
import torchvision.transforms as transforms
import torch.utils.data
import pdb
class CIFAR10_Wrapper(torch.utils.data.Dataset):
def __init__(self, root, train, download, transform):
self.dataset = torchvision.datasets.CIFAR10(
root=root, train=train,
download=download, transform=transform)
self.transformed_cache = {}
self.access_total = 0
self.cache_hit = 0
self.access_since_retransform = 0
def __getitem__(self, index):
#print(self.transformed_cache.keys())
if index in self.transformed_cache.keys():
item = self.transformed_cache[index]
self.cache_hit += 1
#print("Using cache: ", index)
else:
item = self.dataset[index]
self.transformed_cache[index] = item
#pdb.set_trace()
#print("Writing cache: ", index)
self.access_total += 1
self.access_since_retransform += 1
#print("since retransform: ", self.access_since_retransform)
#print("total: ", self.access_total)
return item
def __len__(self):
return len(self.dataset)
#return 128
# flushes the cache of transformed images
def retransform(self):
print("total calls retransform: {}, cache hits: {}".format(
self.access_since_retransform, self.cache_hit))
#print("total: ", self.access_total)
self.transformed_cache = {}
self.access_since_retransform = 0
self.cache_hit = 0
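# Note (interpretation of the class above): the wrapper memoizes each
# transformed sample, so repeated accesses to the same index return the
# identical augmented image until retransform() flushes the cache; this is the
# behaviour the --transform_locking flag in the training script below refers
# to, and it matters for variance-reduced methods that re-evaluate the same
# examples.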
|
deep-variance-reduction-main
|
cifar_wrapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#from __future__ import print_function
import argparse
import pickle
import os
import time
from timeit import default_timer as timer
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
import math
import problems
import optimizers
import logging
import pdb
from torch.nn.functional import nll_loss, log_softmax
import numpy as np
from diagnostics import *
from run_vr import *
import torch.backends.cudnn as cudnn
def run(args_override={}):
run_dir = "runs"
disable_cuda = False
checkpoint_dir = "/checkpoint/{}/checkpoints".format(os.environ["USER"])
default_momentum = 0.9
default_lr = 0.1
default_decay = 0.0001
default_epochs = 300
default_batch_size = 128
default_tail_average = 0.0
default_tail_average_all = False
default_half_precision = False
default_method = "sgd" #"svrg" #"sgd"
default_log_diagnostics = False
default_log_diagnostics_every_epoch = False
default_log_fast_diagnostics = False
default_logfname = "log"
default_log_interval = 20
default_transform_locking = True
default_per_block = False
default_dropout = False
default_batchnorm = True
default_vr_from_epoch = 1 # 1 is first epoch.
default_calculate_train_loss_each_epoch = False
default_save_model = False # Saving every 10 epochs
default_resume = False
default_resume_from = ""
# It will always resume from a checkpoint
default_full_checkpointing = False
default_second_lambda = 0.5
default_inner_steps = 10
default_clamping = 1000.0
default_vr_bn_at_recalibration = True
default_variance_reg = 0.01
default_lr_reduction = "150-225"
default_L = 1.0
default_architecture = "default"
default_problem = "cifar10"
# Training settings
parser = argparse.ArgumentParser(description='PyTorch optimization testbed')
parser.add_argument('--problem', type=str, default=default_problem,
help='Problem instance (default: ' + default_problem + ')')
parser.add_argument('--method', type=str, default=default_method,
help='Optimization method (default: ' + default_method + ')')
parser.add_argument('--batch-size', type=int,
default=default_batch_size, metavar='M',
help='minibatch size (default: ' + str(default_batch_size) + ')')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=default_epochs, metavar='N',
help='number of epochs to train (default: ' + str(default_epochs) + ')')
parser.add_argument('--lr', type=float, default=default_lr, metavar='LR',
help='learning rate (default: ' + str(default_lr) + ')')
parser.add_argument('--momentum', type=float, default=default_momentum,
metavar='M',
help='SGD momentum (default: ' + str(default_momentum) + ')')
parser.add_argument('--decay', type=float, default=default_decay,
metavar='M',
help='SGD weight decay (default: ' + str(default_decay) + ')')
parser.add_argument('--L', type=float, default=default_L,
metavar='L',
help='SGD L estimate (default: ' + str(default_L) + ')')
parser.add_argument('--tail_average', type=float, default=default_tail_average,
help='Use tail averaging of iterates every epoch, with the given tail fraction (default: ' + str(default_tail_average) + ')')
parser.add_argument('--tail_average_all', type=str2bool, default=default_tail_average_all,
                        help='Apply tail averaging either to the whole run or just after the first lr reduction (default: ' + str(default_tail_average_all) + ')')
parser.add_argument('--clamping', type=float, default=default_clamping,
metavar='C', help='APS clamping (default: ' + str(default_clamping) + ')')
parser.add_argument('--inner_steps', type=int, default=default_inner_steps, metavar='N',
help='Inner steps for implicit methods (default: ' + str(default_inner_steps) + ')')
parser.add_argument('--vr_from_epoch', type=int, default=default_vr_from_epoch,
help='Start VR (if in use) at this epoch (default: ' + str(default_vr_from_epoch) + ')')
parser.add_argument('--no-cuda', action='store_true', default=disable_cuda,
help='disables CUDA training')
parser.add_argument('--half_precision', type=str2bool, default=default_half_precision,
help='Use half precision (default: ' + str(default_half_precision) + ')')
parser.add_argument('--second_lambda', type=float, default=default_second_lambda,
metavar='D',
help='A second linear interpolation factor used by some algorithms (default: '
+ str(default_second_lambda) + ')')
parser.add_argument('--variance_reg', type=float, default=default_variance_reg,
metavar='D',
help='Added to the variance in reparam to prevent divide by 0 problems (default: '
+ str(default_variance_reg) + ')')
parser.add_argument('--architecture', type=str, default=default_architecture,
help='architecture (default: ' + default_architecture + ')')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--dropout', type=str2bool, default=default_dropout,
help='Use dropout (default: ' + str(default_dropout) + ')')
parser.add_argument('--batchnorm', type=str2bool, default=default_batchnorm,
help='Use batchnorm (default: ' + str(default_batchnorm) + ')')
parser.add_argument('--transform_locking', type=str2bool, default=default_transform_locking,
                        help='Transform locking (default: ' + str(default_transform_locking) + ')')
parser.add_argument('--log_diagnostics', type=str2bool, default=default_log_diagnostics,
help='produce and log expensive diagnostics (default: ' + str(default_log_diagnostics) + ')')
parser.add_argument('--log_diagnostics_every_epoch', type=str2bool, default=default_log_diagnostics_every_epoch,
help='do full diagnostics every epoch instead of every 10')
parser.add_argument('--log_diagnostics_deciles', type=str2bool, default=False,
help='full diagnostics at every 10% of the epoch')
parser.add_argument('--log_fast_diagnostics', type=str2bool, default=default_log_fast_diagnostics,
help='produce and log cheap diagnostics (default: ' + str(default_log_fast_diagnostics) + ')')
parser.add_argument('--logfname', type=str, default=default_logfname,
                        help='Prefix for diagnostic log files (default: ' + str(default_logfname) + ')')
parser.add_argument('--save_model', type=str2bool, default=default_save_model,
help='Save model every 10 epochs (default: ' + str(default_save_model) + ')')
parser.add_argument('--resume', type=str2bool, default=default_resume,
help='Resume from resume_from (default: ' + str(default_resume) + ')')
parser.add_argument('--resume_from', type=str, default=default_resume_from,
help=' Path to saved model (default: ' + str(default_resume_from) + ')')
parser.add_argument('--full_checkpointing', type=str2bool, default=default_full_checkpointing,
help='Writeout and resume from checkpoints (default: ' + str(default_full_checkpointing) + ')')
parser.add_argument('--calculate_train_loss_each_epoch', type=str2bool, default=default_calculate_train_loss_each_epoch,
help='Do a second pass after each epoch to calculate the training error rate/loss (default: ' + str(default_calculate_train_loss_each_epoch) + ')')
parser.add_argument('--vr_bn_at_recalibration', type=str2bool, default=default_vr_bn_at_recalibration,
help='Use batch norm on the recalibration pass (default: ' + str(default_vr_bn_at_recalibration) + ')')
parser.add_argument('--lr_reduction', type=str, default=default_lr_reduction,
help='Use lr reduction specified (default: ' + str(default_lr_reduction) + ')')
parser.add_argument('--log_interval', type=int, default=default_log_interval, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--per_block', type=str2bool, default=default_per_block,
help='Use per block learning rates (default: ' + str(default_per_block) + ')')
args = parser.parse_args([])  # Ignore the command line; arguments come from the function call only
# Apply any overrides passed in by the caller
args.__dict__.update(args_override)
if isinstance(args, dict):
args = Struct(**args)
#"scsg"
args.opt_vr = opt_vr = (args.method in ["saga", "svrg", "pointsaga", "recompute_svrg", "online_svrg"])
run_name = (args.problem + "-" + args.architecture + "-" +
args.method + "-lr" + str(args.lr) +
"-m" + str(args.momentum) + "-" + "d" + str(args.decay) +
"-epochs" + str(args.epochs) + "bs" +
str(args.batch_size) +
"reduct_" + args.lr_reduction)
if not args.batchnorm:
run_name += "_nobn"
if args.dropout:
run_name += "_dropout"
if args.opt_vr and args.vr_from_epoch != 1:
run_name += "_vr_from_" + str(args.vr_from_epoch)
if not args.vr_bn_at_recalibration:
run_name += "_bn_recal_" + str(args.vr_bn_at_recalibration)
if args.resume:
run_name += "_resume"
if args.seed != 1:
run_name += "seed_" + str(args.seed)
if args.half_precision:
run_name += "_half"
if args.tail_average > 0:
run_name += "_tavg_" + str(args.tail_average)
if args.tail_average_all:
run_name += "_tall"
run_name = run_name.strip().replace('.', '_')
# SETUP LOGGING
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(message)s')
ch.setFormatter(formatter)
#if 'ch' in locals():
root.addHandler(ch)
############
logging.info("Run " + run_name)
logging.info("#########")
logging.info(args)
args.cuda = not args.no_cuda and torch.cuda.is_available()
logging.info("Using CUDA: {} CUDA AVAIL: {} #DEVICES: {}".format(
args.cuda, torch.cuda.is_available(), torch.cuda.device_count()))
cudnn.benchmark = True
logging.info("Loading data")
train_loader, test_loader, model, train_dataset = problems.load(args)
if hasattr(model, "sampler") and hasattr(model.sampler, "reorder"):
logging.info("NOTE: Consistant batch sampling in use")
if args.cuda:
logging.info("model.cuda")
model.cuda()
logging.info("")
if args.half_precision:
logging.info("Using half precision")
model = model.half()
if args.resume:
# Load
model.load_state_dict(torch.load(args.resume_from, map_location=lambda storage, loc: storage))
model.cuda()
logging.info("Resuming from file: {}".format(args.resume_from))
checkpoint_resume = False
if args.full_checkpointing:
# Look for and load checkpoint model
checkpoint_model_path = checkpoint_dir + "/" + run_name + "_checkpoint.model"
checkpoint_runinfo_path = checkpoint_dir + "/" + run_name + "_info.pkl"
if os.path.exists(checkpoint_model_path):
checkpoint_resume = True
logging.info("Checkpoint found: {}".format(checkpoint_model_path))
model.load_state_dict(torch.load(checkpoint_model_path, map_location=lambda storage, loc: storage))
model.cuda()
with open(checkpoint_runinfo_path, 'rb') as fcheckpoint:
runinfo = pickle.load(fcheckpoint)
if runinfo["epoch"] >= args.epochs:
logging.error("runinfo['epoch']: {} >= args.epochs, checkpoint is past/at end of run".format(runinfo["epoch"]))
return
else:
logging.info("No checkpoint exists, starting a fresh run")
############################
# Log some information about the model
logging.info("Model statistics:")
nparams = 0
group_idx = 0
for param in model.parameters():
#import pdb; pdb.set_trace()
group_size = 1
for g in param.size():
group_size *= g
nparams += group_size
group_idx += 1
train_nbatches = len(train_loader)
logging.info("total parameters: {:,}".format(nparams))
logging.info("minibatch size: {}".format(args.batch_size))
logging.info("Rough training dataset size: {:,} number of minibatches: {}".format(
len(train_loader)*args.batch_size, train_nbatches))
logging.info("Rough test dataset size: {:,} number of test minibatches: {}".format(
len(test_loader)*args.batch_size, len(test_loader)))
# Averaging fraction calculation
ntail_batches = int(train_nbatches*args.tail_average)
if ntail_batches == 0:
ntail_batches = 1
ntail_from = train_nbatches - ntail_batches
logging.info("Tail averaging fraction {:.2f} will average {} batches, from batch: {}, tail_average_all: {}".format(
args.tail_average, ntail_batches, ntail_from, args.tail_average_all
))
logging.info("Creating optimizer")
optimizer = optimizers.optimizer(model, args)
criterion = nn.CrossEntropyLoss()
def train(epoch):
model.train()
interval = timer()
start = timer()
start_time = time.time()
time_cuda = 0.0
time_variable = 0.0
time_forward = 0.0
time_backward = 0.0
time_step = 0.0
time_load = 0.0
if args.tail_average > 0.0:
averaged_so_far = 0
# create/zero tail_average storage
for group in optimizer.param_groups:
for p in group['params']:
param_state = optimizer.state[p]
if 'tail_average' not in param_state:
param_state['tail_average'] = p.data.clone().double().zero_()
load_start_time = time.time()
for batch_idx, (data, target) in enumerate(train_loader):
time_load += time.time() - load_start_time
cuda_time = time.time()
if args.cuda:
data, target = data.cuda(), target.cuda(non_blocking=True)
if args.half_precision:
data = data.half()
variable_time = time.time()
time_cuda += variable_time - cuda_time
data, target = Variable(data), Variable(target)
time_variable += time.time() - variable_time
def eval_closure():
nonlocal time_forward
nonlocal time_backward
closure_time = time.time()
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
eval_time = time.time()
time_forward += eval_time - closure_time
loss.backward()
time_backward += time.time() - eval_time
return loss
step_start_time = time.time()
if hasattr(optimizer, "step_preds"):
def partial_closure():
optimizer.zero_grad()
output = model(data)
logprobs = log_softmax(output)
return logprobs
loss = optimizer.step_preds(partial_closure, target)
elif opt_vr:
loss = optimizer.step(batch_idx, closure=eval_closure)
else:
loss = optimizer.step(closure=eval_closure)
time_step += time.time() - step_start_time
if args.log_diagnostics and epoch >= args.vr_from_epoch:
if args.method == "svrg":
in_run_diagnostics(epoch, batch_idx, args, train_loader, optimizer, model, criterion)
# Accumulate tail average
if args.tail_average > 0.0:
if batch_idx >= ntail_from:
averaged_so_far += 1
for group in optimizer.param_groups:
for p in group['params']:
param_state = optimizer.state[p]
tail = param_state['tail_average']
# Incremental running-mean update (see the sketch at the end of this file)
tail.add_(1.0/averaged_so_far, p.data.double() - tail)
if batch_idx % args.log_interval == 0:
mid = timer()
percent_done = 100. * batch_idx / len(train_loader)
if percent_done > 0:
time_estimate = math.ceil((mid - start)*(100/percent_done))
time_estimate = str(datetime.timedelta(seconds=time_estimate))
inst_estimate = math.ceil((mid - interval)*(len(train_loader)/args.log_interval))
inst_estimate = str(datetime.timedelta(seconds=inst_estimate))
else:
time_estimate = "unknown"
inst_estimate = "unknown"
logging.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, time: {} inst: {}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data.item(), time_estimate, inst_estimate))
if False:
since_start = time.time()
logging.info("load: {:.3f}, cuda: {:.3f}, variable: {:.3f}, forward: {:.3f}, backward: {:.3f}, step: {:.3f}, step-clo: {:.3f}, sum: {}, actual: {}".format(
time_load, time_cuda, time_variable, time_forward, time_backward, time_step, time_step - time_forward - time_backward,
time_load + time_cuda + time_variable + time_step, since_start - start_time
))
time_cuda = 0.0
time_variable = 0.0
time_forward = 0.0
time_backward = 0.0
time_step = 0.0
time_load = 0.0
interval = timer()
load_start_time = time.time()
if args.tail_average > 0.0:
if averaged_so_far != ntail_batches:
raise Exception("Off by one: {}, {}".format(averaged_so_far, ntail_batches))
current_lr = optimizer.param_groups[0]['lr']
if args.tail_average_all or args.lr != current_lr:
logging.info("Setting x to tail average ({}), current_lr: {}".format(
args.tail_average, current_lr))
for group in optimizer.param_groups:
for p in group['params']:
param_state = optimizer.state[p]
tail = param_state['tail_average']
p.data.zero_().add_(tail.type_as(p.data))
if hasattr(model, "sampler") and hasattr(model.sampler, "reorder"):
model.sampler.reorder()
if hasattr(train_dataset, "retransform"):
logging.info("retransform")
train_dataset.retransform()
def loss_stats(epoch, loader, setname):
model.eval()
loss = 0.0
correct = 0.0
for data, target in loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
if args.half_precision:
data = data.half()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
loss += criterion(output, target).data.item()
pred = output.data.max(1)[1] # index of the max log-probability
correct += pred.eq(target.data).cpu().sum().float().item()
loss /= len(loader) # loss function already averages over batch size
error_rate = 100.0 * correct / len(loader.dataset)
#pdb.set_trace()
logging.info('{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
setname, loss, correct, len(loader.dataset),
error_rate))
return (loss, error_rate)
# Create directory for saving the model if needed
problem_dir = run_dir + "/" + args.problem
if not os.path.exists(run_dir):
os.makedirs(run_dir)
if not os.path.exists(problem_dir):
os.makedirs(problem_dir)
if not checkpoint_resume:
runinfo = vars(args)
runinfo["train_losses"] = []
runinfo["train_errors"] = []
runinfo["test_losses"] = []
runinfo["test_errors"] = []
runinfo["nparams"] = nparams
runinfo["ndatapoints"] = len(train_loader)*args.batch_size
runinfo["nminibatches"] = len(train_loader)
runinfo["epoch"] = 0
else:
# When resuming
if hasattr(optimizer, "recalibrate"):
logging.info("Recalibrate for restart, epoch: {}".format(runinfo["epoch"]))
seed = runinfo["seed"] + 1031*runinfo["epoch"]
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
recalibrate(runinfo["epoch"], args, train_loader, test_loader, model, train_dataset, optimizer, criterion)
for epoch in range(runinfo["epoch"]+1, args.epochs + 1):
runinfo["epoch"] = epoch
logging.info("Starting epoch {}".format(epoch))
seed = runinfo["seed"] + 1031*epoch
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if epoch == 1 and hasattr(optimizer, "recalibrate"):
recalibrate(epoch, args, train_loader, test_loader, model, train_dataset, optimizer, criterion)
if args.lr_reduction == "default":
lr = args.lr * (0.1 ** (epoch // 75))
elif args.lr_reduction == "none" or args.lr_reduction == "False":
lr = args.lr
elif args.lr_reduction == "150":
lr = args.lr * (0.1 ** (epoch // 150))
elif args.lr_reduction == "150-225":
lr = args.lr * (0.1 ** (epoch // 150)) * (0.1 ** (epoch // 225))
elif args.lr_reduction == "up5x-20-down150":
if epoch < 20:
lr = args.lr
else:
lr = 3.0 * args.lr * (0.1 ** (epoch // 150))
elif args.lr_reduction == "up30-150-225":
if epoch < 30:
lr = args.lr
else:
lr = 3.0 * args.lr * (0.1 ** (epoch // 150)) * (0.1 ** (epoch // 225))
elif args.lr_reduction == "every30":
lr = args.lr * (0.1 ** (epoch // 30))
else:
logging.info("Lr scheme not recognised: {}".format(args.lr_reduction))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
logging.info("Learning rate: {}".format(lr))
start = timer()
if args.method == "scsg":
train_scsg(epoch, args, train_loader, test_loader, model, train_dataset, optimizer, criterion)
else:
train(epoch)
end = timer()
logging.info("Epoch took: {}".format(end-start))
logging.info("")
if args.calculate_train_loss_each_epoch:
(train_loss, train_err) = loss_stats(epoch, train_loader, "Train") #test(epoch)
else:
train_loss = 0
train_err = 0
runinfo["train_losses"].append(train_loss)
runinfo["train_errors"].append(train_err)
(test_loss, test_err) = loss_stats(epoch, test_loader, "Test") #test(epoch)
runinfo["test_losses"].append(test_loss)
runinfo["test_errors"].append(test_err)
logging.info("")
if args.log_fast_diagnostics and hasattr(optimizer, "store_old_table"):
logging.info("Storing old table")
optimizer.store_old_table()
if hasattr(optimizer, "recalibrate"):
recalibrate(epoch+1, args, train_loader, test_loader, model, train_dataset, optimizer, criterion)
if False: # Only works for recompute_svrg I think
batchnorm_diagnostics(epoch, args, train_loader, optimizer, model)
if epoch >= args.vr_from_epoch and args.log_fast_diagnostics and hasattr(optimizer, "epoch_diagnostics"):
optimizer.epoch_diagnostics(train_loss, train_err, test_loss, test_err)
# Occasionally save out the model.
if args.save_model and epoch % 10 == 0:
logging.info("Saving model ...")
model_dir = problem_dir + "/model_" + run_name
if not os.path.exists(model_dir):
os.makedirs(model_dir)
model_fname = "{}/epoch_{}.model".format(model_dir, epoch)
torch.save(model.state_dict(), model_fname)
logging.info("Saved model {}".format(model_fname))
out_fname = problem_dir + "/" + run_name + '_partial.pkl'
with open(out_fname, 'wb') as output:
pickle.dump(runinfo, output)
print("Saved partial: {}".format(out_fname))
if args.full_checkpointing:
checkpoint_model_path_tmp = checkpoint_model_path + ".tmp"
logging.info("Saving checkpoint model ...")
torch.save(model.state_dict(), checkpoint_model_path_tmp)
os.rename(checkpoint_model_path_tmp, checkpoint_model_path)
logging.info("Saved {}".format(checkpoint_model_path))
checkpoint_runinfo_path_tmp = checkpoint_runinfo_path + ".tmp"
with open(checkpoint_runinfo_path_tmp, 'wb') as output:
pickle.dump(runinfo, output)
os.rename(checkpoint_runinfo_path_tmp, checkpoint_runinfo_path)
print("Saved runinfo: {}".format(checkpoint_runinfo_path))
if True:
if args.method == "reparm":
optimizer.print_diagnostics()
out_fname = problem_dir + "/" + run_name + '_final.pkl'
with open(out_fname, 'wb') as output:
pickle.dump(runinfo, output)
print("Saved {}".format(out_fname))
if __name__ == "__main__":
run()
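# --- Editor's note: illustrative sketch only; this helper is hypothetical and is
# not called anywhere above. It shows the incremental running-mean update that
# train() applies for tail averaging (tail <- tail + (x - tail)/k), which equals
# the arithmetic mean of the iterates seen so far without storing them all.
def _tail_average_sketch(iterates):
    """Return the running mean of `iterates`, mirroring tail.add_(1.0/k, x - tail)."""
    tail = 0.0
    for k, x in enumerate(iterates, start=1):
        tail += (x - tail) / k
    return tail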
|
deep-variance-reduction-main
|
run.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim.optimizer import Optimizer, required
import torch
import pdb
import pickle
import math
import logging
class SCSG(Optimizer):
def __init__(self, params, args, nbatches, model, vr_bn_at_recalibration,
vr_from_epoch,
lr=required, momentum=required, weight_decay=required):
self.nbatches = nbatches
self.batches_processed = 0
self.epoch = 0
self.vr_bn_at_recalibration = vr_bn_at_recalibration
self.vr_from_epoch = vr_from_epoch
self.model = model
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
self.recompute_version = False
self.megabatch_size = 10 # number of minibatches in a megabatch
self.recalibration_interval = 10
self.recalibration_i = 0
self.interval_i = 0
self.stats_buffered = False
if self.megabatch_size != self.recalibration_interval:
raise Exception("megabatch_size != recalibration_interval not supported yet")
self.test_name = args.logfname
self.running_tmp = {}
self.running_interp = 0.9
super(SCSG, self).__init__(params, defaults)
def __setstate__(self, state):
super(SCSG, self).__setstate__(state)
def initialize(self):
for group in self.param_groups:
for p in group['params']:
momentum = group['momentum']
param_state = self.state[p]
if 'gavg' not in param_state:
param_state['gavg'] = p.data.clone().zero_()
param_state['gavg_debug'] = p.data.clone().zero_()
param_state['full_grad'] = p.data.clone().zero_()
param_state['gi'] = p.data.clone().zero_()
if not self.recompute_version:
gsize = p.data.size()
gtbl_size = torch.Size([self.megabatch_size] + list(gsize))
param_state['gktbl'] = torch.zeros(gtbl_size).cuda()
# m2 is the running gradient variance accumulator
param_state['m2'] = p.data.clone().zero_()
param_state['grad_running_cov'] = p.data.clone().zero_()
param_state['grad_running_var'] = p.data.clone().zero_()
param_state['grad_running_mean'] = p.data.clone().zero_()
if momentum != 0:
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = p.data.clone().zero_()
if 'tilde_x' not in param_state:
param_state['tilde_x'] = p.data.clone()
param_state['xk'] = p.data.clone()
state = self.model.state_dict()
# Batch norm's activation running_mean/var variables
for skey in state.keys():
if skey.endswith(".running_mean") or skey.endswith(".running_var"):
self.running_tmp[skey] = state[skey].clone()
logging.info("running: {}".format(self.running_tmp.keys()))
def recalibrate_start(self):
""" Part of the recalibration pass with SVRG.
Stores the gradients for later use. We don't do anything in online_svrg
"""
print("Recalibrate_start called")
self.epoch += 1
self.initialize()
gi_var = [0.0 for i in range(self.recalibration_interval)]
vr_var = [0.0 for i in range(self.recalibration_interval)]
if self.stats_buffered:
# Write out any variance statistics
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
#fg = param_state['full_grad']
#fg.zero_() # reset the full gradient
for i in range(self.recalibration_interval):
gi_var[i] += param_state["gi_var_acum"][i].sum()/self.recalibration_interval
vr_var[i] += param_state["vr_var_acum"][i].sum()/self.recalibration_interval
fname = 'stats/{}_scsg_{}.pkl'.format(self.test_name, self.epoch)
with open(fname, 'wb') as output:
pickle.dump({
'gi_var': gi_var,
'vr_var': vr_var,
'epoch': self.epoch,
}, output)
#self.gradient_variances = []
#self.vr_step_variances = []
#self.batch_indices = []
self.stats_buffered = False
print("logging pass diagnostics saved to {}".format(fname))
def recalibrate(self, batch_id, closure):
""" SCSG doesn't use recalibration passes.
"""
return 0.0
def epoch_diagnostics(self, train_loss, train_err, test_loss, test_err):
"""
Called after recalibrate, saves stats out to disk.
"""
def step_outer_part(self, closure, idx):
self.store_running_mean()
loss = closure()
for group in self.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
gavg = param_state['gavg']
gavg_debug = param_state['gavg_debug']
if self.recalibration_i == 0:
param_state['tilde_x'].zero_().add_(p.data)
gavg.zero_()
gavg_debug.zero_()
param_state['m2'].zero_()
gavg_debug.add_(1.0/self.megabatch_size, gk)
if not self.recompute_version:
param_state['gktbl'][idx, :] = gk
m2 = param_state['m2']
# Online mean/variance calculation (Welford's algorithm; see the sketch at the end of this file)
delta = gk - gavg
gavg.add_(1.0/(self.recalibration_i+1), delta)
#if self.recalibration_i == 4:
# pdb.set_trace()
delta2 = gk - gavg
m2.add_(delta*delta2)
self.restore_running_mean()
self.recalibration_i += 1
#self.batches_processed += 1
return loss
def step_inner_part(self, closure, idx):
# Check a few things:
if self.recalibration_i != self.megabatch_size:
raise Exception("bad self.recalibration_i: {}".format(self.recalibration_i))
if self.recompute_version:
self.store_running_mean()
## Store current xk, replace with x_tilde
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
xk = param_state['xk']
xk.zero_().add_(p.data)
p.data.zero_().add_(param_state['tilde_x'])
## Compute gradient at x_tilde
closure()
## Store x_tilde gradient in gi, and revert to xk
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
xk = param_state['xk']
param_state['gi'].zero_().add_(p.grad.data)
p.data.zero_().add_(xk)
self.restore_running_mean()
# JUST FOR DEBUGGING
if False:
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
gi = param_state['gi']
gi_tbl = param_state['gktbl'][idx, :]
#pdb.set_trace()
if torch.norm(gi-gi_tbl) > 1e-6:
print("difference: {}".format( torch.norm(gi-gi_tbl)))
pdb.set_trace()
else:
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
param_state['gi'] = param_state['gktbl'][idx, :]
## compute gradient at xk
loss = closure()
for group in self.param_groups:
momentum = group['momentum']
weight_decay = group['weight_decay']
learning_rate = group['lr']
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
gi = param_state['gi']
gavg = param_state['gavg']
#gavg_debug = param_state['gavg_debug']
if momentum != 0:
buf = param_state['momentum_buffer']
#########
if self.epoch < self.vr_from_epoch:
vr_gradient = gk.clone() # Just do sgd steps
else:
vr_gradient = gk.clone().sub_(gi).add_(gavg)
# Track the running mean and variance of the gradients.
grad_running_mean = param_state['grad_running_mean']
grad_running_var = param_state['grad_running_var']
grad_running_cov = param_state['grad_running_cov']
cov_update = (gk - grad_running_mean)*(gi - gavg)
grad_running_cov.mul_(self.running_interp).add_(1-self.running_interp, cov_update)
# Using the deltas from before and after the mean update (Welford-style)
# keeps the running variance estimate numerically stable.
delta1 = gk - grad_running_mean
grad_running_mean.mul_(self.running_interp).add_(1-self.running_interp, gk)
delta2 = gk - grad_running_mean
var_update = delta1*delta2
grad_running_var.mul_(self.running_interp).add_(1-self.running_interp, var_update)
#if torch.norm(gavg-gavg_debug) > 1e-7:
# raise Exception("gavg norm diff: {}".format(torch.norm(gavg-gavg_debug)))
if weight_decay != 0:
vr_gradient.add_(weight_decay, p.data)
if momentum != 0:
dampening = 0.0
vr_gradient = buf.mul_(momentum).add_(1 - dampening, vr_gradient)
# Take step.
p.data.add_(-learning_rate, vr_gradient)
# track number of minibatches seen
#logging.info("interval i: {}".format(self.interval_i))
self.batches_processed += 1
if self.batches_processed % 20 == 0 and self.batches_processed > 0:
running_cov_acum = 0.0
m2_acum = 0.0
var_acum = 0.0
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
grad_running_cov = param_state['grad_running_cov']
grad_running_var = param_state['grad_running_var']
m2 = param_state['m2']
running_cov_acum += grad_running_cov.sum()
var_acum += grad_running_var.sum()
# m2 is stored unnormalized; divide by the megabatch size to get the variance estimate
m2_norm = m2.div(self.megabatch_size)
m2_acum += m2_norm.sum()
if m2_acum > 0:
cov_var_ratio = running_cov_acum/m2_acum
vr_variance = var_acum + m2_acum - 2*running_cov_acum
vr_ratio = vr_variance/var_acum
corr_coef = running_cov_acum/math.sqrt(var_acum*m2_acum)
logging.info("VR RATIO: {:.3f}. Raw cov/var: {:.3f}, correlation coef: {:.3f}. Var: {:.3f} m2: {:.3f} cov: {:.3f}".format(
vr_ratio, cov_var_ratio, corr_coef, var_acum, m2_acum, running_cov_acum))
return loss
###############################################
###############################################
def full_grad_init(self):
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
fg = param_state['full_grad']
fg.zero_()
def full_grad_calc(self, closure):
loss = closure()
for group in self.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
fg = param_state['full_grad']
fg.add_(1.0/self.nbatches, gk)
def logging_pass_start(self):
self.recalibration_i = 0
self.interval_i = 0
self.stats_buffered = True
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
pgz = p.grad.data.clone().zero_()
#param_state["gi_mean_acum"] = []
param_state["gi_var_acum"] = []
#param_state["vr_mean_acum"] = []
param_state["vr_var_acum"] = []
for i in range(self.recalibration_interval):
#param_state["gi_mean_acum"].append(pgz.clone())
param_state["gi_var_acum"].append(pgz.clone())
#param_state["vr_mean_acum"].append(pgz.clone())
param_state["vr_var_acum"].append(pgz.clone())
def logging_pass(self, interval, closure):
self.store_running_mean()
## Store current xk, replace with x_tilde
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
xk = param_state['xk']
xk.zero_().add_(p.data)
p.data.zero_().add_(param_state['tilde_x'])
## Compute gradient at x_tilde
closure()
## Store x_tilde gradient in gi, and revert to xk
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
xk = param_state['xk']
param_state['gi'].zero_().add_(p.grad.data)
p.data.zero_().add_(xk)
# Restore running_mean/var
self.restore_running_mean()
## compute gradient at xk
loss = closure()
for group in self.param_groups:
momentum = group['momentum']
weight_decay = group['weight_decay']
learning_rate = group['lr']
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
full_grad = param_state['full_grad']
gi = param_state['gi']
gavg = param_state['gavg']
vr_gradient = gk.clone().sub_(gi).add_(gavg)
# Online mean/variance calculation
delta = gk - full_grad
param_state["gi_var_acum"][interval].add_(delta*delta)
# var version
delta = vr_gradient - full_grad
param_state["vr_var_acum"][interval].add_(delta*delta)
return loss
def store_running_mean(self):
# Store running_mean/var temporarily
state = self.model.state_dict()
for skey in self.running_tmp.keys():
self.running_tmp[skey].zero_().add_(state[skey])
def restore_running_mean(self):
state = self.model.state_dict()
for skey in self.running_tmp.keys():
state[skey].zero_().add_(self.running_tmp[skey])
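# --- Editor's note: standalone sketch, assuming nothing beyond the standard
# library. step_outer_part() above accumulates the megabatch gradient mean and
# second moment with Welford's online algorithm; the same update in scalar form:
def _welford_sketch(values):
    """Return (mean, m2), where m2 is the sum of squared deviations, matching
    the gavg / m2 updates in SCSG.step_outer_part()."""
    mean, m2 = 0.0, 0.0
    for n, x in enumerate(values, start=1):
        delta = x - mean
        mean += delta / n
        delta2 = x - mean
        m2 += delta * delta2
    return mean, m2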
|
deep-variance-reduction-main
|
scsg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim.optimizer import Optimizer, required
import torch
import pdb
import pickle
import math
import logging
class RecomputeSVRG(Optimizer):
r"""Implements the standard SVRG method
"""
def __init__(self, params, nbatches, model, vr_bn_at_recalibration,
vr_from_epoch,
lr=required, momentum=required, weight_decay=required):
self.nbatches = nbatches
self.batches_processed = 0
self.epoch = 0
self.vr_bn_at_recalibration = vr_bn_at_recalibration
self.vr_from_epoch = vr_from_epoch
self.model = model
self.running_tmp = {}
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
super(RecomputeSVRG, self).__init__(params, defaults)
def __setstate__(self, state):
super(RecomputeSVRG, self).__setstate__(state)
def initialize(self):
for group in self.param_groups:
for p in group['params']:
momentum = group['momentum']
param_state = self.state[p]
if 'gavg' not in param_state:
param_state['gavg'] = p.data.double().clone().zero_()
param_state['gi'] = p.data.clone().zero_()
param_state['gi_debug'] = p.data.clone().zero_()
if momentum != 0:
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = p.data.clone().zero_()
if 'tilde_x' not in param_state:
param_state['tilde_x'] = p.data.clone()
param_state['xk'] = p.data.clone()
# Batch norm's activation running_mean/var variables
state = self.model.state_dict()
for skey in state.keys():
if skey.endswith(".running_mean") or skey.endswith(".running_var"):
self.running_tmp[skey] = state[skey].clone()
def recalibrate_start(self):
""" Part of the recalibration pass with SVRG.
Stores the gradients for later use.
"""
self.epoch += 1
self.recal_calls = 0
self.initialize()
self.store_running_mean()
print("Recal epoch: {}".format(self.epoch))
if self.epoch >= self.vr_from_epoch:
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
gavg = param_state['gavg']
gavg.zero_()
tilde_x = param_state['tilde_x']
tilde_x.zero_().add_(p.data.clone())
#pdb.set_trace()
else:
logging.info("Skipping recalibration as epoch {} not >= {}".format(
self.epoch, self.vr_from_epoch))
def recalibrate(self, batch_id, closure):
""" Compute part of the full batch gradient, from one minibatch
"""
loss = closure()
if self.epoch >= self.vr_from_epoch:
self.recal_calls += 1
for group in self.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
gavg = param_state['gavg']
gavg.add_(1.0/self.nbatches, gk.double())
return loss
def recalibrate_end(self):
self.restore_running_mean()
if self.recal_calls != self.nbatches:
raise Exception("recalibrate_end called, with {} nbatches: {}".format(
self.recal_calls, self.nbatches))
def epoch_diagnostics(self, train_loss, train_err, test_loss, test_err):
"""
Called after recalibrate, saves stats out to disk.
"""
def step(self, batch_id, closure):
"""Performs a single optimization step.
Arguments:
closure (callable): A closure that reevaluates the model
and returns the loss.
"""
if self.epoch >= self.vr_from_epoch:
self.store_running_mean()
## Store current xk, replace with x_tilde
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
xk = param_state['xk']
xk.zero_().add_(p.data)
p.data.zero_().add_(param_state['tilde_x'])
# Standard is vr_bn_at_recalibration=True, so this doesn't fire.
if not self.vr_bn_at_recalibration:
self.model.eval() # turn off batch norm
## Compute gradient at x_tilde
closure()
## Store x_tilde gradient in gi, and revert to xk
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
xk = param_state['xk']
gi = param_state['gi']
gi.zero_().add_(p.grad.data)
p.data.zero_().add_(xk)
# Make sure batchnorm is handled correctly.
self.restore_running_mean()
## compute gradient at xk
loss = closure()
for group in self.param_groups:
momentum = group['momentum']
weight_decay = group['weight_decay']
learning_rate = group['lr']
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
gi = param_state['gi']
gavg = param_state['gavg']
if momentum != 0:
buf = param_state['momentum_buffer']
#########
if self.epoch >= self.vr_from_epoch:
vr_gradient = gk.clone().sub_(gi).add_(gavg.type_as(gk))
else:
vr_gradient = gk.clone() # Just do sgd steps
if weight_decay != 0:
vr_gradient.add_(weight_decay, p.data)
if momentum != 0:
dampening = 0.0
vr_gradient = buf.mul_(momentum).add_(1 - dampening, vr_gradient)
# Take step.
p.data.add_(-learning_rate, vr_gradient)
# track number of minibatches seen
self.batches_processed += 1
return loss
def store_running_mean(self):
# Store running_mean/var temporarily
state = self.model.state_dict()
#pdb.set_trace()
for skey in self.running_tmp.keys():
self.running_tmp[skey].zero_().add_(state[skey])
def restore_running_mean(self):
state = self.model.state_dict()
for skey in self.running_tmp.keys():
state[skey].zero_().add_(self.running_tmp[skey])
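# --- Editor's note: toy illustration only, not part of RecomputeSVRG. The
# variance-reduced direction used in step() is g_i(x) - g_i(x_tilde) + gavg,
# where gavg is the full gradient at the snapshot x_tilde. A scalar
# least-squares sketch (numpy assumed available):
def _svrg_toy_example(a, b, lr=0.1, epochs=5):
    """Minimise 0.5 * mean((a * x - b) ** 2) over scalar x with plain SVRG."""
    import numpy as np
    x = 0.0
    grad_i = lambda x_, i: a[i] * (a[i] * x_ - b[i])
    for _ in range(epochs):
        x_tilde = x
        full_grad = np.mean([grad_i(x_tilde, i) for i in range(len(a))])
        for i in np.random.permutation(len(a)):
            vr_grad = grad_i(x, i) - grad_i(x_tilde, i) + full_grad
            x -= lr * vr_grad
    return x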
|
deep-variance-reduction-main
|
recompute_svrg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.multiprocessing as multiprocessing
#from torch._C import _update_worker_pids, \
# _remove_worker_pids, _error_if_any_worker_fails
#from .sampler import SequentialSampler, RandomSampler, BatchSampler
from torch.utils.data.sampler import SequentialSampler, RandomSampler, BatchSampler
import signal
import functools
import collections
import re
import sys
import traceback
import threading
from torch._six import string_classes, int_classes
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
_use_shared_memory = False
"""Whether to use shared memory in default_collate"""
class ExceptionWrapper(object):
"Wraps an exception plus traceback to communicate across threads"
def __init__(self, exc_info):
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
global _use_shared_memory
_use_shared_memory = True
# Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
# module's handlers are executed after Python returns from C low-level
# handlers, likely when the same fatal signal happened again already.
# https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
#_set_worker_signal_handlers()
torch.set_num_threads(1)
torch.manual_seed(seed)
if init_fn is not None:
init_fn(worker_id)
while True:
r = index_queue.get()
if r is None:
break
idx, batch_indices = r
try:
samples = collate_fn([dataset[i] for i in batch_indices])
except Exception:
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples))
def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
if pin_memory:
torch.cuda.set_device(device_id)
while True:
try:
r = in_queue.get()
except Exception:
if done_event.is_set():
return
raise
if r is None:
break
if isinstance(r[1], ExceptionWrapper):
out_queue.put(r)
continue
idx, batch = r
try:
if pin_memory:
batch = pin_memory_batch(batch)
except Exception:
out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
out_queue.put((idx, batch))
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def default_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
def pin_memory_batch(batch):
if torch.is_tensor(batch):
return batch.pin_memory()
elif isinstance(batch, string_classes):
return batch
elif isinstance(batch, collections.Mapping):
return {k: pin_memory_batch(sample) for k, sample in batch.items()}
elif isinstance(batch, collections.Sequence):
return [pin_memory_batch(sample) for sample in batch]
else:
return batch
_SIGCHLD_handler_set = False
"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
def _set_SIGCHLD_handler():
if sys.platform == 'win32': # Windows doesn't support SIGCHLD handler
return
global _SIGCHLD_handler_set
if _SIGCHLD_handler_set:
return
previous_handler = signal.getsignal(signal.SIGCHLD)
if not callable(previous_handler):
previous_handler = None
def handler(signum, frame):
# The following call uses `waitid` with WNOHANG from the C side. Therefore,
# Python can still get and update the process status successfully.
#_error_if_any_worker_fails()
if previous_handler is not None:
previous_handler(signum, frame)
signal.signal(signal.SIGCHLD, handler)
_SIGCHLD_handler_set = True
class DataLoaderIter(object):
"Iterates once over the DataLoader's dataset, as specified by the sampler"
def __init__(self, loader):
self.dataset = loader.dataset
self.collate_fn = loader.collate_fn
self.batch_sampler = loader.batch_sampler
self.num_workers = loader.num_workers
self.pin_memory = loader.pin_memory and torch.cuda.is_available()
self.timeout = loader.timeout
self.done_event = threading.Event()
self.sample_iter = iter(self.batch_sampler)
if self.num_workers > 0:
self.worker_init_fn = loader.worker_init_fn
self.index_queue = multiprocessing.SimpleQueue()
self.worker_result_queue = multiprocessing.SimpleQueue()
self.batches_outstanding = 0
self.worker_pids_set = False
self.shutdown = False
self.send_idx = 0
self.rcvd_idx = 0
self.reorder_dict = {}
base_seed = torch.LongTensor(1).random_()[0]
self.workers = [
multiprocessing.Process(
target=_worker_loop,
args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn,
base_seed + i, self.worker_init_fn, i))
for i in range(self.num_workers)]
if self.pin_memory or self.timeout > 0:
self.data_queue = queue.Queue()
self.worker_manager_thread = threading.Thread(
target=_worker_manager_loop,
args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory,
torch.cuda.current_device()))
self.worker_manager_thread.daemon = True
self.worker_manager_thread.start()
else:
self.data_queue = self.worker_result_queue
for w in self.workers:
w.daemon = True # ensure that the worker exits on process exit
w.start()
#_update_worker_pids(id(self), tuple(w.pid for w in self.workers))
_set_SIGCHLD_handler()
self.worker_pids_set = True
# prime the prefetch loop
for _ in range(2 * self.num_workers):
self._put_indices()
def __len__(self):
return len(self.batch_sampler)
def _get_batch(self):
if self.timeout > 0:
try:
return self.data_queue.get(True, self.timeout)
except queue.Empty:
raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
else:
return self.data_queue.get()
def __next__(self):
if self.num_workers == 0: # same-process loading
indices = next(self.sample_iter) # may raise StopIteration
batch = self.collate_fn([self.dataset[i] for i in indices])
if self.pin_memory:
batch = pin_memory_batch(batch)
return batch
# check if the next sample has already been generated
if self.rcvd_idx in self.reorder_dict:
batch = self.reorder_dict.pop(self.rcvd_idx)
return self._process_next_batch(batch)
if self.batches_outstanding == 0:
self._shutdown_workers()
raise StopIteration
while True:
assert (not self.shutdown and self.batches_outstanding > 0)
idx, batch = self._get_batch()
self.batches_outstanding -= 1
if idx != self.rcvd_idx:
# store out-of-order samples
self.reorder_dict[idx] = batch
continue
return self._process_next_batch(batch)
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
def _put_indices(self):
assert self.batches_outstanding < 2 * self.num_workers
indices = next(self.sample_iter, None)
if indices is None:
return
self.index_queue.put((self.send_idx, indices))
self.batches_outstanding += 1
self.send_idx += 1
def _process_next_batch(self, batch):
self.rcvd_idx += 1
self._put_indices()
if isinstance(batch, ExceptionWrapper):
raise batch.exc_type(batch.exc_msg)
return batch
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
raise NotImplementedError("DataLoaderIterator cannot be pickled")
def _shutdown_workers(self):
if not self.shutdown:
self.shutdown = True
self.done_event.set()
# drain the queue in case worker_manager_thread is blocked on a put
while not self.data_queue.empty():
self.data_queue.get()
for _ in self.workers:
self.index_queue.put(None)
# done_event should be sufficient to exit worker_manager_thread, but
# be safe here and put another None
self.worker_result_queue.put(None)
if self.worker_pids_set:
#_remove_worker_pids(id(self))
self.worker_pids_set = False
def __del__(self):
if self.num_workers > 0:
self._shutdown_workers()
class DataLoader(object):
"""
Data loader. Combines a dataset and a sampler, and provides
single- or multi-process iterators over the dataset.
Arguments:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: 1).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: False).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, ``shuffle`` must be False.
batch_sampler (Sampler, optional): like sampler, but returns a batch of
indices at a time. Mutually exclusive with batch_size, shuffle,
sampler, and drop_last.
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
(default: 0)
collate_fn (callable, optional): merges a list of samples to form a mini-batch.
pin_memory (bool, optional): If ``True``, the data loader will copy tensors
into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: False)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: 0)
worker_init_fn (callable, optional): If not None, this will be called on each
worker subprocess with the worker id as input, after seeding and before data
loading. (default: None)
.. note:: By default, each worker will have its PyTorch seed set to
``base_seed + worker_id``, where ``base_seed`` is a long generated
by main process using its RNG. You may use ``torch.initial_seed()`` to access
this value in :attr:`worker_init_fn`, which can be used to set other seeds
(e.g. NumPy) before data loading.
.. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an
unpicklable object, e.g., a lambda function.
"""
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False,
timeout=0, worker_init_fn=None):
self.dataset = dataset
self.batch_size = batch_size
self.num_workers = num_workers
self.collate_fn = collate_fn
self.pin_memory = pin_memory
self.drop_last = drop_last
self.timeout = timeout
self.worker_init_fn = worker_init_fn
if timeout < 0:
raise ValueError('timeout option should be non-negative')
if batch_sampler is not None:
if batch_size > 1 or shuffle or sampler is not None or drop_last:
raise ValueError('batch_sampler is mutually exclusive with '
'batch_size, shuffle, sampler, and drop_last')
if sampler is not None and shuffle:
raise ValueError('sampler is mutually exclusive with shuffle')
if self.num_workers < 0:
raise ValueError('num_workers cannot be negative; '
'use num_workers=0 to disable multiprocessing.')
if batch_sampler is None:
if sampler is None:
if shuffle:
sampler = RandomSampler(dataset)
else:
sampler = SequentialSampler(dataset)
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
self.sampler = sampler
self.batch_sampler = batch_sampler
def __iter__(self):
return DataLoaderIter(self)
def __len__(self):
return len(self.batch_sampler)
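# --- Editor's note: hypothetical usage sketch; TensorDataset here just stands
# in for a real dataset. Single-process iteration over the DataLoader above:
def _dataloader_usage_sketch():
    from torch.utils.data import TensorDataset
    data = torch.randn(8, 3)
    labels = torch.zeros(8, dtype=torch.long)
    loader = DataLoader(TensorDataset(data, labels), batch_size=4, shuffle=True,
                        num_workers=0)  # num_workers=0 loads in the calling process
    for batch_data, batch_labels in loader:
        print(batch_data.shape, batch_labels.shape)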
|
deep-variance-reduction-main
|
UpdatedDataLoaderMult.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import argparse
import pickle
import os
from timeit import default_timer as timer
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
import math
import problems
import optimizers
import logging
import pdb
from torch.nn.functional import nll_loss, log_softmax
import numpy as np
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
#######################################################################
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
def in_run_diagnostics(epoch, batch_idx, args, train_loader, optimizer, model, criterion):
#logging.info("in run diagnostics invoked")
if (epoch % 10) == 0 or args.log_diagnostics_every_epoch:
nbatches = len(train_loader)
if args.log_diagnostics_deciles:
log_intervals = math.ceil(nbatches/10.0)
log_now = batch_idx % log_intervals == 0
else:
lp = math.ceil(nbatches/100.0)
log_now = batch_idx == int(math.ceil(nbatches/50.0))
log_now = log_now or batch_idx == int(math.ceil(nbatches/9.0))
log_now = log_now or batch_idx == int(math.ceil(nbatches/3.0))
log_now = log_now or batch_idx == nbatches-1
if log_now:
print("interval, batch_idx = {}".format(batch_idx))
optimizer.logging_pass_start()
if optimizer.epoch >= optimizer.vr_from_epoch:
for inner_batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.logging_pass(inner_batch_idx, closure=eval_closure)
logging.info("Logging pass finished")
optimizer.logging_pass_end(batch_idx)
def online_svrg_diagnostics(epoch, batch_idx, args, train_loader, optimizer, model, criterion):
if (epoch == 1 or (epoch % 10) == 0) and optimizer.epoch >= optimizer.vr_from_epoch and batch_idx == 0:
nbatches = len(train_loader)
mega_batch_size = optimizer.megabatch_size
recalibration_interval = optimizer.recalibration_interval
#print("interval, interval = {}".format(interval))
optimizer.logging_pass_start()
# Compute the snapshot
snapshot_i = 0
for inner_batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.snapshot_pass(inner_batch_idx, closure=eval_closure)
snapshot_i += 1
if snapshot_i == mega_batch_size:
break
logging.info("Snapshot computed")
for interval in range(recalibration_interval):
logging.info("Interval: {}, recal_i: {}".format(interval, optimizer.recalibration_i))
optimizer.full_grad_init()
# Do a full gradient calculation:
for inner_batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.full_grad_calc(inner_batch_idx, closure=eval_closure)
logging.info("Full grad calculation finished")
for inner_batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.logging_pass(interval, inner_batch_idx, closure=eval_closure)
logging.info("Logging pass finished")
# Take a single step at the end to progress in the interval
# Using whatever minibatch was last in the stats logging pass
optimizer.step(inner_batch_idx, closure=eval_closure)
def scsg_diagnostics(epoch, args, train_loader, optimizer, model, criterion):
if (epoch == 1 or (epoch % 10) == 0) and optimizer.epoch >= optimizer.vr_from_epoch:
nbatches = len(train_loader)
mega_batch_size = optimizer.megabatch_size
recalibration_interval = optimizer.recalibration_interval
#print("interval, interval = {}".format(interval))
optimizer.logging_pass_start()
# Compute the snapshot
data_buffer = []
inner_iters = optimizer.recalibration_interval
megabatch_size = optimizer.megabatch_size
optimizer.recalibration_i = 0
for batch_idx, (data, target) in enumerate(train_loader):
batch_id = batch_idx
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
data_buffer.append((data, target))
# Store megabatch gradients
def outer_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
loss = optimizer.step_outer_part(closure=outer_closure, idx=len(data_buffer) - 1)  # idx is the slot within the megabatch
if len(data_buffer) == megabatch_size:
logging.info("Snapshot complete")
for interval in range(recalibration_interval):
logging.info("Interval: {}, recal_i: {}".format(interval, optimizer.recalibration_i))
optimizer.full_grad_init()
# Do a full gradient calculation:
for inner_batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.full_grad_calc(closure=eval_closure)
logging.info("Full grad calculation finished")
for inner_i in range(inner_iters):
data, target = data_buffer[inner_i]
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.logging_pass(interval, closure=eval_closure)
logging.info("Logging pass finished")
# Take a single step at the end to progress in the interval
data, target = data_buffer[interval]
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.step_inner_part(closure=eval_closure, idx=interval)
data_buffer = []
optimizer.recalibration_i = 0
return
def minibatch_stats():
# This is just copied from run.py, needs to be modified to work.
if False:
batch_idx, (data, target) = next(enumerate(train_loader))
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
idx = 0
###
optimizer.zero_grad()
output = model(data)
#pdb.set_trace()
loss = criterion(output[idx, None], target[idx])
loss.backward()
baseline_sq = 0.0
for group in optimizer.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = optimizer.state[p]
param_state['baseline'] = gk.clone()
baseline_sq += torch.dot(gk, gk)
for idx in range(1, 5):
optimizer.zero_grad()
output = model(data)
loss = criterion(output[idx, None], target[idx])
loss.backward()
total_dot = 0.0
square_norm = 0.0
corrs = []
for group in optimizer.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = optimizer.state[p]
baseline = param_state['baseline']
# Compute correlation
dp = torch.dot(baseline, gk)
corr = dp/(torch.norm(baseline)*torch.norm(gk))
corrs.append(corr)
total_dot += dp
square_norm += torch.dot(gk, gk)
total_corr = total_dot/math.sqrt(square_norm*baseline_sq)
logging.info("i={}, corr: {}, layers: {}".format(idx, total_corr, corrs))
#pdb.set_trace()
def batchnorm_diagnostics(epoch, args, train_loader, optimizer, model):
#pdb.set_trace()
bnstuff = {'epoch': epoch, 'args': args}
state = model.state_dict()
for skey in state.keys():
if skey.startswith("bn3") or skey.startswith("fc1"):
# Convert to numpy first
bnstuff[skey] = state[skey].cpu().numpy()
#print("skey: {} size: {}".format(skey, state[skey].size()))
# Search optimizer state for param_state for this variable
for group in optimizer.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = optimizer.state[p]
if id(p.data) == id(state[skey]):
#print("match")
bnstuff[skey + ".gavg"] = param_state["gavg"].cpu().numpy()
# Store to disk
fname = 'stats/{}_batchnorm_epoch{}.pkl'.format(args.logfname, epoch)
with open(fname, 'wb') as output:
pickle.dump(bnstuff, output)
logging.info("Wrote out batch norm stats: {}".format(fname))
|
deep-variance-reduction-main
|
diagnostics.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torchvision import datasets, transforms
from torch.utils.data.sampler import RandomSampler
import torchvision
import torchvision.transforms as transforms
import caching_transforms
import torch.utils.data
import pdb
import logging
import numpy as np
from multiprocessing.sharedctypes import RawArray
from ctypes import Structure, c_double
class ImagenetWrapper(torch.utils.data.Dataset):
def __init__(self, root, lock_transforms):
global transform_instances
self.dataset = datasets.ImageFolder(root)
self.nimages = len(self.dataset)
self.rand_per_image = 6
self.transform_instances = RawArray(c_double, self.rand_per_image*self.nimages)
transform_instances = self.transform_instances
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# These modified transforms take the random numbers they need explicitly
# as arguments to the transform method.
self.crop = caching_transforms.RandomResizedCrop(224)
self.flip = caching_transforms.RandomHorizontalFlip()
# Create the initial transformation cache.
self.retransform()
# Must be called on each child process
def child_initialize(self, worker_id):
global transform_instances
transform_instances = self.transform_instances # self is parent processes version
global_worker_id = worker_id
print("child process: {}".format(global_worker_id))
def __getitem__(self, index):
global transform_instances
item = self.dataset[index]
img, lbl = item
if index == 0:
self.print_rands()
# Apply transforms using saved random numbers
start = index*self.rand_per_image
transformed_img = self.crop.transform(img,
transform_instances[start], transform_instances[start+1], transform_instances[start+2],
transform_instances[start+3], transform_instances[start+4])
transformed_img = self.flip.transform(transformed_img, transform_instances[start+5])
transformed_img = self.normalize(caching_transforms.to_tensor(transformed_img))
return (transformed_img, lbl)
def __len__(self):
#return 1024
return len(self.dataset)
def retransform(self):
np_instances = np.frombuffer(self.transform_instances)
# Generate all the random numbers
logging.info("Generating {} random numbers ...".format(len(self.transform_instances)))
np_instances[:] = np.random.uniform(size=len(self.transform_instances))
logging.info("Numbers generated")
self.print_rands()
def print_rands(self):
global transform_instances
start = 0
#pdb.set_trace()
print("len: {}. r1 {} r2 {} r3 {} r4 {} r5 {} r6 {}".format(
len(transform_instances), transform_instances[start], transform_instances[start+1], transform_instances[start+2],
transform_instances[start+3], transform_instances[start+4], transform_instances[start+5]
))
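# --- Editor's note: reduced sketch of the shared-buffer pattern used above,
# not called by ImagenetWrapper. All transform randomness is pre-generated into
# a lock-free RawArray so that every pass applies identical augmentations until
# retransform() refills the buffer in place:
def _shared_randomness_sketch(nimages, rand_per_image=6):
    buf = RawArray(c_double, rand_per_image * nimages)  # shared across workers
    view = np.frombuffer(buf)                           # numpy view, no copy
    view[:] = np.random.uniform(size=view.shape)        # (re)fill in place
    return buf, view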
|
deep-variance-reduction-main
|
imagenet_wrapper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import pdb
__all__ = ['densenet']
from torch.autograd import Variable
class Bottleneck(nn.Module):
def __init__(self, inplanes, batchnorm, nonlinearity,
expansion=4, growthRate=12, dropRate=0):
super(Bottleneck, self).__init__()
planes = expansion * growthRate
self.batchnorm = batchnorm
if self.batchnorm:
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
if self.batchnorm:
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, growthRate, kernel_size=3,
padding=1, bias=False)
self.nonlinearity = nonlinearity
self.dropRate = dropRate
def forward(self, x):
out = x
if self.batchnorm:
out = self.bn1(out)
out = self.nonlinearity(out)
out = self.conv1(out)
if self.batchnorm:
out = self.bn2(out)
out = self.nonlinearity(out)
out = self.conv2(out)
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, training=self.training)
out = torch.cat((x, out), 1)
return out
class BasicBlock(nn.Module):
def __init__(self, inplanes, batchnorm, nonlinearity, expansion=1, growthRate=12, dropRate=0):
super(BasicBlock, self).__init__()
planes = expansion * growthRate
self.batchnorm = batchnorm
if self.batchnorm:
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, growthRate, kernel_size=3,
padding=1, bias=False)
self.nonlinearity = nonlinearity
self.dropRate = dropRate
def forward(self, x):
out = x
if self.batchnorm:
out = self.bn1(out)
out = self.nonlinearity(out)
out = self.conv1(out)
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, training=self.training)
out = torch.cat((x, out), 1)
return out
class Transition(nn.Module):
def __init__(self, inplanes, outplanes, batchnorm, nonlinearity):
super(Transition, self).__init__()
self.batchnorm = batchnorm
if self.batchnorm:
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=1,
bias=False)
self.nonlinearity = nonlinearity
def forward(self, x):
out = x
if self.batchnorm:
out = self.bn1(out)
out = self.nonlinearity(out)
out = self.conv1(out)
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, depth=22, block=Bottleneck,
dropRate=0, num_classes=10, growthRate=12,
compressionRate=2, batchnorm=True,
nonlinearity=None):
super(DenseNet, self).__init__()
assert (depth - 4) % 3 == 0, 'depth should be 3n+4'
        n = (depth - 4) // 3 if block == BasicBlock else (depth - 4) // 6
self.growthRate = growthRate
self.dropRate = dropRate
self.batchnorm = batchnorm
self.nonlinearity = nonlinearity
        # self.inplanes tracks the running number of feature planes; it is
        # updated by the _make_denseblock / _make_transition helpers below
self.inplanes = growthRate * 2
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,
bias=False)
self.dense1 = self._make_denseblock(block, n)
self.trans1 = self._make_transition(compressionRate)
self.dense2 = self._make_denseblock(block, n)
self.trans2 = self._make_transition(compressionRate)
self.dense3 = self._make_denseblock(block, n)
if self.batchnorm:
self.bn = nn.BatchNorm2d(self.inplanes)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(self.inplanes, num_classes)
# Weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_denseblock(self, block, blocks):
layers = []
for i in range(blocks):
# Currently we fix the expansion ratio as the default value
layers.append(block(
self.inplanes,
self.batchnorm,
self.nonlinearity,
growthRate=self.growthRate,
dropRate=self.dropRate))
self.inplanes += self.growthRate
return nn.Sequential(*layers)
def _make_transition(self, compressionRate):
inplanes = self.inplanes
outplanes = int(math.floor(self.inplanes // compressionRate))
self.inplanes = outplanes
return Transition(inplanes, outplanes, self.batchnorm, self.nonlinearity)
def forward(self, x):
x = self.conv1(x)
x = self.trans1(self.dense1(x))
x = self.trans2(self.dense2(x))
x = self.dense3(x)
if self.batchnorm:
x = self.bn(x)
x = self.nonlinearity(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def densenet(**kwargs):
"""
Constructs a DenseNet model.
"""
return DenseNet(**kwargs)
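# A small usage sketch for the factory above, assuming CIFAR-10 sized inputs;
# depth=40, growthRate=36 mirrors the "densenet-40-36" option in problems.py.
if __name__ == '__main__':
    model = densenet(depth=40, growthRate=36, num_classes=10,
                     batchnorm=True, nonlinearity=F.relu)
    x = torch.randn(2, 3, 32, 32)
    print(model(x).shape)  # expected: torch.Size([2, 10])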
|
deep-variance-reduction-main
|
densenet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import argparse
import pickle
import os
from timeit import default_timer as timer
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
import math
import datetime
import problems
import optimizers
import logging
import pdb
from torch.nn.functional import nll_loss, log_softmax
import numpy as np
def recalibrate(epoch, args, train_loader, test_loader, model, train_dataset, optimizer, criterion):
if args.vr_bn_at_recalibration:
model.train()
else:
model.eval()
logging.info("Recalibration pass starting")
if hasattr(optimizer, "recalibrate_start"):
optimizer.recalibrate_start()
start = timer()
#logging.info("Recalibration loop ...")
if optimizer.epoch >= optimizer.vr_from_epoch and args.method != "online_svrg" and args.method != "scsg":
for batch_idx, (data, target) in enumerate(train_loader):
batch_id = batch_idx
#pdb.set_trace()
if args.cuda:
data, target = data.cuda(), target.cuda(non_blocking=True)
data, target = Variable(data), Variable(target)
#print("recal:")
#print(data[:2].data.cpu().numpy())
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.recalibrate(batch_id, closure=eval_closure)
if batch_idx % args.log_interval == 0:
mid = timer()
percent_done = 100. * batch_idx / len(train_loader)
if percent_done > 0:
time_estimate = math.ceil((mid - start)*(100/percent_done))
time_estimate = str(datetime.timedelta(seconds=time_estimate))
else:
time_estimate = "unknown"
logging.info('Recal Epoch: {} [{}/{} ({:.0f}%)] estimate: {}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
percent_done, time_estimate))
if hasattr(optimizer, "recalibrate_end"):
optimizer.recalibrate_end()
logging.info("Recalibration finished")
def train_scsg(epoch, args, train_loader, test_loader, model, train_dataset, optimizer, criterion):
logging.info("Train (SCSG version)")
model.train()
data_buffer = []
inner_iters = optimizer.recalibration_interval
megabatch_size = optimizer.megabatch_size
optimizer.recalibration_i = 0
logged = False
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
# Store megabatch gradients
def outer_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
loss = optimizer.step_outer_part(closure=outer_closure, idx=len(data_buffer))
data_buffer.append((data, target))
# When data-buffer is full, do the actual inner steps.
if len(data_buffer) == megabatch_size:
for inner_i in range(inner_iters):
data, target = data_buffer[inner_i]
def eval_closure():
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
return loss
optimizer.step_inner_part(closure=eval_closure, idx=inner_i)
data_buffer = []
optimizer.recalibration_i = 0
if not logged and args.log_diagnostics and epoch >= args.vr_from_epoch:
scsg_diagnostics(epoch, args, train_loader, optimizer, model, criterion)
logged = True
if batch_idx % args.log_interval == 0:
logging.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data.item()))
if hasattr(model, "sampler") and hasattr(model.sampler, "reorder"):
model.sampler.reorder()
if hasattr(train_dataset, "retransform"):
logging.info("retransform")
train_dataset.retransform()
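# A rough sketch of how the passes above combine for one epoch; the plain
# per-minibatch pass (`train` below) is a placeholder assumed for illustration.
def run_epoch_sketch(epoch, args, train_loader, test_loader, model,
                     train_dataset, optimizer, criterion):
    if args.method == "scsg":
        train_scsg(epoch, args, train_loader, test_loader, model,
                   train_dataset, optimizer, criterion)
    else:
        # SVRG-style methods first store the full-pass gradient table/average
        # that the subsequent variance-reduced steps correct against.
        recalibrate(epoch, args, train_loader, test_loader, model,
                    train_dataset, optimizer, criterion)
        train(epoch, args, train_loader, test_loader, model,
              train_dataset, optimizer, criterion)  # assumed to exist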
|
deep-variance-reduction-main
|
run_vr.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import pdb
import os
class VRSamplerIter(object):
def __init__(self, sampler):
self.sampler = sampler
self.i = 0
def __next__(self):
#if self.sampler.creation_process != os.getpid():
# print("__next__ called on child process")
self.i += 1
if self.i > self.sampler.nbatches:
raise StopIteration
else:
return self.sampler.batches[self.i-1]
def __len__(self):
return self.sampler.nbatches
class VRSampler(object):
"""Wraps two samplers to craete a sampler object suitable for use with
variance reduction. methods
Args:
initial_sampler (Sampler): Base sampler for initial ordering.
order (string) Either inorder, perm or random.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
list(VRSampler(range(10), order="inorder", batch_size=3, drop_last=False))
"""
def __init__(self, order, batch_size, dataset_size, drop_last=False):
self.order = order
self.batch_size = batch_size
self.dataset_size = dataset_size
self.drop_last = drop_last
self.creation_process = os.getpid()
self.reorder()
def reorder(self):
if self.creation_process != os.getpid():
raise Exception("reorder called on child process, which is bad. {} got: {}".format(self.creation_process, os.getpid()))
print("Reordering instances: {}".format(self.order))
if self.order == "perm":
idx_list = torch.randperm(self.dataset_size)
else:
idx_list = (torch.rand(self.dataset_size)*self.dataset_size).long()
# Generate initial minibatches
self.batches = []
batch = []
for idx in idx_list:
batch.append(idx)
if len(batch) == self.batch_size:
self.batches.append(batch)
batch = []
if len(batch) > 0 and not self.drop_last:
self.batches.append(batch)
self.nbatches = len(self.batches)
#pdb.set_trace()
def __iter__(self):
print("Sampler __iter__")
return VRSamplerIter(self)
def __len__(self):
return self.nbatches
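# A minimal usage sketch matching the constructor above; the sampler is meant
# to be passed as a DataLoader's batch_sampler, with reorder() called between
# epochs on the parent process to draw a fresh permutation.
if __name__ == '__main__':
    sampler = VRSampler(order="perm", batch_size=3, dataset_size=10)
    for batch in sampler:
        print(batch)      # lists of index tensors, e.g. [tensor(7), tensor(2), tensor(0)]
    sampler.reorder()     # new permutation; the batch count stays len(sampler)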
|
deep-variance-reduction-main
|
vr_sampler.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim.optimizer import Optimizer, required
import torch
import pdb
import pickle
import math
import logging
import scipy
import scipy.stats
import scipy.stats.mstats
class SVRG(Optimizer):
r"""Implements the standard SVRG method
"""
def __init__(self, params, args, nbatches, lr=required,
momentum=required, weight_decay=required):
self.nbatches = nbatches
self.batches_processed = 0
self.epoch = 0
self.vr_from_epoch = args.vr_from_epoch
self.test_name = args.logfname #"densenet" #"resnet-" #"sgd-" #"resnet-"
if args.transform_locking:
self.test_name += "_LOCK_"
else:
self.test_name += "_ULOCK_"
self.recalibration_i = 0
self.running_interp = 0.9
self.denom_epsilon = 1e-7 # avoid divide by zero
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
self.gradient_variances = []
self.vr_step_variances = []
self.batch_indices = []
self.iterate_distances = []
self.inrun_iterate_distances = []
self.inrun_grad_distances = []
super(SVRG, self).__init__(params, defaults)
def __setstate__(self, state):
super(SVRG, self).__setstate__(state)
def initialize(self):
m = self.nbatches
for group in self.param_groups:
for p in group['params']:
momentum = group['momentum']
gsize = p.data.size()
gtbl_size = torch.Size([m] + list(gsize))
param_state = self.state[p]
if 'gktbl' not in param_state:
param_state['gktbl'] = torch.zeros(gtbl_size)
param_state['logging_gktbl'] = torch.zeros(gtbl_size)
if 'tilde_x' not in param_state:
param_state['tilde_x'] = p.data.clone()
param_state['running_x'] = p.data.clone()
if 'gavg' not in param_state:
param_state['gavg'] = p.data.clone().double().zero_()
param_state['logging_gavg'] = p.data.clone().double().zero_()
param_state['m2'] = p.data.clone().double().zero_()
param_state['running_cov'] = p.data.clone().double().zero_()
param_state['running_mean'] = p.data.clone().double().zero_()
if momentum != 0:
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = p.data.clone().zero_()
def store_old_table(self):
"""
Stores the old gradient table for recalibration purposes.
"""
for group in self.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
gktbl = param_state['gktbl']
gavg = param_state['gavg']
param_state['gktbl_old'] = gktbl.clone()
param_state['gavg_old'] = gavg.clone()
def recalibrate_start(self):
""" Part of the recalibration pass with SVRG.
Stores the gradients for later use.
"""
self.epoch += 1
self.initialize()
self.recalibration_i = 0
# Write out any logging stats if needed
if len(self.gradient_variances) > 0:
fname = 'stats/{}variance_epoch{}.pkl'.format(self.test_name, self.epoch)
with open(fname, 'wb') as output:
pickle.dump({
'gradient_variances': self.gradient_variances,
'vr_step_variances': self.vr_step_variances,
'batch_indices': self.batch_indices,
'iterate_distances': self.iterate_distances,
'epoch': self.epoch,
}, output)
self.gradient_variances = []
self.vr_step_variances = []
self.batch_indices = []
print("logging pass diagnostics saved to {}".format(fname))
if self.epoch >= self.vr_from_epoch:
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
param_state['gavg'].zero_()
param_state['m2'].zero_()
# xk is changed to the running_x
p.data.zero_().add_(param_state['running_x'])
param_state['tilde_x'] = p.data.clone()
else:
logging.info("Skipping recalibration as epoch {} not >= {}".format(
self.epoch, self.vr_from_epoch))
def recalibrate(self, batch_id, closure):
""" Part of the recalibration pass with SVRG.
Stores the gradients for later use.
"""
loss = closure()
#print("recal loss:", loss)
m = self.nbatches
self.recalibration_i += 1
if self.epoch >= self.vr_from_epoch:
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
print("grad none")
pdb.set_trace()
continue
gk = p.grad.data.double()
param_state = self.state[p]
gktbl = param_state['gktbl']
gavg = param_state['gavg']
m2 = param_state['m2']
#pdb.set_trace()
                    # Online mean/variance calculation from Wikipedia
delta = gk - gavg
gavg.add_(1.0/self.recalibration_i, delta)
delta2 = gk - gavg
m2.add_((delta*delta2).type_as(m2))
param_state['running_mean'].zero_().add_(gavg)
param_state['running_cov'].zero_().add_(1.0/self.nbatches, m2.double())
#########
gktbl[batch_id, :] = p.grad.data.cpu().clone()
return loss
def epoch_diagnostics(self, train_loss, train_err, test_loss, test_err):
"""
Called after recalibrate, saves stats out to disk.
"""
m = self.nbatches
logging.info("Epoch diagnostics computation")
layernum = 0
layer_gradient_norm_sqs = []
gavg_norm_acum = 0.0
gavg_acum = []
for group in self.param_groups:
for p in group['params']:
layer_gradient_norm_sqs.append([])
gavg = self.state[p]['gavg'].cpu()
gavg_acum.append(gavg.numpy())
gavg_norm_acum += gavg.norm()**2 #torch.dot(gavg, gavg)
layernum += 1
gradient_norm_sqs = []
vr_step_variance = []
cos_acums = []
variances = []
for batch_id in range(m):
norm_acum = 0.0
ginorm_acum = 0.0
vr_acum = 0.0
layernum = 0
cos_acum = 0.0
var_acum = 0.0
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
gktbl = param_state['gktbl']
gavg = param_state['gavg'].type_as(p.data).cpu()
gi = gktbl[batch_id, :]
var_norm_sq = (gi-gavg).norm()**2 #torch.dot(gi-gavg, gi-gavg)
norm_acum += var_norm_sq
ginorm_acum += gi.norm()**2 #torch.dot(gi, gi)
layer_gradient_norm_sqs[layernum].append(var_norm_sq)
gktbl_old = param_state['gktbl_old']
gavg_old = param_state['gavg_old'].type_as(p.data).cpu()
gi_old = gktbl_old[batch_id, :]
#pdb.set_trace()
vr_step = gi - gi_old + gavg_old
vr_acum += (vr_step - gavg).norm()**2 #torch.dot(vr_step - gavg, vr_step - gavg)
cos_acum += torch.sum(gavg*gi)
var_acum += (gi - gavg).norm()**2
layernum += 1
gradient_norm_sqs.append(norm_acum)
vr_step_variance.append(vr_acum)
cosim = cos_acum/math.sqrt(ginorm_acum*gavg_norm_acum)
#pdb.set_trace()
cos_acums.append(cosim)
variances.append(var_acum)
variance = sum(variances)/len(variances)
print("mean cosine: {}".format(sum(cos_acums)/len(cos_acums)))
#pdb.set_trace()
with open('stats/{}fastdiagnostics_epoch{}.pkl'.format(self.test_name, self.epoch), 'wb') as output:
pickle.dump({
'train_loss': train_loss,
'train_err': train_err,
'test_loss': test_loss,
'test_err': test_err,
'epoch': self.epoch,
#'layer_gradient_norm_sqs': layer_gradient_norm_sqs,
#'gradient_norm_sqs': gradient_norm_sqs,
#'vr_step_variance': vr_step_variance,
#'cosine_distances': cos_acums,
#'variances': variances,
'variance': variance,
#'gavg_norm': gavg_norm_acum,
#'gavg': gavg_acum,
#'iterate_distances': self.inrun_iterate_distances,
#'grad_distances': self.inrun_grad_distances,
}, output)
print("Epoch diagnostics saved")
#pdb.set_trace()
self.inrun_iterate_distances = []
self.inrun_grad_distances = []
def logging_pass_start(self):
self.logging_evals = 0
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
logging_gavg = param_state['logging_gavg']
logging_gavg.zero_()
def logging_pass(self, batch_id, closure):
loss = closure()
m = self.nbatches
for group in self.param_groups:
for p in group['params']:
gk = p.grad.data
param_state = self.state[p]
logging_gktbl = param_state['logging_gktbl']
logging_gavg = param_state['logging_gavg']
logging_gavg.add_(1.0/m, gk.double())
logging_gktbl[batch_id, :] = gk.cpu().clone()
self.logging_evals += 1
return loss
def logging_pass_end(self, batch_idx):
m = self.nbatches
logging.info("logging diagnostics computation")
gradient_sqs = []
vr_step_sqs = []
forth_sqs = []
dist_sq_acum = 0.0
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
tilde_x = param_state['tilde_x']
iterate_diff = p.data - tilde_x
dist_sq_acum += iterate_diff.norm()**2 #torch.dot(iterate_diff,iterate_diff)
dist = math.sqrt(dist_sq_acum)
for batch_id in range(m):
grad_norm_acum = 0.0
vr_norm_acum = 0.0
forth_acum = 0.0
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
gktbl = param_state['gktbl']
gavg = param_state['gavg'].type_as(p.data).cpu()
gi = gktbl[batch_id, :].type_as(p.data).cpu()
                # Logging versions are at the current location xk,
                # compared to gavg/gktbl which are at xtilde
logging_gktbl = param_state['logging_gktbl']
logging_gavg = param_state['logging_gavg'].type_as(p.data).cpu()
logging_gi = logging_gktbl[batch_id, :].type_as(p.data).cpu()
vr_step = (logging_gi - gi + gavg) - logging_gavg
gi_step = logging_gi - logging_gavg
grad_norm_acum += gi_step.pow(2.0).sum().item()
vr_norm_acum += vr_step.pow(2.0).sum().item()
forth_acum += gi_step.pow(2.0).sum().item()
gradient_sqs.append(grad_norm_acum)
vr_step_sqs.append(vr_norm_acum)
forth_sqs.append(forth_acum**2)
# Compute variance numbers
gradient_variance = sum(gradient_sqs)/m
fourth_moment = sum(forth_sqs)/m - gradient_variance**2
vr_step_variance = sum(vr_step_sqs)/m
logging.info("gradient variance: {} vr: {}, ratio vr/g: {}".format(
gradient_variance, vr_step_variance, vr_step_variance/gradient_variance))
logging.info(f"forth: {fourth_moment} relative std: {math.sqrt(fourth_moment)/gradient_variance} rel SE: {math.sqrt(fourth_moment/m)/gradient_variance}")
logging.info("self.logging_evals: {}".format(self.logging_evals))
#pdb.set_trace()
self.gradient_variances.append(gradient_variance)
self.vr_step_variances.append(vr_step_variance)
self.batch_indices.append(batch_idx)
self.iterate_distances.append(dist)
def step(self, batch_id, closure):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = closure()
dist_sq_acum = 0.0
grad_dist_sq_acum = 0.0
#print("step loss: ", loss)
for group in self.param_groups:
momentum = group['momentum']
weight_decay = group['weight_decay']
learning_rate = group['lr']
for p in group['params']:
if p.grad is None:
continue
gk = p.grad.data
param_state = self.state[p]
gktbl = param_state['gktbl']
gavg = param_state['gavg'].type_as(p.data)
tilde_x = param_state['tilde_x']
if momentum != 0:
buf = param_state['momentum_buffer']
#########
if self.epoch < self.vr_from_epoch:
vr_gradient = gk.clone() # Just do sgd steps
else:
gi = gktbl[batch_id, :].cuda()
vr_gradient = gk.clone().sub_(gi - gavg)
# Some diagnostics
iterate_diff = p.data - tilde_x
#pdb.set_trace()
dist_sq_acum += iterate_diff.norm()**2 #torch.dot(iterate_diff,iterate_diff)
grad_diff = gi - gk
grad_dist_sq_acum += grad_diff.norm()**2 #torch.dot(grad_diff,grad_diff)
if weight_decay != 0:
vr_gradient.add_(weight_decay, p.data)
if momentum != 0:
dampening = 0.0
vr_gradient = buf.mul_(momentum).add_(1 - dampening, vr_gradient)
# Take step.
p.data.add_(-learning_rate, vr_gradient)
# Update running iterate mean:
param_state['running_x'].mul_(self.running_interp).add_(1-self.running_interp, p.data)
# track number of minibatches seen
self.batches_processed += 1
dist = math.sqrt(dist_sq_acum)
grad_dist = math.sqrt(grad_dist_sq_acum)
self.inrun_iterate_distances.append(dist)
self.inrun_grad_distances.append(grad_dist)
return loss
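# A condensed sketch of the loop this optimizer expects: a recalibration pass
# followed by variance-reduced steps keyed by the same batch_id, which assumes
# the loader replays identical batches (transform locking). CUDA is assumed
# because step() moves gradient-table entries with .cuda() once VR is active.
def svrg_epoch_sketch(model, optimizer, criterion, train_loader):
    optimizer.recalibrate_start()
    for batch_id, (data, target) in enumerate(train_loader):
        data, target = data.cuda(), target.cuda()
        def closure():
            optimizer.zero_grad()
            loss = criterion(model(data), target)
            loss.backward()
            return loss
        optimizer.recalibrate(batch_id, closure=closure)
    for batch_id, (data, target) in enumerate(train_loader):
        data, target = data.cuda(), target.cuda()
        def closure():
            optimizer.zero_grad()
            loss = criterion(model(data), target)
            loss.backward()
            return loss
        optimizer.step(batch_id, closure=closure)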
|
deep-variance-reduction-main
|
torch_svrg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.optim as optim
import torch_svrg
import recompute_svrg
import scsg
def optimizer(model, args):
print("Using", args.method)
if args.method == "sgd":
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.decay,
momentum=args.momentum)
elif args.method == "svrg":
optimizer = torch_svrg.SVRG(model.parameters(), args=args, lr=args.lr,
nbatches=args.nbatches,
momentum=args.momentum, weight_decay=args.decay)
elif args.method == "recompute_svrg":
optimizer = recompute_svrg.RecomputeSVRG(model.parameters(), lr=args.lr,
nbatches=args.nbatches, model=model, vr_bn_at_recalibration=args.vr_bn_at_recalibration,
vr_from_epoch=args.vr_from_epoch,
momentum=args.momentum, weight_decay=args.decay)
elif args.method == "scsg":
optimizer = scsg.SCSG(model.parameters(), args=args, lr=args.lr,
nbatches=args.nbatches, model=model, vr_bn_at_recalibration=args.vr_bn_at_recalibration,
vr_from_epoch=args.vr_from_epoch,
momentum=args.momentum, weight_decay=args.decay)
else:
raise Exception("Optimizer not recognised:", args.method)
return optimizer
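# A small sketch of the argparse-style namespace this factory consumes; the
# field names mirror those referenced above and the values are illustrative.
if __name__ == '__main__':
    import argparse
    import torch.nn as nn
    args = argparse.Namespace(
        method="svrg", lr=0.1, momentum=0.9, decay=1e-4, nbatches=391,
        vr_from_epoch=1, vr_bn_at_recalibration=True,
        transform_locking=True, logfname="example")
    opt = optimizer(nn.Linear(10, 2), args)
    print(type(opt).__name__)  # SVRG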
|
deep-variance-reduction-main
|
optimizers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    # Note: this trimmed copy never loads pretrained weights; ``pretrained``
    # and ``progress`` are accepted for API compatibility but are ignored.
    model = ResNet(block, layers, **kwargs)
    return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
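# A short sketch constructing the variant selected for ImageNet in problems.py;
# note that in this trimmed copy the pretrained/progress arguments never
# trigger a weight download (see the note in _resnet above).
if __name__ == '__main__':
    model = resnext101_32x8d(num_classes=1000)
    n_params = sum(p.numel() for p in model.parameters())
    print("resnext101_32x8d parameters: {:.1f}M".format(n_params / 1e6))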
|
deep-variance-reduction-main
|
resnext.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data.sampler import RandomSampler
import torchvision
import torchvision.transforms as transforms
import logging
from torch.autograd import Variable
import torch.utils
import torch.utils.data
import torch.utils.data.distributed
import torch.distributed as dist
from cifar_wrapper import CIFAR10_Wrapper
from vr_sampler import VRSampler
import UpdatedDataLoader
import UpdatedDataLoaderMult
import resnet
import pdb
import densenet
import resnext
from imagenet_wrapper import ImagenetWrapper
def load(args):
print("Problem:", args.problem)
if args.problem == "cifar10":
return cifar10(args)
elif args.problem == "imagenet":
return imagenet(args)
else:
raise Exception("Unrecognised problem:", args.problem)
def cifar10(args):
data_dir = os.path.expanduser('~/data')
kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}
transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# We don't do the random transforms at test time.
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
logging.info("Loading training dataset")
if (args.method.endswith("svrg") or args.method == "scsg") and args.transform_locking and args.opt_vr:
train_dataset = CIFAR10_Wrapper(
root=data_dir, train=True,
download=True, transform=transform)
else:
train_dataset = torchvision.datasets.CIFAR10(
root=data_dir, train=True,
download=True, transform=transform)
if args.method.endswith("svrg") and args.opt_vr:
if args.method == "saga":
raise Exception("vr sampler currently doesn't support saga")
logging.info("VR Sampler with order=perm")
sampler = VRSampler(order="perm",
batch_size=args.batch_size,
dataset_size=len(train_dataset))
train_loader = UpdatedDataLoader.DataLoader(
train_dataset, batch_sampler=sampler, **kwargs)
else:
sampler = RandomSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset, sampler=sampler, batch_size=args.batch_size, **kwargs)
args.nbatches = len(sampler)
logging.info("Loading test dataset")
test_dataset = torchvision.datasets.CIFAR10(
root=data_dir, train=False,
download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=args.batch_size,
shuffle=False, **kwargs)
nonlinearity = F.relu
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
logging.info("Initializing fully connected layers")
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
if args.batchnorm:
logging.info("Using batchnorm")
self.bn1 = nn.BatchNorm2d(6)
self.bn2 = nn.BatchNorm2d(16)
self.bn3 = nn.BatchNorm1d(120)
self.bn4 = nn.BatchNorm1d(84)
logging.info("initialized")
def forward(self, x):
x = self.conv1(x)
if args.batchnorm:
x = self.bn1(x)
            x = nonlinearity(x)
x = self.pool(x)
#pdb.set_trace()
x = self.conv2(x)
if args.batchnorm:
x = self.bn2(x)
            x = nonlinearity(x)
x = self.pool(x)
x = x.view(-1, 16 * 5 * 5)
x = self.fc1(x)
if args.batchnorm:
x = self.bn3(x)
            x = nonlinearity(x)
x = self.fc2(x)
if args.batchnorm:
x = self.bn4(x)
            x = nonlinearity(x)
x = self.fc3(x)
return x
logging.info("Loading architecture")
if args.architecture == "default":
logging.info("default architecture")
model = Net()
elif args.architecture == "resnet110":
model = resnet.ResNet110(batchnorm=args.batchnorm, nonlinearity=nonlinearity)
elif args.architecture == "resnet-small":
model = resnet.ResNetSmall(batchnorm=args.batchnorm, nonlinearity=nonlinearity)
elif args.architecture == "densenet-40-36":
model = densenet.densenet(depth=40, growthRate=36, batchnorm=args.batchnorm, nonlinearity=nonlinearity)
model = torch.nn.DataParallel(model)
else:
raise Exception("architecture not recognised:", args.architecture)
model.sampler = sampler
return train_loader, test_loader, model, train_dataset
def imagenet(args):
kwargs = {'num_workers': 32, 'pin_memory': True} if args.cuda else {}
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
lock_transforms = (args.method.endswith("svrg")) and args.transform_locking and args.opt_vr
logging.info("Loading training dataset")
train_dir = "/datasets01_101/imagenet_full_size/061417/train"
logging.info("Data ...")
train_dataset = ImagenetWrapper(train_dir, lock_transforms=lock_transforms)
logging.info("Imagenet Wrapper created")
logging.info("VR Sampler with order=perm")
sampler = VRSampler(order="perm",
batch_size=args.batch_size,
dataset_size=len(train_dataset))
train_loader = UpdatedDataLoaderMult.DataLoader(
train_dataset, batch_sampler=sampler,
worker_init_fn=train_dataset.child_initialize, **kwargs) #worker_init_fn
logging.info("Train Loader created, batches: {}".format(len(train_loader)))
test_loader = torch.utils.data.DataLoader(
datasets.ImageFolder("/datasets01_101/imagenet_full_size/061417/val",
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False, **kwargs)
args.nbatches = len(train_loader)
logging.info("Initializing model")
if args.architecture == "resnet18":
model = torchvision.models.resnet.resnet18()
elif args.architecture == "resnet50":
model = torchvision.models.resnet.resnet50()
elif args.architecture == "resnext101_32x8d":
model = resnext.resnext101_32x8d()
else:
raise Exception("Architecture not supported for imagenet")
logging.info("Lifting model to DataParallel")
model = torch.nn.DataParallel(model).cuda() # Use multiple gpus
model.sampler = sampler
return train_loader, test_loader, model, train_dataset
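# A rough sketch of the namespace load() expects; the field names mirror those
# used above and the values are illustrative. The CIFAR-10 branch downloads the
# dataset to ~/data on first use.
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(
        problem="cifar10", method="svrg", opt_vr=True, transform_locking=True,
        batch_size=128, cuda=torch.cuda.is_available(),
        batchnorm=True, architecture="default")
    train_loader, test_loader, model, train_dataset = load(args)
    print("train batches:", len(train_loader), "nbatches:", args.nbatches)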
|
deep-variance-reduction-main
|
problems.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numbers
import math
import random
from PIL import Image, ImageOps, ImageEnhance
import numpy as np
import numbers
import types
import collections
import warnings
try:
import accimage
except ImportError:
accimage = None
def _is_pil_image(img):
    if accimage is not None:
        return isinstance(img, (Image.Image, accimage.Image))
    else:
        return isinstance(img, Image.Image)
def _is_numpy_image(img):
    # Needed by to_tensor below; mirrors the torchvision helper of the same name.
    return isinstance(img, np.ndarray) and img.ndim in {2, 3}
import torch
class RandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a probability of 0.5."""
def transform(self, img, r):
if r < 0.5:
return hflip(img)
return img
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
self.size = (size, size)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params_from_random(img, scale, ratio, r1, r2, r3, r4, r5):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(6):
area = img.size[0] * img.size[1]
#target_area = random.uniform(*scale) * area
target_area = (r1*(scale[1] - scale[0]) + scale[0]) * area
#aspect_ratio = random.uniform(*ratio)
aspect_ratio = r2*(ratio[1] - ratio[0]) + ratio[0]
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if r3 < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
# randint is inclusive
i = int(r4*(img.size[1] - h + 1))
#i = random.randint(0, img.size[1] - h)
j = int(r5*(img.size[0] - w + 1))
#j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Rotate through them.
tmp = r1
r1 = r2
r2 = r3
r3 = r4
r4 = r5
r5 = tmp
#print("Attempts failed")
# Fallback
w = min(img.size[0], img.size[1])
i = (img.size[1] - w) // 2
j = (img.size[0] - w) // 2
return i, j, w, w
def transform(self, img, r1, r2, r3, r4, r5):
#i, j, h, w, size, interpolation = r
i, j, h, w = self.get_params_from_random(img, self.scale, self.ratio, r1, r2, r3, r4, r5)
return resized_crop(img, i, j, h, w, self.size, self.interpolation)
# @staticmethod
# def delegate(i, j, h, w, size, interpolation):
# return (lambda img: resized_crop(img, i, j, h, w, size, interpolation))
#def __call__(self, img_prev):
# i, j, h, w = self.get_params(img_prev, self.scale, self.ratio)
# return ()
#return self.delegate(i, j, h, w, self.size, self.interpolation)
def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
"""Crop the given PIL Image and resize it to desired size.
Notably used in RandomResizedCrop.
Args:
img (PIL Image): Image to be cropped.
i: Upper pixel coordinate.
j: Left pixel coordinate.
h: Height of the cropped image.
w: Width of the cropped image.
size (sequence or int): Desired output size. Same semantics as ``scale``.
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``.
Returns:
PIL Image: Cropped image.
"""
assert _is_pil_image(img), 'img should be PIL Image'
img = crop(img, i, j, h, w)
img = resize(img, size, interpolation)
return img
def crop(img, i, j, h, w):
"""Crop the given PIL Image.
Args:
img (PIL Image): Image to be cropped.
i: Upper pixel coordinate.
j: Left pixel coordinate.
h: Height of the cropped image.
w: Width of the cropped image.
Returns:
PIL Image: Cropped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.crop((j, i, j + w, i + h))
def resize(img, size, interpolation=Image.BILINEAR):
"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
the aspect ratio. i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation)
def hflip(img):
"""Horizontally flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
        PIL Image: Horizontally flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_LEFT_RIGHT)
def to_tensor(pic):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not(_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
# handle numpy array
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
return img.float().div(255)
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
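# A small sketch of the cached-random-number interface above: five uniforms
# drive the resized crop and a sixth drives the flip, matching the six numbers
# per image consumed by the dataset wrappers.
if __name__ == '__main__':
    crop = RandomResizedCrop(224)
    flip = RandomHorizontalFlip()
    img = Image.new('RGB', (640, 480))
    r = np.random.uniform(size=6)
    out = crop.transform(img, r[0], r[1], r[2], r[3], r[4])
    out = flip.transform(out, r[5])
    print(out.size, to_tensor(out).shape)  # (224, 224) torch.Size([3, 224, 224])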
|
deep-variance-reduction-main
|
caching_transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
'''ResNet in PyTorch.
from
https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py
based on
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
BasicBlock and Bottleneck module is from the original ResNet paper:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
PreActBlock and PreActBottleneck module is from the later paper:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1, batchnorm=True, nonlinearity=None):
super(PreActBlock, self).__init__()
self.batchnorm = batchnorm
if batchnorm:
self.bn1 = nn.BatchNorm2d(in_planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv1 = conv3x3(in_planes, planes, stride)
self.conv2 = conv3x3(planes, planes)
self.nonlinearity = nonlinearity
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
if self.batchnorm:
out = self.bn1(x)
else:
out = x
out = self.nonlinearity(out)
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
if self.batchnorm:
out = self.bn2(out)
out = self.nonlinearity(out)
out = self.conv2(out)
out += shortcut
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, batchnorm=True, nonlinearity=None):
super(Bottleneck, self).__init__()
self.batchnorm = batchnorm
if batchnorm:
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.nonlinearity = nonlinearity
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
if batchnorm:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
else:
self.shortcut = nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
def forward(self, x):
out = self.conv1(x)
if self.batchnorm:
out = self.bn1(out)
out = self.nonlinearity(out)
out = self.conv2(out)
if self.batchnorm:
out = self.bn2(out)
out = self.nonlinearity(out)
out = self.conv3(out)
if self.batchnorm:
out = self.bn3(out)
out += self.shortcut(x)
out = self.nonlinearity(out)
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1, batchnorm=True, nonlinearity=None):
super(PreActBottleneck, self).__init__()
self.batchnorm = batchnorm
if batchnorm:
self.bn1 = nn.BatchNorm2d(in_planes)
self.bn2 = nn.BatchNorm2d(planes)
self.bn3 = nn.BatchNorm2d(planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.nonlinearity = nonlinearity
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
if self.batchnorm:
out = self.bn1(x)
else:
out = x
out = self.nonlinearity(out)
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
if self.batchnorm:
out = self.bn2(out)
out = self.conv2(self.nonlinearity(out))
if self.batchnorm:
out = self.bn3(out)
out = self.conv3(self.nonlinearity(out))
out += shortcut
return out
class ResNetImageNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, batchnorm=True, nonlinearity=None):
super(ResNetImageNet, self).__init__()
self.batchnorm = batchnorm
self.in_planes = 64
self.conv1 = conv3x3(3,64)
if batchnorm:
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
self.nonlinearity = nonlinearity
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(
self.in_planes, planes, stride,
batchnorm=self.batchnorm, nonlinearity=self.nonlinearity))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
if self.batchnorm:
out = self.bn1(out)
out = self.nonlinearity(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResNetCifar(nn.Module):
def __init__(self, block, blocks_per=3, num_classes=10,
batchnorm=True, nonlinearity=None, in_planes=16):
super(ResNetCifar, self).__init__()
self.batchnorm = batchnorm
self.in_planes = in_planes # standard resnet is 16
self.nonlinearity = nonlinearity
self.conv1 = conv3x3(3, in_planes)
if batchnorm:
self.bn1 = nn.BatchNorm2d(in_planes)
self.layer1 = self._make_layer(block, in_planes, blocks_per, stride=1)
self.layer2 = self._make_layer(block, 2*in_planes, blocks_per, stride=2)
self.layer3 = self._make_layer(block, 4*in_planes, blocks_per, stride=2)
self.linear = nn.Linear(4*in_planes*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(
self.in_planes, planes, stride,
batchnorm=self.batchnorm, nonlinearity=self.nonlinearity))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
if self.batchnorm:
out = self.bn1(out)
out = self.nonlinearity(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
        # With the default in_planes=16 the feature map here is (N, 64, 8, 8).
        outp = F.avg_pool2d(out, 8)         # -> (N, 64, 1, 1)
        outv = outp.view(outp.size(0), -1)  # -> (N, 4*in_planes*expansion)
        outl = self.linear(outv)            # -> (N, num_classes)
        return outl
def ResNet18(**kwargs):
return ResNetImageNet(PreActBlock, [2,2,2,2], **kwargs)
def ResNet50(**kwargs):
return ResNetImageNet(Bottleneck, [3,4,6,3], **kwargs)
def ResNetSmallWide(**kwargs):
return ResNetCifar(PreActBlock, blocks_per=1, **kwargs)
def ResNetSmall(**kwargs):
return ResNetCifar(PreActBlock, blocks_per=3, in_planes=8, **kwargs)
def ResNet(**kwargs):
return ResNetCifar(PreActBlock, **kwargs)
# I get out-of-memory errors for these larger ones
def ResNet56(**kwargs):
return ResNetCifar(PreActBlock, blocks_per=9, **kwargs)
def ResNet110(**kwargs):
return ResNetCifar(PreActBlock, blocks_per=18, **kwargs)
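# A quick sketch of the CIFAR-sized constructors above; ResNetSmall corresponds
# to the "resnet-small" architecture option in problems.py.
if __name__ == '__main__':
    model = ResNetSmall(batchnorm=True, nonlinearity=F.relu)
    x = torch.randn(2, 3, 32, 32)
    print(model(x).shape)  # expected: torch.Size([2, 10])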
|
deep-variance-reduction-main
|
resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.multiprocessing as multiprocessing
from torch.utils.data.sampler import SequentialSampler, RandomSampler
import collections
import sys
import traceback
import threading
import pdb
if sys.version_info[0] == 2:
import Queue as queue
string_classes = basestring
else:
import queue
string_classes = (str, bytes)
_use_shared_memory = False
"""Whether to use shared memory in default_collate"""
class BatchSampler(object):
"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
class ExceptionWrapper(object):
"Wraps an exception plus traceback to communicate across threads"
def __init__(self, exc_info):
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
def _worker_loop(dataset, index_queue, data_queue, collate_fn):
global _use_shared_memory
_use_shared_memory = True
torch.set_num_threads(1)
while True:
r = index_queue.get()
if r is None:
data_queue.put(None)
break
idx, batch_indices = r
try:
samples = collate_fn([dataset[i] for i in batch_indices])
except Exception:
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples))
def _pin_memory_loop(in_queue, out_queue, done_event):
while True:
try:
r = in_queue.get()
except:
if done_event.is_set():
return
raise
if r is None:
break
if isinstance(r[1], ExceptionWrapper):
out_queue.put(r)
continue
idx, batch = r
try:
batch = pin_memory_batch(batch)
except Exception:
out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
out_queue.put((idx, batch))
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def default_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
if torch.is_tensor(batch[0]):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif type(batch[0]).__module__ == 'numpy':
elem = batch[0]
if type(elem).__name__ == 'ndarray':
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
.format(type(batch[0]))))
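# Illustrative example of the collation rules above (added comment): a batch of
# (image, label) pairs hits the Sequence branch, is transposed, and each field
# is collated separately, e.g.
#   >>> batch = [(torch.zeros(3, 32, 32), 1), (torch.ones(3, 32, 32), 0)]
#   >>> images, labels = default_collate(batch)
#   >>> images.shape   # torch.Size([2, 3, 32, 32]); labels is LongTensor([1, 0])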
def pin_memory_batch(batch):
if torch.is_tensor(batch):
return batch.pin_memory()
elif isinstance(batch, string_classes):
return batch
elif isinstance(batch, collections.Mapping):
return {k: pin_memory_batch(sample) for k, sample in batch.items()}
elif isinstance(batch, collections.Sequence):
return [pin_memory_batch(sample) for sample in batch]
else:
return batch
class DataLoaderIter(object):
"Iterates once over the DataLoader's dataset, as specified by the sampler"
def __init__(self, loader):
self.dataset = loader.dataset
self.collate_fn = loader.collate_fn
self.batch_sampler = loader.batch_sampler
self.num_workers = loader.num_workers
self.pin_memory = loader.pin_memory
self.done_event = threading.Event()
self.sample_iter = iter(self.batch_sampler)
if self.num_workers > 0:
self.index_queue = multiprocessing.SimpleQueue()
self.data_queue = multiprocessing.SimpleQueue()
self.batches_outstanding = 0
self.shutdown = False
self.send_idx = 0
self.rcvd_idx = 0
self.reorder_dict = {}
self.workers = [
multiprocessing.Process(
target=_worker_loop,
args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))
for _ in range(self.num_workers)]
for w in self.workers:
w.daemon = True # ensure that the worker exits on process exit
w.start()
if self.pin_memory:
in_data = self.data_queue
self.data_queue = queue.Queue()
self.pin_thread = threading.Thread(
target=_pin_memory_loop,
args=(in_data, self.data_queue, self.done_event))
self.pin_thread.daemon = True
self.pin_thread.start()
# prime the prefetch loop
for _ in range(2 * self.num_workers):
self._put_indices()
def __len__(self):
return len(self.batch_sampler)
def __next__(self):
if self.num_workers == 0: # same-process loading
indices = next(self.sample_iter) # may raise StopIteration
batch = self.collate_fn([self.dataset[i] for i in indices])
if self.pin_memory:
batch = pin_memory_batch(batch)
return batch
# check if the next sample has already been generated
if self.rcvd_idx in self.reorder_dict:
batch = self.reorder_dict.pop(self.rcvd_idx)
return self._process_next_batch(batch)
if self.batches_outstanding == 0:
self._shutdown_workers()
raise StopIteration
while True:
assert (not self.shutdown and self.batches_outstanding > 0)
idx, batch = self.data_queue.get()
self.batches_outstanding -= 1
if idx != self.rcvd_idx:
# store out-of-order samples
self.reorder_dict[idx] = batch
continue
#pdb.set_trace()
return self._process_next_batch(batch)
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
def _put_indices(self):
assert self.batches_outstanding < 2 * self.num_workers
indices = next(self.sample_iter, None)
if indices is None:
return
self.index_queue.put((self.send_idx, indices))
self.batches_outstanding += 1
self.send_idx += 1
def _process_next_batch(self, batch):
self.rcvd_idx += 1
self._put_indices()
if isinstance(batch, ExceptionWrapper):
raise batch.exc_type(batch.exc_msg)
return batch
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
raise NotImplementedError("DataLoaderIterator cannot be pickled")
def _shutdown_workers(self):
if not self.shutdown:
self.shutdown = True
self.done_event.set()
for _ in self.workers:
self.index_queue.put(None)
def __del__(self):
if self.num_workers > 0:
self._shutdown_workers()
class DataLoader(object):
"""
Data loader. Combines a dataset and a sampler, and provides
single- or multi-process iterators over the dataset.
Arguments:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: 1).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: False).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, ``shuffle`` must be False.
batch_sampler (Sampler, optional): like sampler, but returns a batch of
indices at a time. Mutually exclusive with batch_size, shuffle,
sampler, and drop_last.
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means that the data will be loaded in the main process
(default: 0)
collate_fn (callable, optional): merges a list of samples to form a mini-batch.
pin_memory (bool, optional): If ``True``, the data loader will copy tensors
into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If False and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: False)
"""
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False):
self.dataset = dataset
self.batch_size = batch_size
self.num_workers = num_workers
self.collate_fn = collate_fn
self.pin_memory = pin_memory
self.drop_last = drop_last
if batch_sampler is not None:
if batch_size > 1 or shuffle or sampler is not None or drop_last:
raise ValueError('batch_sampler is mutually exclusive with '
'batch_size, shuffle, sampler, and drop_last')
if sampler is not None and shuffle:
raise ValueError('sampler is mutually exclusive with shuffle')
if batch_sampler is None:
if sampler is None:
if shuffle:
sampler = RandomSampler(dataset)
else:
sampler = SequentialSampler(dataset)
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
self.sampler = sampler
self.batch_sampler = batch_sampler
def __iter__(self):
return DataLoaderIter(self)
def __len__(self):
return len(self.batch_sampler)
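# A minimal usage sketch, added for illustration (not part of the original
# file). TinyDataset is a made-up stand-in for any map-style dataset exposing
# __len__ and __getitem__; run it under the older Python/PyTorch environment
# this file targets.
if __name__ == "__main__":
    class TinyDataset(object):
        def __init__(self, n=10):
            self.items = [(torch.full((3,), float(i)), i % 2) for i in range(n)]
        def __len__(self):
            return len(self.items)
        def __getitem__(self, idx):
            return self.items[idx]

    loader = DataLoader(TinyDataset(), batch_size=4, shuffle=True, num_workers=0)
    for features, labels in loader:
        # default_collate stacks the feature tensors and wraps labels in a LongTensor
        print(features.shape, labels)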
|
deep-variance-reduction-main
|
UpdatedDataLoader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from math import atan2, degrees
import numpy as np
# Label line with line2D label data
def labelLine(line,x,label=None,align=True,**kwargs):
ax = line.axes
xdata = line.get_xdata()
ydata = line.get_ydata()
if (x < xdata[0]) or (x > xdata[-1]):
print('x label location is outside data range!')
return
#Find corresponding y co-ordinate and angle of the line
ip = 1
for i in range(len(xdata)):
if x < xdata[i]:
ip = i
break
y = ydata[ip-1] + (ydata[ip]-ydata[ip-1])*(x-xdata[ip-1])/(xdata[ip]-xdata[ip-1])
if not label:
label = line.get_label()
if align:
#Compute the slope
dx = xdata[ip] - xdata[ip-1]
dy = ydata[ip] - ydata[ip-1]
ang = degrees(atan2(dy,dx))
#Transform to screen co-ordinates
pt = np.array([x,y]).reshape((1,2))
trans_angle = ax.transData.transform_angles(np.array((ang,)),pt)[0]
else:
trans_angle = 0
#Set a bunch of keyword arguments
if 'color' not in kwargs:
kwargs['color'] = line.get_color()
if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
kwargs['ha'] = 'center'
if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
kwargs['va'] = 'center'
if 'backgroundcolor' not in kwargs:
kwargs['backgroundcolor'] = ax.get_facecolor()
if 'clip_on' not in kwargs:
kwargs['clip_on'] = True
if 'zorder' not in kwargs:
kwargs['zorder'] = 2.5
# Add box with rounded corners around the label
kwargs["bbox"] = dict(
fill=True,
facecolor=kwargs['backgroundcolor'],
edgecolor=kwargs["color"],
#linestyle="solid",
linewidth=0.3, #None is default
boxstyle="round,rounding_size=0.5,pad=0.15"
)
ax.text(x,y,label,rotation=trans_angle,**kwargs)
def labelLines(lines,align=True,xvals=None,**kwargs):
ax = lines[0].axes
labLines = []
labels = []
#Take only the lines which have labels other than the default ones
for line in lines:
label = line.get_label()
if "_line" not in label:
labLines.append(line)
labels.append(label)
if xvals is None:
xmin,xmax = ax.get_xlim()
xvals = np.linspace(xmin,xmax,len(labLines)+2)[1:-1]
for line,x,label in zip(labLines,xvals,labels):
labelLine(line,x,label,align,**kwargs)
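# A small usage sketch, added for illustration (not part of the original file):
# place inline labels on two named lines instead of drawing a legend.
if __name__ == "__main__":
    import matplotlib
    matplotlib.use("Agg")  # headless backend so the sketch runs without a display
    import matplotlib.pyplot as plt

    xs = np.linspace(0.1, 10.0, 200)
    plt.plot(xs, np.sqrt(xs), label="sqrt(x)")
    plt.plot(xs, np.log(xs), label="log(x)")
    labelLines(plt.gca().get_lines(), align=True)
    plt.savefig("label_lines_demo.png")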
|
deep-variance-reduction-main
|
reproduce/label_lines.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import run
methods = ["sgd", "recompute_svrg", "scsg"]
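# Invocation sketch (added comment): the script is meant to be launched as
#   python reproduce/reproduce_test_error_imagenet_next.py <method-index> <seed>
# where <method-index> picks from ["sgd", "recompute_svrg", "scsg"]; without
# arguments it falls back to method index 0 and seed 0.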
try:
    pindex = int(sys.argv[1])
    seed = int(sys.argv[2])
    print(f"problem index {pindex}")
except (IndexError, ValueError):
    # Fall back to defaults when the command-line arguments are missing or invalid
    pindex = 0
    seed = 0
method = methods[pindex]
runargs = {
'method': method,
'seed': seed,
'problem': 'imagenet',
'architecture': 'resnext101_32x8d',
'momentum': 0.9,
'lr': 0.1,
'decay': 0.0001,
'lr_reduction': "every30",
'batch_size': 256,
'epochs': 90,
'save_model': True,
'full_checkpointing': True,
'log_interval': 80,
}
run.run(runargs)
|
deep-variance-reduction-main
|
reproduce/reproduce_test_error_imagenet_next.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import run
archs = ['default', 'densenet-40-36']
try:
    pindex = int(sys.argv[1])
    print(f"problem index {pindex}")
except (IndexError, ValueError):
    # Default to the first architecture when no index is given
    pindex = 0
arch = archs[pindex]
runargs = {
'transform_locking': True,
'problem': 'cifar10',
'architecture': arch,
'method': "svrg",
'logfname': 'reproduce-iterate-distance-{}'.format(arch),
'momentum': 0.9,
'decay': 0.0001,
'lr': 0.1,
'lr_reduction': "150-225",
'batch_size': 128,
'epochs': 3,
'log_diagnostics': True,
'log_diagnostics_every_epoch': True,
'log_diagnostics_deciles': True,
}
run.run(runargs)
|
deep-variance-reduction-main
|
reproduce/reproduce_iterate_distance.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import run
vr_froms = [1, 21, 41, 61, 81, 1234]
try:
    pindex = int(sys.argv[1])
    print(f"problem index {pindex}")
except (IndexError, ValueError):
    # Default to the first vr_from_epoch setting when no index is given
    pindex = 0
# Note: unlike the other reproduce scripts, this one hard-codes the method
# ('recompute_svrg' below) and only varies vr_from_epoch via vr_froms[pindex],
# so no method/seed lookup is needed here.
runargs = {
'vr_from_epoch': vr_froms[pindex],
'method': 'recompute_svrg',
'problem': 'imagenet',
'architecture': 'resnet50',
'momentum': 0.9,
'lr': 0.1,
'decay': 0.0001,
'lr_reduction': "every30",
'batch_size': 256,
'epochs': 100,
'save_model': True,
'full_checkpointing': True,
'log_interval': 80,
}
run.run(runargs)
|
deep-variance-reduction-main
|
reproduce/reproduce_finetuning.py
|